Motr M0
user_aarch64_atomic.h
/* -*- C -*- */
/*
 * Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */


#pragma once

#ifndef __MOTR_LIB_USER_AARCH64_ATOMIC_H__
#define __MOTR_LIB_USER_AARCH64_ATOMIC_H__

#include "lib/types.h"
#include "lib/assert.h"

/**
 * Atomic 64-bit counter.
 */
struct m0_atomic64 {
	long a_value;
};

static inline void m0_atomic64_add(struct m0_atomic64 *a, int64_t num);
static inline void m0_atomic64_sub(struct m0_atomic64 *a, int64_t num);

static inline void m0_atomic64_set(struct m0_atomic64 *a, int64_t num)
{
	M0_CASSERT(sizeof a->a_value == sizeof num);

	a->a_value = num;
}

/**
 * Returns the current value of the counter.
 */
static inline int64_t m0_atomic64_get(const struct m0_atomic64 *a)
{
	return a->a_value;
}

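/*
 * Illustrative usage (a sketch, not part of the interface; "nr_refs" is a
 * hypothetical name): a counter is initialised with m0_atomic64_set() and
 * read back with m0_atomic64_get().
 *
 *	struct m0_atomic64 nr_refs;
 *
 *	m0_atomic64_set(&nr_refs, 1);
 *	M0_ASSERT(m0_atomic64_get(&nr_refs) == 1);
 */
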
/**
 * Atomically increments the counter by one.
 */
static inline void m0_atomic64_inc(struct m0_atomic64 *a)
{
	m0_atomic64_add(a, (int64_t)1);
}

/**
 * Atomically decrements the counter by one.
 */
static inline void m0_atomic64_dec(struct m0_atomic64 *a)
{
	m0_atomic64_sub(a, (int64_t)1);
}

/**
 * Atomically adds "num" to the counter.
 */
static inline void m0_atomic64_add(struct m0_atomic64 *a, int64_t num)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
		     "   prfm pstl1strm, %2\n"
		     "1: ldxr %0, %2\n"
		     "   add  %0, %0, %3\n"
		     "   stxr %w1, %0, %2\n"
		     "   cbnz %w1, 1b"
		     : "=&r" (result), "=&r" (tmp), "+Q" (a->a_value)
		     : "Ir" (num));
}

/**
 * Atomically subtracts "num" from the counter.
 */
static inline void m0_atomic64_sub(struct m0_atomic64 *a, int64_t num)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
		     "   prfm pstl1strm, %2\n"
		     "1: ldxr %0, %2\n"
		     "   sub  %0, %0, %3\n"
		     "   stxr %w1, %0, %2\n"
		     "   cbnz %w1, 1b"
		     : "=&r" (result), "=&r" (tmp), "+Q" (a->a_value)
		     : "Ir" (num));
}

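/*
 * The LDXR/STXR retry loops above implement a relaxed (unordered) atomic
 * add/subtract: the exclusive store fails, and the loop retries, whenever
 * another CPU touched the location in between; there is no barrier and no
 * "memory" clobber. A sketch of an equivalent formulation in terms of the
 * GCC builtin, assuming relaxed ordering is indeed intended
 * (atomic64_add_builtin is a hypothetical name):
 *
 *	static inline void atomic64_add_builtin(struct m0_atomic64 *a,
 *						int64_t num)
 *	{
 *		__atomic_fetch_add(&a->a_value, num, __ATOMIC_RELAXED);
 *	}
 */
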
/**
 * Atomically adds "delta" to the counter and returns the resulting value.
 */
static inline int64_t m0_atomic64_add_return(struct m0_atomic64 *a,
					     int64_t delta)
{
	int64_t result;
	uint64_t tmp;

	asm volatile("// atomic64_add_return\n"
		     "   prfm pstl1strm, %2\n"
		     "1: ldxr %0, %2\n"
		     "   add  %0, %0, %3\n"
		     "   stlxr %w1, %0, %2\n"
		     "   cbnz %w1, 1b\n"
		     "   dmb ish"
		     : "=&r" (result), "=&r" (tmp), "+Q" (a->a_value)
		     : "Ir" (delta)
		     : "memory");
	return result;
}

/**
 * Atomically subtracts "delta" from the counter and returns the resulting
 * value.
 */
static inline int64_t m0_atomic64_sub_return(struct m0_atomic64 *a,
					     int64_t delta)
{
	int64_t result;
	uint64_t tmp;

	asm volatile("// atomic64_sub_return\n"
		     "   prfm pstl1strm, %2\n"
		     "1: ldxr %0, %2\n"
		     "   sub  %0, %0, %3\n"
		     "   stlxr %w1, %0, %2\n"
		     "   cbnz %w1, 1b\n"
		     "   dmb ish"
		     : "=&r" (result), "=&r" (tmp), "+Q" (a->a_value)
		     : "Ir" (delta)
		     : "memory");
	return result;
}

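/*
 * Unlike m0_atomic64_add()/m0_atomic64_sub(), the *_return variants order
 * memory accesses: the exclusive store is a release (STLXR) and the trailing
 * "dmb ish" makes the whole operation a full barrier. A rough builtin
 * equivalent (a sketch; the exact fence placement differs, and
 * atomic64_add_return_builtin is a hypothetical name):
 *
 *	static inline int64_t
 *	atomic64_add_return_builtin(struct m0_atomic64 *a, int64_t delta)
 *	{
 *		return __atomic_add_fetch(&a->a_value, delta,
 *					  __ATOMIC_SEQ_CST);
 *	}
 */
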
static inline bool m0_atomic64_inc_and_test(struct m0_atomic64 *a)
{
	return m0_atomic64_add_return(a, 1) == 0;
}

static inline bool m0_atomic64_dec_and_test(struct m0_atomic64 *a)
{
	return m0_atomic64_sub_return(a, 1) == 0;
}

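/*
 * Typical use of m0_atomic64_dec_and_test() is reference counting: exactly
 * one caller observes the transition to zero and frees the object. A sketch
 * with a hypothetical "foo" type and foo_free() helper:
 *
 *	struct foo {
 *		struct m0_atomic64 f_ref;
 *	};
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (m0_atomic64_dec_and_test(&f->f_ref))
 *			foo_free(f);
 *	}
 */
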
/**
 * Atomically compares *loc with oldval and, if they are equal, stores newval
 * in *loc. Returns true iff the exchange happened.
 */
static inline bool m0_atomic64_cas(int64_t *loc, int64_t oldval, int64_t newval)
{
	/*
	 * Hand-written LL/SC alternative, kept for reference ("old" is the
	 * output of LDXR, "oldval" an input of EOR); the implementation
	 * below uses the compiler builtin instead.
	 *
	 * unsigned long tmp;
	 * int64_t old;
	 *
	 * asm volatile("// atomic64_cas\n"
	 *	"   prfm pstl1strm, %[v]\n"
	 *	"1: ldxr %[old], %[v]\n"
	 *	"   eor  %[tmp], %[old], %[oldval]\n"
	 *	"   cbnz %[tmp], 2f\n"
	 *	"   stxr %w[tmp], %[newval], %[v]\n"
	 *	"   cbnz %w[tmp], 1b\n"
	 *	"2:"
	 *	: [tmp] "=&r" (tmp), [old] "=&r" (old),
	 *	  [v] "+Q" (*(unsigned long *)loc)
	 *	: [oldval] "Lr" (oldval), [newval] "r" (newval)
	 *	:);
	 * return old == oldval;
	 */

	M0_CASSERT(8 == sizeof oldval);
	return __atomic_compare_exchange_n(loc, &oldval, newval, 0,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

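/*
 * m0_atomic64_cas() is the building block for lock-free updates that cannot
 * be expressed as a single add: read the current value, compute the new one
 * and retry if another thread raced in between. A sketch (atomic64_max is a
 * hypothetical helper):
 *
 *	static void atomic64_max(int64_t *loc, int64_t val)
 *	{
 *		int64_t cur;
 *
 *		do {
 *			cur = *(volatile int64_t *)loc;
 *			if (cur >= val)
 *				break;
 *		} while (!m0_atomic64_cas(loc, cur, val));
 *	}
 */
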
static inline void m0_mb(void)
{
	asm volatile("dsb sy":::"memory");
}

#endif /* __MOTR_LIB_USER_AARCH64_ATOMIC_H__ */

/*
 *  Local variables:
 *  c-indentation-style: "K&R"
 *  c-basic-offset: 8
 *  tab-width: 8
 *  fill-column: 80
 *  scroll-step: 1
 *  End:
 */