/*
 * CPPMyth
 * Library to interoperate with MythTV server
 * atomic.h
 */
1 /*
2  * Copyright (C) 2014-2015 Jean-Luc Barriere
3  *
4  * This library is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU Lesser General Public License as published
6  * by the Free Software Foundation; either version 3, or (at your option)
7  * any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public License
15  * along with this library; see the file COPYING. If not, write to
16  * the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
17  * MA 02110-1301 USA
18  * http://www.gnu.org/copyleft/gpl.html
19  *
20  */
21 
22 #ifndef ATOMIC_H
23 #define ATOMIC_H
24 
25 #include <cppmyth_config.h>
26 
#ifdef __GNUC__

/* The inline-assembly implementations below are dispatched on the target
 * architecture, so for ARM we need a single revision number to test. */
#if defined __arm__ && (!defined __thumb__ || defined __thumb2__)
/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise,
 * by collapsing each of the older per-revision predefines onto the
 * revision number used by the #elif chains in atomic<int> below. */
#ifndef __ARM_ARCH
#ifdef __ARM_ARCH_2__
#define __ARM_ARCH 2
#elif defined (__ARM_ARCH_3__) || defined (__ARM_ARCH_3M__)
#define __ARM_ARCH 3
#elif defined (__ARM_ARCH_4__) || defined (__ARM_ARCH_4T__)
#define __ARM_ARCH 4
#elif defined (__ARM_ARCH_5__) || defined (__ARM_ARCH_5E__) \
 || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \
 || defined(__ARM_ARCH_5TEJ__)
#define __ARM_ARCH 5
#elif defined (__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
 || defined (__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
 || defined (__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__)
#define __ARM_ARCH 6
#elif defined (__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
 || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
 || defined(__ARM_ARCH_7EM__)
#define __ARM_ARCH 7
#endif
#endif
#endif
53 
54 namespace NSROOT
55 {
56  template<typename T>
57  class atomic
58  {
59  public:
60  typedef volatile T atomic_t;
61 
62  atomic(T val) : m_val(val) {}
63 
64  atomic_t load()
65  {
66  return m_val;
67  }
68 
69  private:
70  atomic_t m_val;
71  };
72 
73  template<>
74  class atomic<int>
75  {
76  public:
77  typedef volatile int atomic_t;
78 
79  atomic(int val) : m_val(val) {}
80 
81  int __attribute__((always_inline)) load()
82  {
83  return m_val;
84  }
85 
86  int __attribute__((always_inline)) operator()()
87  {
88  return load();
89  }
90 
91  int __attribute__((always_inline)) add_fetch(int amount)
92  {
93  atomic_t __val;
94 
95 #if defined __mips__
96  int temp;
97  __asm__ volatile (
98  " .set arch=r4000\n"
99  "1: ll %0, %1\n" /* load old value */
100  " addu %2, %0, %3\n" /* calculate new value */
101  " sc %2, %1\n" /* attempt to store */
102  " beqzl %2, 1b\n" /* spin if failed */
103  " .set mips0\n"
104  : "=&r" (__val), "=m" (m_val), "=&r" (temp)
105  : "r" (amount), "m" (m_val));
106  /* __val is the old value, so normalize it. */
107  __val += amount;
108 
109 #elif defined __i386__ || defined __i486__ || defined __i586__ || defined __i686__ || defined __x86_64__
110  __asm__ volatile (
111  "lock xaddl %0, (%1);"
112  : "=r" (__val)
113  : "r" (&m_val), "0" (amount)
114  : "cc", "memory"
115  );
116  /* __val is the old value, so normalize it. */
117  __val += amount;
118 
119 #elif defined __powerpc__ || defined __ppc__ || defined __ppc64__
120  __asm__ volatile (
121  "1: lwarx %0,0,%1\n"
122  " add %0,%2,%0\n"
123  " dcbt %0,%1\n"
124  " stwcx. %0,0,%1\n"
125  " bne- 1b\n"
126  " isync\n"
127  : "=&r" (__val)
128  : "r" (&m_val), "r" (amount)
129  : "cc", "memory");
130 
131 #elif defined __sparc__ || defined __sparc64__
132  atomic_t __old, __new = m_val;
133  do
134  {
135  __old = __new;
136  __new = __old + amount;
137  /* compare and swap: if (*a == b) swap(*a, c) else c = *a */
138  __asm__ volatile (
139  "cas [%2], %3, %0"
140  : "=&r" (__new)
141  : "" (__new), "r" (&m_val), "r" (__old)
142  : "memory");
143  }
144  while (__new != __old);
145  __val = __old + amount;
146 
147 #elif (defined __ARM_ARCH && __ARM_ARCH == 7)
148  __asm__ volatile (
149  " dmb ish\n" /* Memory barrier */
150  "1: ldrex %0, [%1]\n"
151  " add %0, %0, %2\n"
152  " strex r1, %0, [%1]\n"
153  " cmp r1, #0\n"
154  " bne 1b\n"
155  " dmb ish\n" /* Memory barrier */
156  : "=&r" (__val)
157  : "r" (&m_val), "r" (amount)
158  : "r1", "memory");
159 
160 #elif (defined __ARM_ARCH && __ARM_ARCH == 6)
161  __asm__ volatile (
162  "mcr p15, 0, %0, c7, c10, 5" /* Memory barrier */
163  : : "r" (0) : "memory");
164  __asm__ volatile (
165  "1: ldrex %0, [%1]\n"
166  " add %0, %0, %2\n"
167  " strex r1, %0, [%1]\n"
168  " cmp r1, #0\n"
169  " bne 1b\n"
170  : "=&r" (__val)
171  : "r" (&m_val), "r" (amount)
172  : "r1");
173  __asm__ volatile (
174  "mcr p15, 0, %0, c7, c10, 5" /* Memory barrier */
175  : : "r" (0) : "memory");
176 
177 #elif (defined __ARM_ARCH && __ARM_ARCH < 6)
178  int tmp1, tmp2;
179  __asm__ volatile (
180  "0: ldr %0, [%3]\n"
181  " add %1, %0, %4\n"
182  " swp %2, %1, [%3]\n"
183  " cmp %0, %2\n"
184  " swpne %0, %2, [%3]\n"
185  " bne 0b\n"
186  : "=&r" (tmp1), "=&r" (__val), "=&r" (tmp2)
187  : "r" (&m_val), "r" (amount)
188  : "cc", "memory");
189 
190 #elif defined __aarch64__
191  unsigned long tmp;
192  __asm__ volatile (
193  " dmb ish\n" /* Memory barrier */
194  "1: ldxr %w0, %2\n"
195  " add %w0, %w0, %w3\n"
196  " stlxr %w1, %w0, %2\n"
197  " cbnz %w1, 1b\n"
198  " dmb ish\n" /* Memory barrier */
199  : "=&r" (__val), "=&r" (tmp), "+Q" (m_val)
200  : "Ir" (amount)
201  : "memory");
202 
203 #else
204 /* warning unknown architecture, atomic increment is not... */
205 #ifndef ATOMIC_NOATOMIC
206 #define ATOMIC_NOATOMIC
207 #endif
208  __val = m_val += amount;
209 
210 #endif
211  return __val;
212  }
213 
214  int __attribute__((always_inline)) operator++()
215  {
216  return add_fetch(1);
217  }
218 
219  int __attribute__((always_inline)) sub_fetch(int amount)
220  {
221  atomic_t __val;
222 
223 #if defined __mips__
224  int temp;
225  __asm__ volatile (
226  " .set arch=r4000\n"
227  "1: ll %0, %1\n" /* load old value */
228  " subu %2, %0, %3\n" /* calculate new value */
229  " sc %2, %1\n" /* attempt to store */
230  " beqzl %2, 1b\n" /* spin if failed */
231  " .set mips0\n"
232  : "=&r" (__val), "=m" (m_val), "=&r" (temp)
233  : "r" (amount), "m" (m_val));
234  /* __val is the old value, so normalize it */
235  __val -= amount;
236 
237 #elif defined __i386__ || defined __i486__ || defined __i586__ || defined __i686__ || defined __x86_64__
238  __asm__ volatile (
239  "lock xaddl %0, (%1);"
240  : "=r" (__val)
241  : "r" (&m_val), "0" (-amount)
242  : "cc", "memory"
243  );
244  /* __val is the pre-decrement value, so normalize it */
245  __val -= amount;
246 
247 #elif defined __powerpc__ || defined __ppc__ || defined __ppc64__
248  __asm__ volatile (
249  "1: lwarx %0,0,%1\n"
250  " subf %0,%2,%0\n"
251  " dcbt %0,%1\n"
252  " stwcx. %0,0,%1\n"
253  " bne- 1b\n"
254  " isync\n"
255  : "=&r" (__val)
256  : "r" (&m_val), "r" (amount)
257  : "cc", "memory");
258 
259 #elif defined __sparc__ || defined __sparc64__
260  atomic_t __old, __new = m_val;
261  do
262  {
263  __old = __new;
264  __new = __old - amount;
265  /* compare and swap: if (*a == b) swap(*a, c) else c = *a */
266  __asm__ volatile (
267  "cas [%2], %3, %0"
268  : "=&r" (__new)
269  : "" (__new), "r" (&m_val), "r" (__old)
270  : "memory");
271  }
272  while (__new != __old);
273  __val = __old - amount;
274 
275 #elif (defined __ARM_ARCH && __ARM_ARCH == 7)
276  __asm__ volatile (
277  " dmb ish\n" /* Memory barrier */
278  "1: ldrex %0, [%1]\n"
279  " sub %0, %0, %2\n"
280  " strex r1, %0, [%1]\n"
281  " cmp r1, #0\n"
282  " bne 1b\n"
283  " dmb ish\n" /* Memory barrier */
284  : "=&r" (__val)
285  : "r" (&m_val), "r" (amount)
286  : "r1", "memory");
287 
288 #elif (defined __ARM_ARCH && __ARM_ARCH == 6)
289  __asm__ volatile (
290  "mcr p15, 0, %0, c7, c10, 5" /* Memory barrier */
291  : : "r" (0) : "memory");
292  __asm__ volatile (
293  "1: ldrex %0, [%1]\n"
294  " sub %0, %0, %2\n"
295  " strex r1, %0, [%1]\n"
296  " cmp r1, #0\n"
297  " bne 1b\n"
298  : "=&r" (__val)
299  : "r" (&m_val), "r" (amount)
300  : "r1");
301  __asm__ volatile (
302  "mcr p15, 0, %0, c7, c10, 5" /* Memory barrier */
303  : : "r" (0) : "memory");
304 
305 #elif (defined __ARM_ARCH && __ARM_ARCH < 6)
306  int tmp1, tmp2;
307  __asm__ volatile (
308  "0: ldr %0, [%3]\n"
309  " add %1, %0, %4\n"
310  " swp %2, %1, [%3]\n"
311  " cmp %0, %2\n"
312  " swpne %0, %2, [%3]\n"
313  " bne 0b\n"
314  : "=&r" (tmp1), "=&r" (__val), "=&r" (tmp2)
315  : "r" (&m_val), "r" (-amount)
316  : "cc", "memory");
317 
318 #elif defined __aarch64__
319  unsigned long tmp;
320  __asm__ volatile (
321  " dmb ish\n" /* Memory barrier */
322  "1: ldxr %w0, %2\n"
323  " sub %w0, %w0, %w3\n"
324  " stlxr %w1, %w0, %2\n"
325  " cbnz %w1, 1b\n"
326  " dmb ish\n" /* Memory barrier */
327  : "=&r" (__val), "=&r" (tmp), "+Q" (m_val)
328  : "Ir" (amount)
329  : "memory");
330 
331 #else
332 /* warning unknown architecture, atomic deccrement is not... */
333 #ifndef ATOMIC_NOATOMIC
334 #define ATOMIC_NOATOMIC
335 #endif
336  __val = m_val -= amount;
337 
338 #endif
339  return __val;
340  }
341 
342  int __attribute__((always_inline)) operator--()
343  {
344  return sub_fetch(1);
345  }
346 
347  private:
348  atomic_t m_val;
349  };
350 }
351 
352 #else
353 #ifndef ATOMIC_NOATOMIC
354 #define ATOMIC_NOATOMIC
355 #endif
356 #endif
357 
358 #endif /* ATOMIC_H */