root/opal/mca/pmix/pmix4x/pmix/src/atomics/sys/arm/atomic.h


DEFINITIONS

This source file includes the following definitions:
  1. pmix_atomic_mb
  2. pmix_atomic_rmb
  3. pmix_atomic_wmb
  4. pmix_atomic_isync
  5. pmix_atomic_compare_exchange_strong_32
  6. pmix_atomic_compare_exchange_strong_acq_32
  7. pmix_atomic_compare_exchange_strong_rel_32
  8. pmix_atomic_compare_exchange_strong_64
  9. pmix_atomic_compare_exchange_strong_acq_64
  10. pmix_atomic_compare_exchange_strong_rel_64
  11. pmix_atomic_fetch_add_32
  12. pmix_atomic_fetch_sub_32

/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2010      IBM Corporation.  All rights reserved.
 * Copyright (c) 2010      ARM ltd.  All rights reserved.
 * Copyright (c) 2017-2018 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2018      Intel, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

/*
 * ARMv5 and earlier lack robust atomic operations, so this file relies on
 * Linux kernel support where needed.  The kernel also provides memory
 * barriers, and this file uses them on ARMv5 and earlier processors, which
 * lack a memory barrier instruction.  These kernel helpers are available on
 * kernel versions 2.6.15 and later; calling them on older kernels results in
 * undefined behavior.
 * See Documentation/arm/kernel_user_helpers.txt in the kernel tree for details.
 */

#ifndef PMIX_SYS_ARCH_ATOMIC_H
#define PMIX_SYS_ARCH_ATOMIC_H 1

#if (PMIX_ASM_ARM_VERSION >= 7)

#define PMIX_HAVE_ATOMIC_MEM_BARRIER 1
/* use the DMB instruction if available... */

#define PMIXMB()  __asm__ __volatile__ ("dmb" : : : "memory")
#define PMIXRMB() __asm__ __volatile__ ("dmb" : : : "memory")
#define PMIXWMB() __asm__ __volatile__ ("dmb" : : : "memory")

#elif (PMIX_ASM_ARM_VERSION == 6)

#define PMIX_HAVE_ATOMIC_MEM_BARRIER 1
/* ...or the v6-specific equivalent... */

#define PMIXMB()  __asm__ __volatile__ ("mcr p15, 0, r0, c7, c10, 5" : : : "memory")
#define PMIXRMB() PMIXMB()
#define PMIXWMB() PMIXMB()

#else

#define PMIX_HAVE_ATOMIC_MEM_BARRIER 1
/* ...otherwise use the Linux kernel-provided barrier */

#define PMIXMB() (*((void (*)(void))(0xffff0fa0)))()
#define PMIXRMB() PMIXMB()
#define PMIXWMB() PMIXMB()

#endif
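
/*
 * For reference: on pre-ARMv6 processors the PMIXMB() above jumps into the
 * kernel "user helper" page described in Documentation/arm/kernel_user_helpers.txt.
 * That page also exposes a compare-and-swap helper which ARMv5 code could use.
 * Illustrative sketch only (the names below are hypothetical and not used by
 * this header); per the kernel documentation the helper returns zero when the
 * swap succeeded:
 *
 *     typedef int (pmix_kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 *     #define PMIX_KUSER_CMPXCHG (*((pmix_kuser_cmpxchg_t *) 0xffff0fc0))
 *
 *     static inline bool arm5_cas_example (volatile int *ptr, int oldval, int newval)
 *     {
 *         // true when *ptr held oldval and has been replaced by newval
 *         return 0 == PMIX_KUSER_CMPXCHG (oldval, newval, ptr);
 *     }
 */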

/**********************************************************************
 *
 * Memory Barriers
 *
 *********************************************************************/

#if (PMIX_HAVE_ATOMIC_MEM_BARRIER == 1)

static inline
void pmix_atomic_mb(void)
{
    PMIXMB();
}


static inline
void pmix_atomic_rmb(void)
{
    PMIXRMB();
}


static inline
void pmix_atomic_wmb(void)
{
    PMIXWMB();
}

static inline
void pmix_atomic_isync(void)
{
}

#endif
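
/*
 * The write/read barriers above are meant to be used in pairs.  A minimal
 * producer/consumer sketch (hypothetical helper names, for illustration
 * only):
 *
 *     static int payload;
 *     static volatile int ready = 0;
 *
 *     static inline void producer_example (int value)
 *     {
 *         payload = value;
 *         pmix_atomic_wmb ();   // publish the payload before raising the flag
 *         ready = 1;
 *     }
 *
 *     static inline int consumer_example (void)
 *     {
 *         while (0 == ready) { }
 *         pmix_atomic_rmb ();   // read the payload only after seeing the flag
 *         return payload;
 *     }
 */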


/**********************************************************************
 *
 * Atomic math operations
 *
 *********************************************************************/

#if (PMIX_GCC_INLINE_ASSEMBLY && (PMIX_ASM_ARM_VERSION >= 6))

#define PMIX_HAVE_ATOMIC_COMPARE_EXCHANGE_32 1
#define PMIX_HAVE_ATOMIC_MATH_32 1
static inline bool pmix_atomic_compare_exchange_strong_32 (pmix_atomic_int32_t *addr, int32_t *oldval, int32_t newval)
{
  int32_t prev, tmp;
  bool ret;

   __asm__ __volatile__ (
                         "1:  ldrex   %0, [%2]        \n"
                         "    cmp     %0, %3          \n"
                         "    bne     2f              \n"
                         "    strex   %1, %4, [%2]    \n"
                         "    cmp     %1, #0          \n"
                         "    bne     1b              \n"
                         "2:                          \n"

                         : "=&r" (prev), "=&r" (tmp)
                         : "r" (addr), "r" (*oldval), "r" (newval)
                         : "cc", "memory");

   ret = (prev == *oldval);
   *oldval = prev;
   return ret;
}
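
/*
 * Typical usage is a read-modify-write retry loop: read the current value,
 * compute the update, and retry until the compare-exchange succeeds.  Sketch
 * of an atomic fetch-or built on the routine above (hypothetical helper
 * name):
 *
 *     static inline int32_t fetch_or_32_example (pmix_atomic_int32_t *addr, int32_t bits)
 *     {
 *         int32_t old = *addr;
 *         // on failure, old is refreshed with the current value and we retry
 *         while (!pmix_atomic_compare_exchange_strong_32 (addr, &old, old | bits)) { }
 *         return old;
 *     }
 */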

/* In the non-gcc (no inline assembly) build these two wrappers are not
   inlined: neither the compare-exchange nor the barrier can be inlined
   there, so calling both would cost two function calls.  Instead they are
   "inlined" by hand in the assembly implementation, leaving only one
   function-call overhead. */
static inline bool pmix_atomic_compare_exchange_strong_acq_32 (pmix_atomic_int32_t *addr, int32_t *oldval, int32_t newval)
{
    bool rc;

    rc = pmix_atomic_compare_exchange_strong_32 (addr, oldval, newval);
    pmix_atomic_rmb();

    return rc;
}


static inline bool pmix_atomic_compare_exchange_strong_rel_32 (pmix_atomic_int32_t *addr, int32_t *oldval, int32_t newval)
{
    pmix_atomic_wmb();
    return pmix_atomic_compare_exchange_strong_32 (addr, oldval, newval);
}
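
/*
 * The acquire/release variants order the surrounding critical section.  A
 * minimal spinlock sketch using them (hypothetical helper names, assuming the
 * lock word is 0 when free and 1 when held):
 *
 *     static inline void example_lock (pmix_atomic_int32_t *lock)
 *     {
 *         int32_t unlocked = 0;
 *         while (!pmix_atomic_compare_exchange_strong_acq_32 (lock, &unlocked, 1)) {
 *             unlocked = 0;   // reset the expected value before retrying
 *         }
 *     }
 *
 *     static inline void example_unlock (pmix_atomic_int32_t *lock)
 *     {
 *         int32_t locked = 1;
 *         pmix_atomic_compare_exchange_strong_rel_32 (lock, &locked, 0);
 *     }
 */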

#if (PMIX_ASM_SUPPORT_64BIT == 1)

#define PMIX_HAVE_ATOMIC_COMPARE_EXCHANGE_64 1
static inline bool pmix_atomic_compare_exchange_strong_64 (pmix_atomic_int64_t *addr, int64_t *oldval, int64_t newval)
{
    int64_t prev;
    int tmp;
    bool ret;

    __asm__ __volatile__ (
                          "1:  ldrexd  %0, %H0, [%2]           \n"
                          "    cmp     %0, %3                  \n"
                          "    it      eq                      \n"
                          "    cmpeq   %H0, %H3                \n"
                          "    bne     2f                      \n"
                          "    strexd  %1, %4, %H4, [%2]       \n"
                          "    cmp     %1, #0                  \n"
                          "    bne     1b                      \n"
                          "2:                                  \n"

                          : "=&r" (prev), "=&r" (tmp)
                          : "r" (addr), "r" (*oldval), "r" (newval)
                          : "cc", "memory");

    ret = (prev == *oldval);
    *oldval = prev;
    return ret;
}
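
/*
 * This header does not define a 64-bit fetch-and-add; one can be layered on
 * top of the routine above with a retry loop.  Sketch only (hypothetical
 * helper name):
 *
 *     static inline int64_t fetch_add_64_example (pmix_atomic_int64_t *addr, int64_t inc)
 *     {
 *         int64_t old = *addr;
 *         // on failure, old is refreshed with the current value and we retry
 *         while (!pmix_atomic_compare_exchange_strong_64 (addr, &old, old + inc)) { }
 *         return old;
 *     }
 */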

/* In the non-gcc (no inline assembly) build these two wrappers are not
   inlined: neither the compare-exchange nor the barrier can be inlined
   there, so calling both would cost two function calls.  Instead they are
   "inlined" by hand in the assembly implementation, leaving only one
   function-call overhead. */
static inline bool pmix_atomic_compare_exchange_strong_acq_64 (pmix_atomic_int64_t *addr, int64_t *oldval, int64_t newval)
{
    bool rc;

    rc = pmix_atomic_compare_exchange_strong_64 (addr, oldval, newval);
    pmix_atomic_rmb();

    return rc;
}


static inline bool pmix_atomic_compare_exchange_strong_rel_64 (pmix_atomic_int64_t *addr, int64_t *oldval, int64_t newval)
{
    pmix_atomic_wmb();
    return pmix_atomic_compare_exchange_strong_64 (addr, oldval, newval);
}

#endif


#define PMIX_HAVE_ATOMIC_ADD_32 1
static inline int32_t pmix_atomic_fetch_add_32(pmix_atomic_int32_t* v, int inc)
{
    int32_t t, old;
    int tmp;

    __asm__ __volatile__(
                         "1:  ldrex   %1, [%3]        \n"
                         "    add     %0, %1, %4      \n"
                         "    strex   %2, %0, [%3]    \n"
                         "    cmp     %2, #0          \n"
                         "    bne     1b              \n"

                         : "=&r" (t), "=&r" (old), "=&r" (tmp)
                         : "r" (v), "r" (inc)
                         : "cc", "memory");

    return old;
}
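
/*
 * The routine above returns the value *v held before the addition.  Example
 * use as a shared ticket/slot counter (hypothetical names):
 *
 *     static pmix_atomic_int32_t next_slot = 0;
 *
 *     static inline int32_t claim_slot_example (void)
 *     {
 *         // each caller gets a distinct, monotonically increasing slot
 *         return pmix_atomic_fetch_add_32 (&next_slot, 1);
 *     }
 */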

#define PMIX_HAVE_ATOMIC_SUB_32 1
static inline int32_t pmix_atomic_fetch_sub_32(pmix_atomic_int32_t* v, int dec)
{
    int32_t t, old;
    int tmp;

    __asm__ __volatile__(
                         "1:  ldrex   %1, [%3]        \n"
                         "    sub     %0, %1, %4      \n"
                         "    strex   %2, %0, [%3]    \n"
                         "    cmp     %2, #0          \n"
                         "    bne     1b              \n"

                         : "=&r" (t), "=&r" (old), "=&r" (tmp)
                         : "r" (v), "r" (dec)
                         : "cc", "memory");

    /* fetch_sub returns the value held before the subtraction, matching
       pmix_atomic_fetch_add_32 above */
    return old;
}
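
/*
 * Example: dropping a reference and detecting the last owner.  Because the
 * pre-decrement value is returned, the last owner sees 1 (hypothetical
 * helper name):
 *
 *     static inline bool drop_ref_example (pmix_atomic_int32_t *refcount)
 *     {
 *         return 1 == pmix_atomic_fetch_sub_32 (refcount, 1);
 *     }
 */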

#endif

#endif /* ! PMIX_SYS_ARCH_ATOMIC_H */
