@@ -261,13 +261,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
   switch (ORDER) { \
   case _Py_memory_order_acquire: \
-    _InterlockedExchange64_HLEAcquire((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+    _InterlockedExchange64_HLEAcquire((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
     break; \
   case _Py_memory_order_release: \
-    _InterlockedExchange64_HLERelease((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+    _InterlockedExchange64_HLERelease((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
     break; \
   default: \
-    _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+    _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
     break; \
   }
 #else
@@ -277,13 +277,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
   switch (ORDER) { \
   case _Py_memory_order_acquire: \
-    _InterlockedExchange_HLEAcquire((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange_HLEAcquire((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
     break; \
   case _Py_memory_order_release: \
-    _InterlockedExchange_HLERelease((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange_HLERelease((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
     break; \
   default: \
-    _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
     break; \
   }

@@ -292,7 +292,7 @@ typedef struct _Py_atomic_int {
    gil_created() uses -1 as a sentinel value, if this returns
    a uintptr_t it will do an unsigned compare and crash
 */
-inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
+inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
     __int64 old;
     switch (order) {
     case _Py_memory_order_acquire:
@@ -323,11 +323,14 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
     return old;
 }

+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
+    _Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))
+
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
 #endif

-inline int _Py_atomic_load_32bit(volatile int* value, int order) {
+inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
     long old;
     switch (order) {
     case _Py_memory_order_acquire:
@@ -358,16 +361,19 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
     return old;
 }

+#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
+    _Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))
+
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
   if (sizeof((ATOMIC_VAL)->_value) == 8) { \
-    _Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
+    _Py_atomic_store_64bit((ATOMIC_VAL), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit((ATOMIC_VAL), NEW_VAL, ORDER) }

 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
     sizeof((ATOMIC_VAL)->_value) == 8 ? \
-    _Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
-    _Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
+    _Py_atomic_load_64bit((ATOMIC_VAL), ORDER) : \
+    _Py_atomic_load_32bit((ATOMIC_VAL), ORDER) \
   )
 #elif defined(_M_ARM) || defined(_M_ARM64)
 typedef enum _Py_memory_order {
@@ -422,7 +428,7 @@ typedef struct _Py_atomic_int {
    gil_created() uses -1 as a sentinel value, if this returns
    a uintptr_t it will do an unsigned compare and crash
 */
-inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
+inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
     uintptr_t old;
     switch (order) {
     case _Py_memory_order_acquire:
@@ -453,11 +459,14 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
     return old;
 }

+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
+    _Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))
+
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
 #endif

-inline int _Py_atomic_load_32bit(volatile int* value, int order) {
+inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
     int old;
     switch (order) {
     case _Py_memory_order_acquire:
@@ -488,16 +497,19 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
     return old;
 }

+#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
+    _Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))
+
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
   if (sizeof((ATOMIC_VAL)->_value) == 8) { \
-    _Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
+    _Py_atomic_store_64bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) } else { \
+    _Py_atomic_store_32bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) }

 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
     sizeof((ATOMIC_VAL)->_value) == 8 ? \
-    _Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
-    _Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
+    _Py_atomic_load_64bit((ATOMIC_VAL), (ORDER)) : \
+    _Py_atomic_load_32bit((ATOMIC_VAL), (ORDER)) \
   )
 #endif
 #else /* !gcc x86 !_msc_ver */
@@ -529,16 +541,16 @@ typedef struct _Py_atomic_int {

 /* Standardized shortcuts. */
 #define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
-    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
+    _Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_seq_cst)
 #define _Py_atomic_load(ATOMIC_VAL) \
-    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)
+    _Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_seq_cst)

 /* Python-local extensions */

 #define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
-    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
+    _Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_relaxed)
 #define _Py_atomic_load_relaxed(ATOMIC_VAL) \
-    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
+    _Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_relaxed)

 #ifdef __cplusplus
 }
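
For reference, a minimal caller-side sketch (not part of the diff above) of how these macros are used after this change: the caller passes a pointer to the whole _Py_atomic_* struct, and the store/load macros now take the address of the ->_value member and do the sizeof-based 32/64-bit dispatch themselves, instead of the caller casting &(...)->_value as the removed lines did. The flag and function names below are hypothetical, and the include assumes the header is visible as pyatomic.h; the pattern mirrors how CPython's eval loop uses the relaxed variants.

    #include "pyatomic.h"                    /* assumed include path for this sketch */

    static _Py_atomic_int break_requested;   /* illustrative flag, not from the commit */

    static void request_break(void)
    {
        /* Pass the struct pointer; the macro reaches ->_value internally. */
        _Py_atomic_store_relaxed(&break_requested, 1);
    }

    static int break_pending(void)
    {
        return _Py_atomic_load_relaxed(&break_requested);
    }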