这个 atomic.h 是 GlusterFS 的 跨平台原子操作封装库,让多线程环境下整数操作既安全又高效,同时屏蔽了不同平台和编译器的差异。
GlusterFS
Ahmedabad
/*
Copyright (c) 2017 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _ATOMIC_H
#define _ATOMIC_H
#include <inttypes.h>
#include <stdbool.h>
#include "glusterfs/locking.h"
/* Macros used to join two arguments and generate a new macro name.
 * The extra indirection through GF_ATOMIC_MACRO_1 makes the compiler
 * expand the pasted token again as a macro, so that calls like
 * GF_ATOMIC_MACRO(GF_ATOMIC_SIZE_, 8) resolve to the *definition* of
 * GF_ATOMIC_SIZE_8 rather than stopping at the pasted name. */
#define GF_ATOMIC_MACRO_1(_macro) _macro
#define GF_ATOMIC_MACRO(_base, _name) GF_ATOMIC_MACRO_1(_base##_name)
/* There's a problem on 32-bit architectures when we try to use atomic
 * builtins with 64-bit types. Only way to solve the problem is to use
 * a mutex to protect the access to the atomic, but we don't want to
 * use mutexes for other smaller types that could work with the atomic
 * builtins.
 *
 * So on each atomic type we add a field for the mutex if atomic operation
 * is not supported and a dummy zero size field if it's supported. This way
 * we can have different atomic types, some with a mutex and some without.
 *
 * To define these types, we use two macros:
 *
 * GF_ATOMIC_MUTEX_FIELD_0 = char lk[0]
 * GF_ATOMIC_MUTEX_FIELD_1 = gf_lock_t lk
 *
 * Both macros define the 'lk' field that will be used in the atomic
 * structure. One when the atomic is supported by the architecture and
 * another when not. We need to define the field even if it won't be
 * used. Otherwise the compiler will return an error.
 *
 * Now we need to take the mutex or not depending on the existence of
 * the mutex field in the structure. To do so we check the size of the
 * structure, and if it's bigger than uint64_t (all structures with a
 * mutex will be bigger), we use the mutex-based version. Otherwise we
 * use the atomic builtin. This check is easily optimized out by the
 * compiler, leaving a clean and efficient compiled code. */
#define GF_ATOMIC_MUTEX_FIELD_0 char lk[0] // zero-length array, a GCC/Clang extension; adds no size
#define GF_ATOMIC_MUTEX_FIELD_1 gf_lock_t lk
/* We'll use SIZEOF_LONG to determine the architecture. 32-bit machines
 * will have 4 here, while 64-bit machines will have 8. If additional
 * needs or restrictions appear on other platforms, these tests can be
 * extended to handle them. */
/* GF_ATOMIC_SIZE_X macros map each type size to one of the
 * GF_ATOMIC_MUTEX_FIELD_X macros, depending on detected conditions. */
/* HAVE_ATOMIC_BUILTINS: newer GCC/Clang __atomic builtins available.
 * HAVE_SYNC_BUILTINS: older __sync builtins available.
 * (Both are presumably detected by the build system — configure.) */
#if defined(HAVE_ATOMIC_BUILTINS) || defined(HAVE_SYNC_BUILTINS)
#define GF_ATOMIC_SIZE_1 GF_ATOMIC_MUTEX_FIELD_0
#define GF_ATOMIC_SIZE_2 GF_ATOMIC_MUTEX_FIELD_0
#define GF_ATOMIC_SIZE_4 GF_ATOMIC_MUTEX_FIELD_0
/* 8-byte atomics only go lock-free on 64-bit machines (see comment above). */
#if SIZEOF_LONG >= 8
#define GF_ATOMIC_SIZE_8 GF_ATOMIC_MUTEX_FIELD_0
#endif
#endif /* HAVE_(ATOMIC|SYNC)_BUILTINS */
/* Any GF_ATOMIC_SIZE_X macro not yet defined will use the mutex version */
#ifndef GF_ATOMIC_SIZE_1
#define GF_ATOMIC_SIZE_1 GF_ATOMIC_MUTEX_FIELD_1
#endif
#ifndef GF_ATOMIC_SIZE_2
#define GF_ATOMIC_SIZE_2 GF_ATOMIC_MUTEX_FIELD_1
#endif
#ifndef GF_ATOMIC_SIZE_4
#define GF_ATOMIC_SIZE_4 GF_ATOMIC_MUTEX_FIELD_1
#endif
#ifndef GF_ATOMIC_SIZE_8
#define GF_ATOMIC_SIZE_8 GF_ATOMIC_MUTEX_FIELD_1
#endif
/* This macro is used to define all atomic types supported. First field
 * represents the size of the type in bytes, and the second one the name.
 * Each generated struct contains:
 *   - 'lk': either a zero-size placeholder or a real mutex, chosen by
 *     the GF_ATOMIC_SIZE_X macro matching '_size' (see above);
 *   - 'value': the actual integer, of type '_name##_t'. */
#define GF_ATOMIC_TYPE(_size, _name) \
typedef struct _gf_atomic_##_name##_t { \
GF_ATOMIC_MACRO(GF_ATOMIC_SIZE_, _size); \
_name##_t value; \
} gf_atomic_##_name##_t
/* The atomic types we support. Note that for the pointer-sized types
 * SIZEOF_LONG expands to 4 or 8 before the size macro is pasted. */
GF_ATOMIC_TYPE(1, int8); /* gf_atomic_int8_t */
GF_ATOMIC_TYPE(2, int16); /* gf_atomic_int16_t */
GF_ATOMIC_TYPE(4, int32); /* gf_atomic_int32_t */
GF_ATOMIC_TYPE(8, int64); /* gf_atomic_int64_t */
GF_ATOMIC_TYPE(SIZEOF_LONG, intptr); /* gf_atomic_intptr_t */
GF_ATOMIC_TYPE(1, uint8); /* gf_atomic_uint8_t */
GF_ATOMIC_TYPE(2, uint16); /* gf_atomic_uint16_t */
GF_ATOMIC_TYPE(4, uint32); /* gf_atomic_uint32_t */
GF_ATOMIC_TYPE(8, uint64); /* gf_atomic_uint64_t */
GF_ATOMIC_TYPE(SIZEOF_LONG, uintptr); /* gf_atomic_uintptr_t */
/* Define the default atomic type as int64_t */
#define gf_atomic_t gf_atomic_int64_t
/* This macro will choose between the mutex based version and the atomic
 * builtin version depending on the size of the atomic structure.
 *
 * Structures that carry a real mutex are strictly bigger than uint64_t,
 * so the sizeof() comparison is a compile-time constant and the compiler
 * discards the branch that is not taken. */
#define GF_ATOMIC_CHOOSE(_atomic, _op, _args...) \
((sizeof(_atomic) > sizeof(uint64_t)) \
? ({ \
GF_ATOMIC_MACRO(GF_ATOMIC_LOCK_, _op) \
(_atomic, ##_args); \
}) \
: ({ \
GF_ATOMIC_MACRO(GF_ATOMIC_BASE_, _op) \
(_atomic, ##_args); \
}))
/* Macros to implement the mutex-based atomics. */
/* Takes the per-atomic lock and loads the current value into a fresh
 * local variable '_name'. Deliberately NOT wrapped in do/while: the
 * locals it declares (__atomic, __lock, _name) must remain in scope for
 * the GF_ATOMIC_OP_STORE / GF_ATOMIC_OP_RETURN that follow it inside
 * the same statement expression. */
#define GF_ATOMIC_OP_PREPARE(_atomic, _name) \
typeof(_atomic) *__atomic = &(_atomic); \
gf_lock_t *__lock = (gf_lock_t *)&__atomic->lk; \
LOCK(__lock); \
typeof(__atomic->value) _name = __atomic->value
/* Writes '_value' back into the atomic. The lock must be held. */
#define GF_ATOMIC_OP_STORE(_value) (__atomic->value = (_value))
/* Releases the lock and yields '_value' as the result of the enclosing
 * ({ ... }) statement expression. */
#define GF_ATOMIC_OP_RETURN(_value) \
({ \
UNLOCK(__lock); \
_value; \
})
/* Initializes the embedded lock and stores the initial value. */
#define GF_ATOMIC_LOCK_INIT(_atomic, _value) \
do { \
typeof(_atomic) *__atomic = &(_atomic); \
LOCK_INIT((gf_lock_t *)&__atomic->lk); \
__atomic->value = (_value); \
} while (0)
/* Returns the current value under the lock. */
#define GF_ATOMIC_LOCK_GET(_atomic) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_RETURN(__value); \
})
/* The GF_ATOMIC_LOCK_<OP> macros below apply the operation and return
 * the NEW value (the compound assignment updates __value in place). */
#define GF_ATOMIC_LOCK_ADD(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value += (_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_SUB(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value -= (_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_AND(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value &= (_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_OR(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value |= (_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_XOR(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value ^= (_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_NAND(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value = ~(__value & (_value))); \
GF_ATOMIC_OP_RETURN(__value); \
})
/* The GF_ATOMIC_LOCK_FETCH_<OP> macros store the operation's result but
 * return the OLD value (__value itself is never reassigned here). */
#define GF_ATOMIC_LOCK_FETCH_ADD(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value + (_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_FETCH_SUB(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value - (_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_FETCH_AND(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value &(_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_FETCH_OR(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value | (_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_FETCH_XOR(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(__value ^ (_value)); \
GF_ATOMIC_OP_RETURN(__value); \
})
#define GF_ATOMIC_LOCK_FETCH_NAND(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(~(__value & (_value))); \
GF_ATOMIC_OP_RETURN(__value); \
})
/* Stores '_value' and returns the previous value. */
#define GF_ATOMIC_LOCK_SWAP(_atomic, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
GF_ATOMIC_OP_STORE(_value); \
GF_ATOMIC_OP_RETURN(__value); \
})
/* Compare-and-swap: stores '_value' only if the current value equals
 * '_expected'. Returns true on success, false otherwise. */
#define GF_ATOMIC_LOCK_CMP_SWAP(_atomic, _expected, _value) \
({ \
GF_ATOMIC_OP_PREPARE(_atomic, __value); \
bool __ret = (__value == (_expected)); \
if (__ret) { \
GF_ATOMIC_OP_STORE(_value); \
} \
GF_ATOMIC_OP_RETURN(__ret); \
})
#if defined(HAVE_ATOMIC_BUILTINS)
/* If compiler supports __atomic builtins, we use them.
 * Memory ordering: init uses release, get uses acquire, and all
 * read-modify-write operations use acquire-release semantics. */
#define GF_ATOMIC_BASE_INIT(_atomic, _value) \
__atomic_store_n(&(_atomic).value, (_value), __ATOMIC_RELEASE)
#define GF_ATOMIC_BASE_GET(_atomic) \
__atomic_load_n(&(_atomic).value, __ATOMIC_ACQUIRE)
/* <op>_fetch variants return the NEW value... */
#define GF_ATOMIC_BASE_ADD(_atomic, _value) \
__atomic_add_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_SUB(_atomic, _value) \
__atomic_sub_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_AND(_atomic, _value) \
__atomic_and_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_OR(_atomic, _value) \
__atomic_or_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_XOR(_atomic, _value) \
__atomic_xor_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_NAND(_atomic, _value) \
__atomic_nand_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
/* ...while fetch_<op> variants return the OLD value. */
#define GF_ATOMIC_BASE_FETCH_ADD(_atomic, _value) \
__atomic_fetch_add(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_FETCH_SUB(_atomic, _value) \
__atomic_fetch_sub(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_FETCH_AND(_atomic, _value) \
__atomic_fetch_and(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_FETCH_OR(_atomic, _value) \
__atomic_fetch_or(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_FETCH_XOR(_atomic, _value) \
__atomic_fetch_xor(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
#define GF_ATOMIC_BASE_FETCH_NAND(_atomic, _value) \
__atomic_fetch_nand(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
/* Stores '_value' and returns the previous value. */
#define GF_ATOMIC_BASE_SWAP(_atomic, _value) \
__atomic_exchange_n(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
/* Strong CAS (4th argument 0 = no spurious failure); returns true if the
 * exchange happened. '_expected' is copied to a local because the builtin
 * writes the observed value back through its second argument. */
#define GF_ATOMIC_BASE_CMP_SWAP(_atomic, _expected, _value) \
({ \
typeof((_atomic).value) __expected = (_expected); \
__atomic_compare_exchange_n(&(_atomic).value, &__expected, (_value), \
0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); \
})
#elif defined(HAVE_SYNC_BUILTINS)
/* If compiler doesn't support __atomic builtins but supports __sync builtins,
 * we use them. */
/* __sync has no atomic store, so init is a plain write followed by a
 * full memory barrier. */
#define GF_ATOMIC_BASE_INIT(_atomic, _value) \
do { \
(_atomic).value = (_value); \
__sync_synchronize(); \
} while (0)
/* <op>_and_fetch variants return the NEW value... */
#define GF_ATOMIC_BASE_ADD(_atomic, _value) \
__sync_add_and_fetch(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_SUB(_atomic, _value) \
__sync_sub_and_fetch(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_AND(_atomic, _value) \
__sync_and_and_fetch(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_OR(_atomic, _value) \
__sync_or_and_fetch(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_XOR(_atomic, _value) \
__sync_xor_and_fetch(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_NAND(_atomic, _value) \
__sync_nand_and_fetch(&(_atomic).value, (_value))
/* ...while fetch_and_<op> variants return the OLD value. */
#define GF_ATOMIC_BASE_FETCH_ADD(_atomic, _value) \
__sync_fetch_and_add(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_FETCH_SUB(_atomic, _value) \
__sync_fetch_and_sub(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_FETCH_AND(_atomic, _value) \
__sync_fetch_and_and(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_FETCH_OR(_atomic, _value) \
__sync_fetch_and_or(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_FETCH_XOR(_atomic, _value) \
__sync_fetch_and_xor(&(_atomic).value, (_value))
#define GF_ATOMIC_BASE_FETCH_NAND(_atomic, _value) \
__sync_fetch_and_nand(&(_atomic).value, (_value))
/* __sync_lock_test_and_set is only an acquire barrier, so a full barrier
 * is issued first to get exchange-like semantics. Returns the old value. */
#define GF_ATOMIC_BASE_SWAP(_atomic, _value) \
({ \
__sync_synchronize(); \
__sync_lock_test_and_set(&(_atomic).value, (_value)); \
})
/* Returns true if the value was '_expected' and got replaced by '_value'. */
#define GF_ATOMIC_BASE_CMP_SWAP(_atomic, _expected, _value) \
__sync_bool_compare_and_swap(&(_atomic).value, (_expected), (_value))
/* __sync has no atomic load either: read by atomically adding zero. */
#define GF_ATOMIC_BASE_GET(_atomic) GF_ATOMIC_BASE_ADD(_atomic, 0)
#else /* !HAVE_ATOMIC_BUILTINS && !HAVE_SYNC_BUILTINS */
/* The compiler doesn't support any atomic builtin. We fallback to the
 * mutex-based implementation: every GF_ATOMIC_BASE_* macro is simply an
 * alias for its GF_ATOMIC_LOCK_* counterpart defined above. */
#define GF_ATOMIC_BASE_INIT(_atomic, _value) \
GF_ATOMIC_LOCK_INIT(_atomic, _value)
#define GF_ATOMIC_BASE_GET(_atomic) GF_ATOMIC_LOCK_GET(_atomic)
#define GF_ATOMIC_BASE_ADD(_atomic, _value) GF_ATOMIC_LOCK_ADD(_atomic, _value)
#define GF_ATOMIC_BASE_SUB(_atomic, _value) GF_ATOMIC_LOCK_SUB(_atomic, _value)
#define GF_ATOMIC_BASE_AND(_atomic, _value) GF_ATOMIC_LOCK_AND(_atomic, _value)
#define GF_ATOMIC_BASE_OR(_atomic, _value) GF_ATOMIC_LOCK_OR(_atomic, _value)
#define GF_ATOMIC_BASE_XOR(_atomic, _value) GF_ATOMIC_LOCK_XOR(_atomic, _value)
#define GF_ATOMIC_BASE_NAND(_atomic, _value) \
GF_ATOMIC_LOCK_NAND(_atomic, _value)
#define GF_ATOMIC_BASE_FETCH_ADD(_atomic, _value) \
GF_ATOMIC_LOCK_FETCH_ADD(_atomic, _value)
#define GF_ATOMIC_BASE_FETCH_SUB(_atomic, _value) \
GF_ATOMIC_LOCK_FETCH_SUB(_atomic, _value)
#define GF_ATOMIC_BASE_FETCH_AND(_atomic, _value) \
GF_ATOMIC_LOCK_FETCH_AND(_atomic, _value)
#define GF_ATOMIC_BASE_FETCH_OR(_atomic, _value) \
GF_ATOMIC_LOCK_FETCH_OR(_atomic, _value)
#define GF_ATOMIC_BASE_FETCH_XOR(_atomic, _value) \
GF_ATOMIC_LOCK_FETCH_XOR(_atomic, _value)
#define GF_ATOMIC_BASE_FETCH_NAND(_atomic, _value) \
GF_ATOMIC_LOCK_FETCH_NAND(_atomic, _value)
#define GF_ATOMIC_BASE_SWAP(_atomic, _value) \
GF_ATOMIC_LOCK_SWAP(_atomic, _value)
#define GF_ATOMIC_BASE_CMP_SWAP(_atomic, _expected, _value) \
GF_ATOMIC_LOCK_CMP_SWAP(_atomic, _expected, _value)
#endif /* HAVE_(ATOMIC|SYNC)_BUILTINS */
/* Here we declare the real atomic macros available to the user. */
/* All macros have a 'gf_atomic_xxx' as 1st argument */
/* Semantics: GF_ATOMIC_<OP> returns the NEW value after the operation;
 * GF_ATOMIC_FETCH_<OP> returns the OLD value before the operation;
 * GF_ATOMIC_SWAP returns the previous value; GF_ATOMIC_CMP_SWAP stores
 * '_value' only if the current value equals '_expected' and returns
 * true on success. GF_ATOMIC_CHOOSE dispatches each one to either the
 * lock-based or the builtin-based implementation at compile time. */
#define GF_ATOMIC_INIT(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, INIT, _value)
#define GF_ATOMIC_GET(_atomic) GF_ATOMIC_CHOOSE(_atomic, GET)
#define GF_ATOMIC_ADD(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, ADD, _value)
#define GF_ATOMIC_SUB(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, SUB, _value)
#define GF_ATOMIC_AND(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, AND, _value)
#define GF_ATOMIC_OR(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, OR, _value)
#define GF_ATOMIC_XOR(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, XOR, _value)
#define GF_ATOMIC_NAND(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, NAND, _value)
#define GF_ATOMIC_FETCH_ADD(_atomic, _value) \
GF_ATOMIC_CHOOSE(_atomic, FETCH_ADD, _value)
#define GF_ATOMIC_FETCH_SUB(_atomic, _value) \
GF_ATOMIC_CHOOSE(_atomic, FETCH_SUB, _value)
#define GF_ATOMIC_FETCH_AND(_atomic, _value) \
GF_ATOMIC_CHOOSE(_atomic, FETCH_AND, _value)
#define GF_ATOMIC_FETCH_OR(_atomic, _value) \
GF_ATOMIC_CHOOSE(_atomic, FETCH_OR, _value)
#define GF_ATOMIC_FETCH_XOR(_atomic, _value) \
GF_ATOMIC_CHOOSE(_atomic, FETCH_XOR, _value)
#define GF_ATOMIC_FETCH_NAND(_atomic, _value) \
GF_ATOMIC_CHOOSE(_atomic, FETCH_NAND, _value)
#define GF_ATOMIC_SWAP(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, SWAP, _value)
#define GF_ATOMIC_CMP_SWAP(_atomic, _expected, _value) \
GF_ATOMIC_CHOOSE(_atomic, CMP_SWAP, _expected, _value)
/* Convenience shorthands built on ADD/SUB with a constant 1. */
#define GF_ATOMIC_INC(_atomic) GF_ATOMIC_ADD(_atomic, 1)
#define GF_ATOMIC_DEC(_atomic) GF_ATOMIC_SUB(_atomic, 1)
#define GF_ATOMIC_FETCH_INC(_atomic) GF_ATOMIC_FETCH_ADD(_atomic, 1)
#define GF_ATOMIC_FETCH_DEC(_atomic) GF_ATOMIC_FETCH_SUB(_atomic, 1)
#endif /* _ATOMIC_H */
本人的能力有限,无法一次性看明白,所以记录下来,希望能够和各位大佬共同研习。当然,在 AI 时代也附上 AI 对这段内容的具体解释。最后,我还是想说:虽然代码被摘了出来,但这并不意味着你已经完全弄懂了!保持耐心(patience)与严谨,努力学习!
GlusterFS atomic.h
代码作用总结
这份代码是 GlusterFS 的原子操作封装头文件 atomic.h
,它的核心作用是提供一种 跨平台、安全的原子操作接口,让不同大小和类型的整数可以在多线程/多核环境下安全操作,同时对不同架构(32 位、64 位)和不同编译器支持的原子指令做了兼容处理。
1️⃣ 主要目标
- 实现原子操作(atomic operations):如 add、sub、and、or、xor、nand 等,既包括"操作后返回新值"的普通原子操作,也包括"先取旧值再操作"的 fetch_add / fetch_sub 等变体。
- 兼容不同平台与编译器:
  - 支持新式 __atomic 内建函数的 GCC/Clang:使用高性能原子操作。
  - 仅支持老式 __sync 内建函数的编译器:使用兼容的原子操作。
  - 完全不支持原子指令的编译器或架构:退回到互斥锁 (mutex) 保护。
- 处理不同数据类型大小:1/2/4/8 字节的整数类型,使用宏 GF_ATOMIC_TYPE 自动生成封装类型。
2️⃣ 核心结构与宏
2.1 原子类型定义
#define GF_ATOMIC_TYPE(_size, _name) \
typedef struct _gf_atomic_##_name##_t { \
GF_ATOMIC_MACRO(GF_ATOMIC_SIZE_, _size); \
_name##_t value; \
} gf_atomic_##_name##_t;
为每种整数类型生成结构体,内部包含:
- value:存储实际的数值。
- lk:可能是零长度数组,也可能是互斥锁(取决于平台和编译器是否支持原子指令)。
2.2 平台检测与锁选择
- GF_ATOMIC_SIZE_X 宏根据类型的字节大小决定是否需要使用锁。
- GF_ATOMIC_CHOOSE 根据结构体大小在编译期选择实现:大于 uint64_t → 锁保护版本;否则 → 原子指令版本。
2.3 原子操作封装
分三种实现方式:
- 锁保护版本(GF_ATOMIC_LOCK_*)
- 新式 __atomic 内建函数版本(HAVE_ATOMIC_BUILTINS)
- 旧式 __sync 内建函数版本(HAVE_SYNC_BUILTINS)
用户使用统一宏,如:
GF_ATOMIC_ADD(a, 5);
GF_ATOMIC_FETCH_SUB(a, 3);
GF_ATOMIC_CMP_SWAP(a, expected, new_val);
宏会自动选择锁版本或内建版本,无需用户关心底层实现。
2.4 支持类型
- 有符号整数:
int8, int16, int32, int64, intptr
- 无符号整数:
uint8, uint16, uint32, uint64, uintptr
- 默认原子类型:
gf_atomic_t = gf_atomic_int64_t
2.5 常用操作宏
宏 | 功能 |
---|---|
GF_ATOMIC_INIT(a, val) | 初始化原子值 |
GF_ATOMIC_GET(a) | 获取当前值 |
GF_ATOMIC_ADD(a, x) | 原子加 |
GF_ATOMIC_SUB(a, x) | 原子减 |
GF_ATOMIC_FETCH_ADD(a, x) | 先获取旧值再加 |
GF_ATOMIC_SWAP(a, x) | 交换值 |
GF_ATOMIC_CMP_SWAP(a, e, x) | 比较交换(CAS) |
GF_ATOMIC_INC(a) / GF_ATOMIC_DEC(a) | 自增 / 自减 |
3️⃣ 核心特点总结
- 跨平台:自动选择锁/原子指令。
- 高性能:支持 GCC/Clang 的原子内建函数。
- 通用接口:统一操作宏,类型安全。
- 兼容性强:支持旧编译器和 32/64 位架构。
- 可扩展性:可增加新的整数类型或扩展平台条件。
一句话概括:
这个 atomic.h
是 GlusterFS 的 跨平台原子操作封装库,让多线程环境下整数操作既安全又高效,同时屏蔽了不同平台和编译器的差异。
Ahmedabad
Tags
glusterfs