#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
#ifdef Py_BUILD_CORE
#include "dynamic_annotations.h"
#include "pyconfig.h"
#if defined(HAVE_STD_ATOMIC)
#include <stdatomic.h>
#endif
/* This is modeled after the atomics interface from C1x (standardized as
 * C11), according to the draft at
 * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
 * Operations and types are named the same except with a _Py_ prefix
 * and have the same semantics.
 *
 * Beware, the implementations here are deep magic.
 */
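/* A minimal usage sketch (illustration only; `ready` and the two threads
 * are hypothetical, not part of this header). A writer publishes a flag
 * with release semantics and a reader observes it with acquire semantics:
 *
 *     static _Py_atomic_int ready = {0};
 *
 *     // writer thread: everything written before this store is visible
 *     // to a reader that observes ready == 1 via an acquire load.
 *     _Py_atomic_store_explicit(&ready, 1, _Py_memory_order_release);
 *
 *     // reader thread
 *     if (_Py_atomic_load_explicit(&ready, _Py_memory_order_acquire)) {
 *         // safe to use the data published before the store
 *     }
 */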
#if defined(HAVE_STD_ATOMIC)
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = memory_order_relaxed,
    _Py_memory_order_acquire = memory_order_acquire,
    _Py_memory_order_release = memory_order_release,
    _Py_memory_order_acq_rel = memory_order_acq_rel,
    _Py_memory_order_seq_cst = memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    atomic_uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    atomic_int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
/* Use builtin atomic operations in GCC >= 4.7 */
#elif defined(HAVE_BUILTIN_ATOMIC)
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = __ATOMIC_RELAXED,
    _Py_memory_order_acquire = __ATOMIC_ACQUIRE,
    _Py_memory_order_release = __ATOMIC_RELEASE,
    _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL,
    _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    __atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    __atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER)  \
    (assert((ORDER) == __ATOMIC_RELAXED                         \
            || (ORDER) == __ATOMIC_SEQ_CST                      \
            || (ORDER) == __ATOMIC_RELEASE),                    \
     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER)             \
    (assert((ORDER) == __ATOMIC_RELAXED                         \
            || (ORDER) == __ATOMIC_SEQ_CST                      \
            || (ORDER) == __ATOMIC_ACQUIRE                      \
            || (ORDER) == __ATOMIC_CONSUME),                    \
     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
#else
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;
/* Only support GCC (for expression statements) and x86 (for simple
 * atomic semantics) for now */
#if defined(__GNUC__) && (defined(__i386__) || defined(__amd64))
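/* A signal fence only has to constrain the compiler: the empty asm with a
   "memory" clobber below forbids reordering memory accesses across it but
   emits no instruction. */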
static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("":::"memory");
}
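/* A thread fence must also order accesses with respect to other CPUs, so a
   real mfence instruction (a full hardware barrier on x86) is emitted. */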
static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("mfence":::"memory");
}
/* Tell the race checker about this operation's effects. */
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
    (void)address;              /* shut up -Wunused-parameter */
    switch(order) {
    case _Py_memory_order_release:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_BEFORE(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_acquire:
        break;
    }
    switch(order) {
    case _Py_memory_order_acquire:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_AFTER(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_release:
        break;
    }
}
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) new_val = NEW_VAL; \
        volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
            _Py_atomic_signal_fence(_Py_memory_order_release); \
            /* fallthrough */ \
        case _Py_memory_order_relaxed: \
            *volatile_data = new_val; \
            break; \
        \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
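            /* An xchg with a memory operand carries an implicit LOCK */ \
            /* prefix on x86, so it acts as a full barrier and makes */ \
            /* this store sequentially consistent. */ \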
__asm__ volatile("xchg %0, %1" \
: "+r"(new_val) \
: "m"(atomic_val->_value) \
: "memory"); \
break; \
} \
_Py_ANNOTATE_IGNORE_WRITES_END(); \
})
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) result; \
        volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_READS_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are not releases by default, so need a */ \
            /* thread fence. */ \
            _Py_atomic_thread_fence(_Py_memory_order_release); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        result = *volatile_data; \
        switch(order) { \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are automatically acquire operations so */ \
            /* can get by with just a compiler fence. */ \
            _Py_atomic_signal_fence(_Py_memory_order_acquire); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_READS_END(); \
        result; \
    })
#else /* !gcc x86 */
/* Fall back to other compilers and processors by assuming that simple
   volatile accesses are atomic. This is false, so people should port
   this. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    ((ATOMIC_VAL)->_value = NEW_VAL)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ((ATOMIC_VAL)->_value)
#endif /* !gcc x86 */
#endif
/* Standardized shortcuts. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)

/* Python-local extensions */

#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
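/* Usage sketch for the shortcuts above (illustration only; `counter` is a
 * hypothetical variable, not part of this header):
 *
 *     static _Py_atomic_int counter = {0};
 *
 *     _Py_atomic_store(&counter, 1);              // sequentially consistent
 *     int v = _Py_atomic_load_relaxed(&counter);  // no ordering guarantees
 */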
#endif /* Py_BUILD_CORE */
#endif /* Py_ATOMIC_H */