This source file includes the following definitions:
- mca_btl_vader_xpmem_init
- vader_check_reg
- vader_return_registration
- vader_get_registation
- mca_btl_vader_endpoint_xpmem_rcache_cleanup
- mca_btl_vader_xpmem_cleanup_endpoint
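
These are the XPMEM single-copy support routines for Open MPI's shared-memory ("vader") BTL: segment export at initialization, an XPMEM attachment cache keyed on the peer rank, and per-endpoint cleanup.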
#include "btl_vader.h"

#include "opal/include/opal/align.h"
#include "opal/mca/memchecker/base/base.h"

#if OPAL_BTL_VADER_HAVE_XPMEM

int mca_btl_vader_xpmem_init (void)
{
    /* export the entire address space for attachment by peer processes */
    mca_btl_vader_component.my_seg_id = xpmem_make (0, VADER_MAX_ADDRESS, XPMEM_PERMIT_MODE, (void *)0666);
    if (-1 == mca_btl_vader_component.my_seg_id) {
        return OPAL_ERR_NOT_AVAILABLE;
    }

    /* single-copy put/get are available through XPMEM */
    mca_btl_vader.super.btl_get = mca_btl_vader_get_xpmem;
    mca_btl_vader.super.btl_put = mca_btl_vader_put_xpmem;

    return OPAL_SUCCESS;
}
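
/* Illustrative sketch (not part of the original file): a segment exported
 * above with xpmem_make() is consumed on the peer side by pairing xpmem_get()
 * with xpmem_attach(). The segid/len names below are hypothetical; in this
 * BTL the apid is obtained during endpoint setup and the actual attach
 * happens in vader_get_registation() below.
 *
 *     xpmem_apid_t apid = xpmem_get (segid, XPMEM_RDWR, XPMEM_PERMIT_MODE, (void *) 0666);
 *     struct xpmem_addr addr = { .apid = apid, .offset = 0 };
 *     void *mapped = xpmem_attach (addr, len, NULL);   /+ peer memory, mapped locally +/
 *     ...
 *     xpmem_detach (mapped);
 *     xpmem_release (apid);
 */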

struct vader_check_reg_ctx_t {
    mca_btl_base_endpoint_t *ep;
    mca_rcache_base_registration_t **reg;
    uintptr_t base;
    uintptr_t bound;
};
typedef struct vader_check_reg_ctx_t vader_check_reg_ctx_t;

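/* return codes used by vader_check_reg when invoked from
 * mca_rcache_base_vma_iterate: OPAL_SUCCESS keeps iterating past an unrelated
 * registration, 1 reports a match that fully covers the requested range (with
 * its reference count already incremented), and 2 reports an overlapping
 * registration that vader_get_registation must replace. */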
static int vader_check_reg (mca_rcache_base_registration_t *reg, void *ctx)
{
    vader_check_reg_ctx_t *vader_ctx = (vader_check_reg_ctx_t *) ctx;

    if ((intptr_t) reg->alloc_base != vader_ctx->ep->peer_smp_rank ||
        (reg->flags & MCA_RCACHE_FLAGS_PERSIST)) {
        /* ignore registrations for other peers and persistent registrations */
        return OPAL_SUCCESS;
    }

    vader_ctx->reg[0] = reg;

    if (vader_ctx->bound <= (uintptr_t) reg->bound && vader_ctx->base >= (uintptr_t) reg->base) {
        /* the existing registration covers the requested range. take a reference */
        opal_atomic_add (&reg->ref_count, 1);
        return 1;
    }

    /* overlapping registration that does not cover the requested range */
    return 2;
}

void vader_return_registration (mca_rcache_base_registration_t *reg, struct mca_btl_base_endpoint_t *ep)
{
    mca_rcache_base_vma_module_t *vma_module = mca_btl_vader_component.vma_module;
    int32_t ref_count;

    ref_count = opal_atomic_add_fetch_32 (&reg->ref_count, -1);
    if (OPAL_UNLIKELY(0 == ref_count && !(reg->flags & MCA_RCACHE_FLAGS_PERSIST))) {
        /* last reference to a non-persistent registration. tear down the attachment */
        mca_rcache_base_vma_delete (vma_module, reg);

        opal_memchecker_base_mem_noaccess (reg->rcache_context, (uintptr_t)(reg->bound - reg->base));
        (void) xpmem_detach (reg->rcache_context);
        OBJ_RELEASE (reg);
    }
}

mca_rcache_base_registration_t *vader_get_registation (struct mca_btl_base_endpoint_t *ep, void *rem_ptr,
                                                       size_t size, int flags, void **local_ptr)
{
    mca_rcache_base_vma_module_t *vma_module = mca_btl_vader_component.vma_module;
    uint64_t attach_align = 1 << mca_btl_vader_component.log_attach_align;
    mca_rcache_base_registration_t *reg = NULL;
    vader_check_reg_ctx_t check_ctx = {.ep = ep, .reg = &reg};
    xpmem_addr_t xpmem_addr;
    uintptr_t base, bound;
    int rc;

    /* align the requested region to the configured attachment granularity */
    base = OPAL_DOWN_ALIGN((uintptr_t) rem_ptr, attach_align, uintptr_t);
    bound = OPAL_ALIGN((uintptr_t) rem_ptr + size - 1, attach_align, uintptr_t) + 1;
    if (OPAL_UNLIKELY(bound > VADER_MAX_ADDRESS)) {
        bound = VADER_MAX_ADDRESS;
    }

    check_ctx.base = base;
    check_ctx.bound = bound;

    /* look for an existing registration for this peer that covers the range */
    rc = mca_rcache_base_vma_iterate (vma_module, (void *) base, bound - base, true, vader_check_reg, &check_ctx);
    if (2 == rc) {
        /* an overlapping registration exists but does not cover the whole
         * requested range. remove it from the VMA tree and drop its cache
         * reference so a larger registration can be created below. */
        mca_rcache_base_vma_delete (vma_module, reg);

        /* start the replacement registration at the lower of the two bases */
        base = (uintptr_t) reg->base < base ? (uintptr_t) reg->base : base;

        vader_return_registration (reg, ep);

        reg = NULL;
    }

    if (NULL == reg) {
        reg = OBJ_NEW(mca_rcache_base_registration_t);
        if (OPAL_UNLIKELY(NULL == reg)) {
            /* out of resources. do not fall through to the dereference below */
            return NULL;
        }

        /* one reference for the caller and one for the cache */
        reg->ref_count = 2;
        reg->base = (unsigned char *) base;
        reg->bound = (unsigned char *) bound;
        reg->flags = flags;
        reg->alloc_base = (void *) (intptr_t) ep->peer_smp_rank;

#if defined(HAVE_SN_XPMEM_H)
        xpmem_addr.id = ep->segment_data.xpmem.apid;
#else
        xpmem_addr.apid = ep->segment_data.xpmem.apid;
#endif
        xpmem_addr.offset = base;

        /* map the peer's memory into this process */
        reg->rcache_context = xpmem_attach (xpmem_addr, bound - base, NULL);
        if (OPAL_UNLIKELY((void *)-1 == reg->rcache_context)) {
            OBJ_RELEASE(reg);
            return NULL;
        }

        opal_memchecker_base_mem_defined (reg->rcache_context, bound - base);

        if (!(flags & MCA_RCACHE_FLAGS_PERSIST)) {
            mca_rcache_base_vma_insert (vma_module, reg, 0);
        }
    }

    opal_atomic_wmb ();
    /* translate the remote pointer into the local attachment */
    *local_ptr = (void *) ((uintptr_t) reg->rcache_context +
                           (ptrdiff_t)((uintptr_t) rem_ptr - (uintptr_t) reg->base));

    return reg;
}
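
/* Illustrative sketch (not part of the original file): a single-copy get is
 * expected to pair vader_get_registation() with vader_return_registration()
 * roughly as follows. endpoint/remote_address/local_address/size are
 * hypothetical parameter names.
 *
 *     void *rem_ptr;
 *     mca_rcache_base_registration_t *reg =
 *         vader_get_registation (endpoint, (void *)(intptr_t) remote_address, size, 0, &rem_ptr);
 *     if (OPAL_UNLIKELY(NULL == reg)) {
 *         return OPAL_ERROR;
 *     }
 *     memcpy (local_address, rem_ptr, size);   /+ copy straight out of the mapping +/
 *     vader_return_registration (reg, endpoint);
 */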

static int mca_btl_vader_endpoint_xpmem_rcache_cleanup (mca_rcache_base_registration_t *reg, void *ctx)
{
    mca_btl_vader_endpoint_t *ep = (mca_btl_vader_endpoint_t *) ctx;
    if ((intptr_t) reg->alloc_base == ep->peer_smp_rank) {
        /* this registration belongs to the departing peer. drop it regardless
         * of any outstanding references */
        reg->ref_count = 0;
        OBJ_RELEASE(reg);
    }

    return OPAL_SUCCESS;
}

void mca_btl_vader_xpmem_cleanup_endpoint (struct mca_btl_base_endpoint_t *ep)
{
    /* clean out any registrations cached for this endpoint */
    (void) mca_rcache_base_vma_iterate (mca_btl_vader_component.vma_module,
                                        NULL, (size_t) -1, true,
                                        mca_btl_vader_endpoint_xpmem_rcache_cleanup,
                                        (void *) ep);
    if (ep->segment_base) {
        xpmem_release (ep->segment_data.xpmem.apid);
        ep->segment_data.xpmem.apid = 0;
    }
}

#endif /* OPAL_BTL_VADER_HAVE_XPMEM */