klnet_utils.c
/* -*- C -*- */
/*
 * Copyright (c) 2012-2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */


/* This file is designed to be included in klnet_core.c. */

#ifdef NLX_DEBUG
#include <linux/version.h>
/* LINUX_VERSION_CODE, OBD_OCD_VERSION */
#if M0_LUSTRE_VERSION < 2110
#include <lustre_ver.h>
#else
#include <lustre/lustre_ver.h>
#include <lustre/lustre_idl.h>
#endif

#define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_LNET
#include "lib/trace.h" /* M0_LOG and M0_ENTRY */

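/*
 * Debug-only helpers, compiled only when NLX_DEBUG is defined, that log
 * LNet data structures (handles, process ids, MDs and events) via M0_LOG.
 */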
static void __nlx_kprint_lnet_handle(const char *pre, uint64_t cookie)
{
        M0_LOG(M0_DEBUG, "%s: %" PRIx64 " (lnet_handle_any_t)", (char *)pre,
               cookie);
}
#define nlx_kprint_lnet_handle(pre, h) \
        __nlx_kprint_lnet_handle(pre, (h).cookie)


static void nlx_kprint_lnet_process_id(const char *pre, lnet_process_id_t p)
{
        M0_LOG(M0_DEBUG, "%s: NID=%lu PID=%u\n", (char*)pre,
               (long unsigned) p.nid, (unsigned) p.pid);
}

static void nlx_kprint_lnet_md(const char *pre, const lnet_md_t *md)
{
        M0_LOG(M0_DEBUG, "%s: %p (lnet_md_t)\n"
               "\t    start: %p\n"
               "\t  options: %x\n"
               "\t   length: %d\n"
               "\tthreshold: %d\n"
               "\t max_size: %d\n"
               "\t user_ptr: %p\n",
               (char*)pre, md, md->start, md->options, md->length,
               md->threshold, md->max_size, md->user_ptr);

        nlx_kprint_lnet_handle("\teq_handle", md->eq_handle);
#if 0
        {
                int i;
                for (i = 0; i < kcb->kb_kiov_len; ++i) {
                        M0_LOG(M0_DEBUG, "\t[%d] %p %d %d\n", i,
                               kcb->kb_kiov[i].kiov_page,
                               kcb->kb_kiov[i].kiov_len,
                               kcb->kb_kiov[i].kiov_offset);
                }
        }
#endif
}

static const char *nlx_kcore_lnet_event_type_to_string(lnet_event_kind_t et)
{
        const char *name;

        /*
         * This enum is used for build-time checks to ensure that we are
         * using the correct mapping of LNet event names.
         *
         * The LNET_EVENT_XXX values were changed in Lustre release 2.2.57.0;
         * see commit v2_2_57-10-g75a8f4b "LU-56 lnet: split lnet_commit_md
         * and cleanup" in the Lustre git repository.
         */
        enum {
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 2, 57, 0)
                M0_LNET_EV_GET    = 0,
                M0_LNET_EV_PUT    = 1,
                M0_LNET_EV_REPLY  = 2,
                M0_LNET_EV_ACK    = 3,
                M0_LNET_EV_SEND   = 4,
                M0_LNET_EV_UNLINK = 5,
#else
                M0_LNET_EV_GET    = 1,
                M0_LNET_EV_PUT    = 2,
                M0_LNET_EV_REPLY  = 3,
                M0_LNET_EV_ACK    = 4,
                M0_LNET_EV_SEND   = 5,
                M0_LNET_EV_UNLINK = 6,
#endif
                M0_LNET_EV_UNKNOWN,

                M0_LNET_EV__FIRST = M0_LNET_EV_GET,
                M0_LNET_EV__LAST  = M0_LNET_EV_UNLINK,
        };

        static const char *lnet_event_s[] = {
                [LNET_EVENT_GET]    = "GET",
                [LNET_EVENT_PUT]    = "PUT",
                [LNET_EVENT_REPLY]  = "REPLY",
                [LNET_EVENT_ACK]    = "ACK",
                [LNET_EVENT_SEND]   = "SEND",
                [LNET_EVENT_UNLINK] = "UNLINK",

                [M0_LNET_EV_UNKNOWN] = "<Unknown>"
        };

        M0_CASSERT((int)LNET_EVENT_GET    == (int)M0_LNET_EV_GET);
        M0_CASSERT((int)LNET_EVENT_PUT    == (int)M0_LNET_EV_PUT);
        M0_CASSERT((int)LNET_EVENT_REPLY  == (int)M0_LNET_EV_REPLY);
        M0_CASSERT((int)LNET_EVENT_ACK    == (int)M0_LNET_EV_ACK);
        M0_CASSERT((int)LNET_EVENT_SEND   == (int)M0_LNET_EV_SEND);
        M0_CASSERT((int)LNET_EVENT_UNLINK == (int)M0_LNET_EV_UNLINK);

        if (et >= (int)M0_LNET_EV__FIRST && et <= (int)M0_LNET_EV__LAST)
                name = lnet_event_s[et];
        else
                name = lnet_event_s[M0_LNET_EV_UNKNOWN];

        return name;
}

static void nlx_kprint_lnet_event(const char *pre, const lnet_event_t *e)
{

        if (e == NULL) {
                M0_LOG(M0_DEBUG, "%s: <null> (lnet_event_t)\n", (char*) pre);
                return;
        }

        M0_LOG(M0_DEBUG, "%s: %p (lnet_event_t)\n", (char*) pre, e);

        nlx_kprint_lnet_process_id("\t    target:", e->target);
        nlx_kprint_lnet_process_id("\t initiator:", e->initiator);

        M0_LOG(M0_DEBUG,
               "\t    sender: %ld\n"
               "\t      type: %d %s\n"
               "\t  pt_index: %u\n"
               "\tmatch_bits: %lx\n"
               "\t   rlength: %u\n"
               "\t   mlength: %u\n",
               (long unsigned) e->sender, e->type,
               (char*) nlx_kcore_lnet_event_type_to_string(e->type),
               e->pt_index, (long unsigned) e->match_bits,
               e->rlength, e->mlength);

        nlx_kprint_lnet_handle("\t md_handle", e->md_handle);

        M0_LOG(M0_DEBUG,
               "\t  hdr_data: %lx\n"
               "\t    status: %d\n"
               "\t  unlinked: %d\n"
               "\t    offset: %u\n",
               (long unsigned) e->hdr_data, e->status, e->unlinked, e->offset);

        nlx_kprint_lnet_md("\t        md", &e->md);
}

static void nlx_kprint_kcore_tm(const char *pre,
                                const struct nlx_kcore_transfer_mc *ktm)
{
        M0_LOG(M0_DEBUG, "%s: %p (nlx_kcore_transfer_mc)\n", (char*) pre, ktm);
        if (ktm == NULL)
                return;
        M0_LOG(M0_DEBUG, "\t magic: %lu\n", (unsigned long)ktm->ktm_magic);
        nlx_kprint_lnet_handle("\t eqh", ktm->ktm_eqh);
}
#endif

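/**
 * Packs a transfer machine identifier and portal number into a single
 * 64-bit value; the inverse of nlx_kcore_hdr_data_decode().
 */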
static inline uint64_t nlx_kcore_hdr_data_encode_raw(uint32_t tmid,
                                                     uint32_t portal)
{
        return ((uint64_t) tmid << M0_NET_LNET_TMID_SHIFT) |
                (portal & M0_NET_LNET_PORTAL_MASK);
}

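/**
 * Encodes the hdr_data value from the end point address of a transfer
 * machine, using nlx_kcore_hdr_data_encode_raw().
 */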
static uint64_t nlx_kcore_hdr_data_encode(struct nlx_kcore_transfer_mc *kctm)
{
        struct nlx_core_ep_addr *cepa;

        cepa = &kctm->ktm_addr;
        return nlx_kcore_hdr_data_encode_raw(cepa->cepa_tmid,
                                             cepa->cepa_portal);
}

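/**
 * Decodes a 64-bit hdr_data value back into its portal number and transfer
 * machine identifier components.
 */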
static inline void nlx_kcore_hdr_data_decode(uint64_t hdr_data,
                                             uint32_t *portal,
                                             uint32_t *tmid)
{
        *portal = (uint32_t) (hdr_data & M0_NET_LNET_PORTAL_MASK);
        *tmid = hdr_data >> M0_NET_LNET_TMID_SHIFT;
}

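/**
 * Initializes the common fields of an LNet memory descriptor (umd) from a
 * core buffer and its kernel private data.  For LNetGet operations the
 * threshold is forced to 2 (a SEND and a REPLY event are expected) and the
 * out-of-order reply tracking fields of the kernel buffer are reset.
 */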
static void nlx_kcore_umd_init(struct nlx_kcore_transfer_mc *kctm,
                               struct nlx_core_buffer *lcbuf,
                               struct nlx_kcore_buffer *kcb,
                               int threshold,
                               int max_size,
                               unsigned options,
                               bool isLNetGetOp,
                               lnet_md_t *umd)
{
        M0_PRE(threshold > 0);
        M0_PRE(kcb->kb_kiov_len > 0);
        M0_PRE(max_size >= 0);
        M0_PRE(options == 0 ||
               options == LNET_MD_OP_PUT ||
               options == LNET_MD_OP_GET);

        M0_SET0(umd);
        umd->options = options;
        umd->start = kcb->kb_kiov;
        umd->options |= LNET_MD_KIOV;
        umd->length = kcb->kb_kiov_len;
        kcb->kb_qtype = lcbuf->cb_qtype;
        kcb->kb_add_time = m0_time_now();
        if (isLNetGetOp) {
                umd->threshold = 2;
                kcb->kb_ooo_reply = false;
                kcb->kb_ooo_mlength = 0;
                kcb->kb_ooo_status = 0;
                kcb->kb_ooo_offset = 0;
        } else
                umd->threshold = threshold;
        if (max_size != 0) {
                umd->max_size = max_size;
                umd->options |= LNET_MD_MAX_SIZE;
        }
        umd->user_ptr = kcb;
        umd->eq_handle = kctm->ktm_eqh;

        NLXDBG(kctm, 2, nlx_kprint_lnet_md("umd init", umd));
        M0_POST(ergo(isLNetGetOp, umd->threshold == 2 && !kcb->kb_ooo_reply));
}

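/**
 * Temporarily shortens the kiov vector of a buffer so that it describes
 * exactly the number of bytes to be transferred: the length of the last
 * kiov entry needed is saved and trimmed, and umd->length is reduced to the
 * number of entries actually used.  nlx_kcore_kiov_restore_length() undoes
 * the adjustment.
 */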
static void nlx_kcore_kiov_adjust_length(struct nlx_kcore_transfer_mc *ktm,
                                         struct nlx_kcore_buffer *kcb,
                                         lnet_md_t *umd,
                                         m0_bcount_t bytes)
{
        size_t num;
        unsigned last;

        M0_PRE(umd->start != NULL);
        M0_PRE(umd->options & LNET_MD_KIOV);
        M0_PRE(umd->length > 0);
        M0_PRE(umd->start == kcb->kb_kiov);

        num = nlx_kcore_num_kiov_entries_for_bytes((lnet_kiov_t *) umd->start,
                                                   umd->length, bytes, &last);
        NLXDBGP(ktm, 2, "%p: kbuf:%p size:%ld vec:%lu/%lu loff:%u\n",
                ktm, kcb, (unsigned long) bytes,
                (unsigned long) num, (unsigned long) umd->length, last);
        kcb->kb_kiov_adj_idx = num - 1;
        M0_POST(kcb->kb_kiov_adj_idx >= 0);
        M0_POST(kcb->kb_kiov_adj_idx < kcb->kb_kiov_len);
        kcb->kb_kiov_orig_len = kcb->kb_kiov[kcb->kb_kiov_adj_idx].kiov_len;
        kcb->kb_kiov[kcb->kb_kiov_adj_idx].kiov_len = last;
        umd->length = num;
        M0_POST(nlx_kcore_kiov_invariant(umd->start, umd->length));
        return;
}

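/**
 * Restores the original length of the kiov entry that was trimmed by
 * nlx_kcore_kiov_adjust_length().
 */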
static void nlx_kcore_kiov_restore_length(struct nlx_kcore_buffer *kcb)
{
        M0_PRE(kcb->kb_kiov_adj_idx >= 0);
        M0_PRE(kcb->kb_kiov_adj_idx < kcb->kb_kiov_len);
        kcb->kb_kiov[kcb->kb_kiov_adj_idx].kiov_len = kcb->kb_kiov_orig_len;

        return;
}

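/**
 * Attaches an LNet match entry (ME) and memory descriptor (MD) for a buffer:
 * LNetMEAttach() creates a match entry for the buffer's portal and match
 * bits, then LNetMDAttach() attaches the memory descriptor to it.  On
 * failure the match entry is unlinked and the MD handle invalidated.
 */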
static int nlx_kcore_LNetMDAttach(struct nlx_kcore_transfer_mc *kctm,
                                  struct nlx_core_buffer *lcbuf,
                                  struct nlx_kcore_buffer *kcb,
                                  lnet_md_t *umd)
{
        lnet_handle_me_t me_handle;
        lnet_process_id_t id;
        int rc;

        M0_PRE(lcbuf->cb_match_bits != 0);

        id.nid = LNET_NID_ANY;
        id.pid = LNET_PID_ANY;
        rc = LNetMEAttach(lcbuf->cb_addr.cepa_portal, id,
                          lcbuf->cb_match_bits, 0,
                          LNET_UNLINK, LNET_INS_AFTER, &me_handle);
        if (rc != 0) {
                NLXDBGP(kctm, 1, "LNetMEAttach: %d\n", rc);
                return M0_RC(rc);
        }
        M0_POST(!LNetHandleIsInvalid(me_handle));
        NLXDBG(kctm, 2, nlx_print_core_buffer("nlx_kcore_LNetMDAttach", lcbuf));

        kcb->kb_ktm = kctm; /* loopback can deliver in the LNetPut call */
        rc = LNetMDAttach(me_handle, *umd, LNET_UNLINK, &kcb->kb_mdh);
        if (rc == 0) {
                NLXDBG(kctm, 1, nlx_kprint_lnet_handle("MDAttach", kcb->kb_mdh));
        } else {
                int trc = LNetMEUnlink(me_handle);
                NLXDBGP(kctm, 1, "LNetMDAttach: %d\n", rc);
                NLXDBGP(kctm, 1, "LNetMEUnlink: %d\n", trc);
                M0_ASSERT(trc == 0);
                LNetInvalidateMDHandle(&kcb->kb_mdh);
                kcb->kb_ktm = NULL;
        }

        /* Cannot make these assertions here as delivery is asynchronous, and
           could have completed before we got here.
           M0_POST(ergo(rc == 0, !LNetMDHandleIsInvalid(kcb->kb_mdh)));
           M0_POST(ergo(rc == 0, kcb->kb_ktm == kctm));
         */
        return M0_RC(rc);
}

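/**
 * Unlinks the memory descriptor associated with a buffer via LNetMDUnlink().
 */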
static int nlx_kcore_LNetMDUnlink(struct nlx_kcore_transfer_mc *kctm,
                                  struct nlx_kcore_buffer *kcb)
{
        int rc;

        rc = LNetMDUnlink(kcb->kb_mdh);
        NLXDBG(kctm, 1, NLXP("LNetMDUnlink: %d\n", rc));
        return M0_RC(rc);
}

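/**
 * Sends a buffer's data using LNetPut(): the umd is bound with LNetMDBind()
 * and the target is taken from the core buffer's end point address.  On
 * failure the MD is unlinked and the MD handle invalidated.
 */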
static int nlx_kcore_LNetPut(struct nlx_kcore_transfer_mc *kctm,
                             struct nlx_core_buffer *lcbuf,
                             struct nlx_kcore_buffer *kcb,
                             lnet_md_t *umd)
{
        lnet_process_id_t target;
        int rc;

        M0_PRE(lcbuf->cb_match_bits != 0);

        rc = LNetMDBind(*umd, LNET_UNLINK, &kcb->kb_mdh);
        if (rc != 0) {
                NLXDBGP(kctm, 1, "LNetMDBind: %d\n", rc);
                return M0_RC(rc);
        }
        NLXDBG(kctm, 2, nlx_print_core_buffer("nlx_kcore_LNetPut", lcbuf));
        NLXDBG(kctm, 2, nlx_kprint_lnet_handle("LNetMDBind", kcb->kb_mdh));

        target.nid = lcbuf->cb_addr.cepa_nid;
        target.pid = lcbuf->cb_addr.cepa_pid;
        kcb->kb_ktm = kctm; /* loopback can deliver in the LNetPut call */
        rc = LNetPut(kctm->ktm_addr.cepa_nid, kcb->kb_mdh, LNET_NOACK_REQ,
                     target, lcbuf->cb_addr.cepa_portal,
                     lcbuf->cb_match_bits, 0,
                     nlx_kcore_hdr_data_encode(kctm));
        if (rc != 0) {
                int trc = LNetMDUnlink(kcb->kb_mdh);
                NLXDBGP(kctm, 1, "LNetPut: %d\n", rc);
                NLXDBGP(kctm, 1, "LNetMDUnlink: %d\n", trc);
                M0_ASSERT(trc == 0);
                LNetInvalidateMDHandle(&kcb->kb_mdh);
                kcb->kb_ktm = NULL;
        }

        /* Cannot make these assertions here, because loopback can deliver
           before we get here. Leaving the assertions in the comment.
           M0_POST(ergo(rc == 0, !LNetMDHandleIsInvalid(kcb->kb_mdh)));
           M0_POST(ergo(rc == 0, kcb->kb_ktm == kctm));
         */
        return M0_RC(rc);
}

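/**
 * Initiates an active read of a remote buffer using LNetGet().  The umd must
 * have a threshold of 2 because both a SEND and a REPLY event are expected.
 * On failure the MD is unlinked and the MD handle invalidated.
 */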
static int nlx_kcore_LNetGet(struct nlx_kcore_transfer_mc *kctm,
                             struct nlx_core_buffer *lcbuf,
                             struct nlx_kcore_buffer *kcb,
                             lnet_md_t *umd)
{
        lnet_process_id_t target;
        int rc;

        M0_PRE(lcbuf->cb_match_bits != 0);

        M0_PRE(umd->threshold == 2);

        rc = LNetMDBind(*umd, LNET_UNLINK, &kcb->kb_mdh);
        if (rc != 0) {
                NLXDBGP(kctm, 1, "LNetMDBind: %d\n", rc);
                return M0_RC(rc);
        }
        NLXDBG(kctm, 2, nlx_print_core_buffer("nlx_kcore_LNetGet", lcbuf));
        NLXDBG(kctm, 2, nlx_kprint_lnet_handle("LNetMDBind", kcb->kb_mdh));

        target.nid = lcbuf->cb_addr.cepa_nid;
        target.pid = lcbuf->cb_addr.cepa_pid;
        kcb->kb_ktm = kctm; /* loopback can deliver in the LNetGet call */
        rc = LNetGet(kctm->ktm_addr.cepa_nid, kcb->kb_mdh,
                     target, lcbuf->cb_addr.cepa_portal,
                     lcbuf->cb_match_bits, 0
#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(2, 11, 55, 0)
                     , false
#endif
                     );

        if (rc != 0) {
                int trc = LNetMDUnlink(kcb->kb_mdh);
                NLXDBGP(kctm, 1, "LNetGet: %d\n", rc);
                NLXDBGP(kctm, 1, "LNetMDUnlink: %d\n", trc);
                M0_ASSERT(trc == 0);
                LNetInvalidateMDHandle(&kcb->kb_mdh);
                kcb->kb_ktm = NULL;
        }

        /* Cannot make these assertions here, because loopback can deliver
           before we get here. Leaving the assertions in the comment.
           M0_POST(ergo(rc == 0, !LNetMDHandleIsInvalid(kcb->kb_mdh)));
           M0_POST(ergo(rc == 0, kcb->kb_ktm == kctm));
         */
        return M0_RC(rc);
}

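/**
 * Maps the shared nlx_core_domain object of a kernel domain into the kernel
 * address space, using kmap() of the page recorded in its kmem location.
 */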
static struct nlx_core_domain *nlx_kcore_core_domain_map(
                                                struct nlx_kcore_domain *kd)
{
        char *ptr;
        struct nlx_core_kmem_loc *loc;
        struct nlx_core_domain *ret;

        loc = &kd->kd_cd_loc;
        ptr = kmap(loc->kl_page);
        ret = (struct nlx_core_domain *) (ptr + loc->kl_offset);
        M0_POST(ret != NULL);
        return ret;
}

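/**
 * Unmaps the shared nlx_core_domain object by kunmap()ing its page.
 */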
static void nlx_kcore_core_domain_unmap(struct nlx_kcore_domain *kd)
{
        kunmap(kd->kd_cd_loc.kl_page);
}

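/**
 * Maps the shared nlx_core_buffer object of a kernel buffer into the kernel
 * address space using kmap().
 */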
static struct nlx_core_buffer *nlx_kcore_core_buffer_map(
                                                struct nlx_kcore_buffer *kb)
{
        char *ptr;
        struct nlx_core_kmem_loc *loc;
        struct nlx_core_buffer *ret;

        loc = &kb->kb_cb_loc;
        ptr = kmap(loc->kl_page);
        ret = (struct nlx_core_buffer *) (ptr + loc->kl_offset);
        M0_POST(ret != NULL);
        return ret;
}

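/**
 * Unmaps the shared nlx_core_buffer object by kunmap()ing its page.
 */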
static void nlx_kcore_core_buffer_unmap(struct nlx_kcore_buffer *kb)
{
        kunmap(kb->kb_cb_loc.kl_page);
}

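/**
 * Maps the shared nlx_core_buffer_event object of a kernel buffer event into
 * the kernel address space using kmap().
 */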
static struct nlx_core_buffer_event *nlx_kcore_core_bev_map(
                                          struct nlx_kcore_buffer_event *kbe)
{
        char *ptr;
        struct nlx_core_kmem_loc *loc;
        struct nlx_core_buffer_event *ret;

        loc = &kbe->kbe_bev_loc;
        ptr = kmap(loc->kl_page);
        ret = (struct nlx_core_buffer_event *) (ptr + loc->kl_offset);
        M0_POST(ret != NULL);
        return ret;
}

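/**
 * Unmaps the shared nlx_core_buffer_event object by kunmap()ing its page.
 */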
static void nlx_kcore_core_bev_unmap(struct nlx_kcore_buffer_event *kbe)
{
        kunmap(kbe->kbe_bev_loc.kl_page);
}

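/**
 * Maps the shared nlx_core_transfer_mc object of a kernel transfer machine
 * into the kernel address space using kmap().
 */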
static struct nlx_core_transfer_mc *nlx_kcore_core_tm_map(
                                           struct nlx_kcore_transfer_mc *ktm)
{
        char *ptr;
        struct nlx_core_kmem_loc *loc;
        struct nlx_core_transfer_mc *ret;

        loc = &ktm->ktm_ctm_loc;
        ptr = kmap(loc->kl_page);
        ret = (struct nlx_core_transfer_mc *) (ptr + loc->kl_offset);
        M0_POST(ret != NULL);
        return ret;
}

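/**
 * Unmaps the shared nlx_core_transfer_mc object by kunmap()ing its page.
 */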
static void nlx_kcore_core_tm_unmap(struct nlx_kcore_transfer_mc *ktm)
{
        kunmap(ktm->ktm_ctm_loc.kl_page);
}

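/**
 * Maps the shared nlx_core_transfer_mc object using kmap_atomic(), for use
 * in contexts that cannot sleep; the caller must unmap it with
 * nlx_kcore_core_tm_unmap_atomic().  The KM_USER0 argument is only required
 * on kernels older than 3.10.
 */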
static struct nlx_core_transfer_mc *
nlx_kcore_core_tm_map_atomic(struct nlx_kcore_transfer_mc *ktm)
{
        char *ptr;
        struct nlx_core_kmem_loc *loc;
        struct nlx_core_transfer_mc *ret;

        loc = &ktm->ktm_ctm_loc;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
        ptr = kmap_atomic(loc->kl_page);
#else
        ptr = kmap_atomic(loc->kl_page, KM_USER0);
#endif
        ret = (struct nlx_core_transfer_mc *) (ptr + loc->kl_offset);
        M0_POST(ret != NULL);
        return ret;
}

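/**
 * Unmaps a transfer machine object previously mapped with
 * nlx_kcore_core_tm_map_atomic().
 */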
static void nlx_kcore_core_tm_unmap_atomic(struct nlx_core_transfer_mc *ctm)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
        kunmap_atomic(ctm);
#else
        kunmap_atomic(ctm, KM_USER0);
#endif
}

/* KLNetCore */

/*
 *  Local variables:
 *  c-indentation-style: "K&R"
 *  c-basic-offset: 8
 *  tab-width: 8
 *  fill-column: 79
 *  scroll-step: 1
 *  End:
 */