/* -*- C -*- */
/*
 * Copyright (c) 2012-2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */


/*
 * Kernel specific LNet unit tests.
 * The test cases are loaded from the address space agnostic ../lnet_ut.c
 * file.
 */

#include <linux/version.h> /* LINUX_VERSION_CODE */
#include <linux/module.h>  /* THIS_MODULE */
#include <linux/proc_fs.h>

enum {
        UT_PROC_WRITE_SIZE = 4, /* buffer for one-byte proc commands; value assumed */
};

static struct proc_dir_entry *proc_lnet_ut;

static struct m0_mutex ktest_mutex;
static struct m0_cond ktest_cond;
static struct m0_semaphore ktest_sem;
static int ktest_id;
static bool ktest_user_failed;
static bool ktest_done;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
static ssize_t read_lnet_ut(struct file *file, char __user *buffer, size_t len,
                            loff_t *offset)
{
        char code;
        int rc;

        m0_mutex_lock(&ktest_mutex);
        /* report DONE on user failure or once the sequence has finished */
        if (ktest_user_failed || ktest_id == UT_TEST_DONE)
                code = UT_TEST_DONE;
        else
                code = ktest_id;

        /* main thread will wait for user to read 1 DONE value */
        if (code == UT_TEST_DONE) {
                ktest_done = true;
                m0_cond_signal(&ktest_cond);
        }
        m0_mutex_unlock(&ktest_mutex);

        rc = copy_to_user(buffer, &code, sizeof code);
        if (rc != 0)
                return -EFAULT;
        return sizeof code;
}
#else
static int read_lnet_ut(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        m0_mutex_lock(&ktest_mutex);
        /* page[PAGE_SIZE] and simpleminded proc file */
        if (ktest_user_failed || ktest_id == UT_TEST_DONE)
                *page = UT_TEST_DONE;
        else
                *page = ktest_id;
        /* main thread will wait for user to read 1 DONE value */
        if (*page == UT_TEST_DONE) {
                ktest_done = true;
                *eof = 1;
                m0_cond_signal(&ktest_cond);
        }
        m0_mutex_unlock(&ktest_mutex);
        return 1;
}
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
static ssize_t write_lnet_ut(struct file *file, const char __user *buffer,
                             size_t count, loff_t *offset)
#else
static int write_lnet_ut(struct file *file, const char __user *buffer,
                         unsigned long count, void *data)
#endif
{
        char buf[UT_PROC_WRITE_SIZE];

        if (count >= UT_PROC_WRITE_SIZE) {
                printk("%s: writing wrong size %lu to proc file, max %d\n",
                       __func__, (unsigned long) count,
                       UT_PROC_WRITE_SIZE - 1);
                return -EINVAL;
        }
        if (copy_from_user(buf, buffer, count))
                return -EFAULT;
        m0_mutex_lock(&ktest_mutex);
        switch (*buf) {
        case UT_USER_READY:
                if (ktest_id != UT_TEST_NONE) {
                        ktest_user_failed = true;
                        count = -EINVAL;
                } else
                        ++ktest_id; /* advance to the first test */
                break;
        case UT_USER_SUCCESS:
                /* test passed */
                if (ktest_id == UT_TEST_NONE) {
                        ktest_user_failed = true;
                        count = -EINVAL;
                } else if (ktest_id == UT_TEST_MAX)
                        ktest_id = UT_TEST_DONE;
                else
                        ++ktest_id;
                break;
        case UT_USER_FAIL:
                /* test failed */
                if (ktest_id == UT_TEST_NONE)
                        count = -EINVAL;
                ktest_user_failed = true;
                break;
        default:
                printk("%s: unknown user test state: %02x\n", __func__, *buf);
                count = -EINVAL;
        }
        m0_cond_signal(&ktest_cond);
        m0_mutex_unlock(&ktest_mutex);
        return count;
}
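
/*
 * Together the read and write handlers implement a tiny synchronization
 * protocol with the user-space helper over the proc file.  A minimal sketch
 * of the user side (hypothetical helper code; only UT_PROC_NAME and the
 * UT_USER_ / UT_TEST_ codes come from this UT):
 *
 *   int fd = open("/proc/" UT_PROC_NAME, O_RDWR);
 *   char cmd = UT_USER_READY, code;
 *
 *   write(fd, &cmd, 1);                 // announce readiness
 *   while (read(fd, &code, 1) == 1 && code != UT_TEST_DONE) {
 *           cmd = run_test(code) == 0 ? UT_USER_SUCCESS : UT_USER_FAIL;
 *           write(fd, &cmd, 1);         // report each test's outcome
 *   }
 *   close(fd);
 */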

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
static int open_lnet_ut(struct inode *inode, struct file *file)
{
        return 0;
}

static int close_lnet_ut(struct inode *inode, struct file *file)
{
        return 0;
}

static struct file_operations proc_lnet_fops = {
        .owner   = THIS_MODULE,
        .open    = open_lnet_ut,
        .release = close_lnet_ut,
        .read    = read_lnet_ut,
        .write   = write_lnet_ut,
        .llseek  = default_llseek,
};
#endif

static int ktest_lnet_init(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
        proc_lnet_ut = proc_create(UT_PROC_NAME, 0644, NULL, &proc_lnet_fops);
#else
        proc_lnet_ut = create_proc_entry(UT_PROC_NAME, 0644, NULL);
#endif
        if (proc_lnet_ut == NULL)
                return -ENOENT;

        m0_mutex_init(&ktest_mutex);
        m0_cond_init(&ktest_cond, &ktest_mutex);
        m0_semaphore_init(&ktest_sem, 0);
        ktest_id = UT_TEST_NONE;
        ktest_user_failed = false;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
        proc_lnet_ut->read_proc = read_lnet_ut;
        proc_lnet_ut->write_proc = write_lnet_ut;
#endif
        return 0;
}

static void ktest_lnet_fini(void)
{
        M0_ASSERT(proc_lnet_ut != NULL);
        remove_proc_entry(UT_PROC_NAME, NULL);
        m0_semaphore_fini(&ktest_sem);
        m0_cond_fini(&ktest_cond);
        m0_mutex_fini(&ktest_mutex);
        proc_lnet_ut = NULL;
}

static bool ut_bufvec_alloc(struct m0_bufvec *bv, size_t n)
{
        M0_ALLOC_ARR(bv->ov_vec.v_count, n);
        M0_ALLOC_ARR(bv->ov_buf, n);
        if (bv->ov_vec.v_count == 0 || bv->ov_buf == NULL) {
                m0_free(bv->ov_vec.v_count);
                return false;
        }
        bv->ov_vec.v_nr = n;
        return true;
}

#define UT_BUFVEC_ALLOC(v,n)            \
if (!ut_bufvec_alloc(&v,n)) {           \
        M0_IMPOSSIBLE("no memory");     \
        return;                         \
}

#define UT_BUFVEC_FREE(v)               \
        m0_free(v.ov_vec.v_count);      \
        m0_free(v.ov_buf)

static void ktest_buf_shape(void)
{
        struct m0_net_domain dom1;
        struct m0_bufvec bv1;
        void *base;
        unsigned num_pages;

        M0_SET0(&dom1);
        M0_UT_ASSERT(!m0_net_domain_init(&dom1, &m0_net_lnet_xprt));

        /* buffer shape APIs */
        M0_UT_ASSERT(m0_net_domain_get_max_buffer_size(&dom1)
                     == LNET_MAX_PAYLOAD);
        M0_UT_ASSERT(m0_net_domain_get_max_buffer_segment_size(&dom1)
                     == PAGE_SIZE);
        M0_UT_ASSERT(m0_net_domain_get_max_buffer_segments(&dom1)
                     == LNET_MAX_IOV);

        /* test the segment page count computation */
        UT_BUFVEC_ALLOC(bv1, 1);
        base = (void *)((uint64_t)&base & PAGE_MASK);/* arbitrary, pg aligned */

#define EXP_SEG_COUNT(ptr,segsize,expcount)             \
        bv1.ov_buf[0] = (ptr);                          \
        bv1.ov_vec.v_count[0] = (segsize);              \
        num_pages = bufvec_seg_page_count(&bv1, 0);     \
        M0_UT_ASSERT(num_pages == (expcount))

        EXP_SEG_COUNT(base, PAGE_SIZE, 1);/* pg aligned, 1 pg */
        EXP_SEG_COUNT(base, PAGE_SIZE+1, 2);/* pg aligned,>1 pg */
        EXP_SEG_COUNT(base, PAGE_SIZE-1, 1);/* pg aligned,<1 pg */
        EXP_SEG_COUNT(base, 2*PAGE_SIZE, 2);/* pg aligned, 2 pg */
        EXP_SEG_COUNT(base+PAGE_SIZE/2, 2*PAGE_SIZE, 3);/* mid-pg, 2 pg */
        EXP_SEG_COUNT(base+PAGE_SIZE/2, PAGE_SIZE, 2);/* mid-pg, 1 pg */
        EXP_SEG_COUNT(base+PAGE_SIZE/2, PAGE_SIZE/2+1, 2);/* mid-pg, >0.5 pg */
        EXP_SEG_COUNT(base+PAGE_SIZE/2, PAGE_SIZE/2, 1);/* mid-pg, 0.5 pg */
        EXP_SEG_COUNT(base+PAGE_SIZE/2, PAGE_SIZE/2-1, 1);/* mid-pg, <0.5 pg */

#undef EXP_SEG_COUNT

        /* fini */
        UT_BUFVEC_FREE(bv1);
        m0_net_domain_fini(&dom1);
}
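
/*
 * The EXP_SEG_COUNT expectations above all reduce to simple page arithmetic;
 * a sketch of the computation bufvec_seg_page_count() must perform
 * (standard PAGE_SHIFT/PAGE_MASK semantics assumed):
 *
 *   uint64_t first = (uint64_t)ptr >> PAGE_SHIFT;
 *   uint64_t last  = ((uint64_t)ptr + segsize - 1) >> PAGE_SHIFT;
 *   num_pages      = last - first + 1;
 *
 * e.g. a PAGE_SIZE segment starting mid-page straddles two pages.
 */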

static void ktest_buf_reg(void)
{
        struct m0_net_domain dom1;
        struct m0_net_buffer nb1;
        struct m0_net_buffer nb3;
        m0_bcount_t bsize;
        m0_bcount_t bsegsize;
        int32_t bsegs;
        struct nlx_xo_buffer *xb;
        struct nlx_core_buffer *cb;
        struct nlx_kcore_buffer *kcb1;
        int i;
        struct m0_bufvec *v1;
        struct m0_bufvec *v3;
        m0_bcount_t thunk;
        struct nlx_core_domain *cd;
        struct nlx_xo_domain *dp;
        struct nlx_kcore_domain *kd;

        M0_SET0(&dom1);
        M0_SET0(&nb1);
        M0_SET0(&nb3);
        M0_UT_ASSERT(!m0_net_domain_init(&dom1, &m0_net_lnet_xprt));

        /* TEST
           Register a network buffer of maximal size and perfectly aligned on
           page boundaries.
        */
        bsize = LNET_MAX_PAYLOAD;
        bsegsize = PAGE_SIZE;
        bsegs = LNET_MAX_IOV;
        /* Allocate a bufvec into the buffer. */
        M0_UT_ASSERT(m0_bufvec_alloc(&nb1.nb_buffer, bsegs, bsegsize) == 0);
        M0_UT_ASSERT(m0_vec_count(&nb1.nb_buffer.ov_vec) == bsize);

        /* register the buffer */
        nb1.nb_flags = 0;
        M0_UT_ASSERT(!m0_net_buffer_register(&nb1, &dom1));
        M0_UT_ASSERT(nb1.nb_flags & M0_NET_BUF_REGISTERED);

        /* check the kcore data structure */
        xb = nb1.nb_xprt_private;
        cb = &xb->xb_core;
        kcb1 = cb->cb_kpvt;
        M0_UT_ASSERT(LNetMDHandleIsInvalid(kcb1->kb_mdh));
        M0_UT_ASSERT(kcb1->kb_kiov != NULL);
        M0_UT_ASSERT(kcb1->kb_kiov_len == bsegs);
        for (i = 0; i < kcb1->kb_kiov_len; ++i) {
                void *addr;
                M0_UT_ASSERT(kcb1->kb_kiov[i].kiov_len == bsegsize);
                M0_UT_ASSERT(kcb1->kb_kiov[i].kiov_offset == 0);
                addr = page_address(kcb1->kb_kiov[i].kiov_page);
                M0_UT_ASSERT(addr == nb1.nb_buffer.ov_buf[i]);
        }
        dp = dom1.nd_xprt_private;
        cd = &dp->xd_core;
        kd = cd->cd_kpvt;
        M0_UT_ASSERT(kd != NULL);
        v1 = &nb1.nb_buffer;
        /* TEST
           Provide a buffer whose m0_bufvec shape is legal, but whose kiov will
           exceed the internal limits.
           Use the same allocated memory segments from the first network buffer.
        */
        UT_BUFVEC_ALLOC(nb3.nb_buffer, bsegs);
        v3 = &nb3.nb_buffer;
        thunk = PAGE_SIZE;
        for (i = 0; i < v3->ov_vec.v_nr; ++i) {
                /* each segment spans 2 pages */
                v3->ov_vec.v_count[i] = thunk;
                v3->ov_buf[i] = v1->ov_buf[i] + PAGE_SIZE - 1;
        }

        /* register the buffer */
        nb3.nb_flags = 0;
        i = m0_net_buffer_register(&nb3, &dom1);
        M0_UT_ASSERT(i == -EFBIG);

        /* fini */
        m0_net_buffer_deregister(&nb1, &dom1);
        M0_UT_ASSERT(!(nb1.nb_flags & M0_NET_BUF_REGISTERED));

        m0_bufvec_free(&nb1.nb_buffer);
        UT_BUFVEC_FREE(nb3.nb_buffer); /* just vector, not segs */
        m0_net_domain_fini(&dom1);
}
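
/*
 * nb3 draws -EFBIG because each of its LNET_MAX_IOV segments begins one byte
 * before a page boundary and so spans two pages, requiring two kiov entries
 * per segment; the resulting kiov would be twice the LNET_MAX_IOV limit.
 */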

static void ktest_core_ep_addr(void)
{
        struct nlx_xo_domain dom;
        struct nlx_core_ep_addr tmaddr;
        const char *epstr[] = {
                "127.0.0.1@tcp:12345:30:10",
                "127.0.0.1@tcp:12345:30:*",
                "4.4.4.4@tcp:42:29:28"
        };
        const char *failepstr[] = {
                "notip@tcp:12345:30:10",
                "notnid:12345:30:10",
                "127.0.0.1@tcp:notpid:30:10",
                "127.0.0.1@tcp:12:notportal:10",
                "127.0.0.1@tcp:12:30:nottm",
                "127.0.0.1@tcp:12:30:-10",  /* positive required */
                "127.0.0.1@tcp:12:30:4096", /* not in range */
        };
        const struct nlx_core_ep_addr ep_addr[] = {
                {
                        .cepa_pid = 12345,
                        .cepa_portal = 30,
                        .cepa_tmid = 10,
                },
                {
                        .cepa_pid = 12345,
                        .cepa_portal = 30,
                        .cepa_tmid = M0_NET_LNET_TMID_INVALID,
                },
                {
                        .cepa_pid = 42,
                        .cepa_portal = 29,
                        .cepa_tmid = 28,
                },
        };
        char buf[M0_NET_LNET_XEP_ADDR_LEN];
        char **nidstrs;
        int rc;
        int i;

        M0_UT_ASSERT(!nlx_core_nidstrs_get(&dom.xd_core, &nidstrs));
        M0_UT_ASSERT(nidstrs != NULL);
        for (i = 0; nidstrs[i] != NULL; ++i) {
                char *network;
                network = strchr(nidstrs[i], '@');
                if (network != NULL && strcmp(network, "@tcp") == 0)
                        break;
        }

        /* whether we're using tcp network */
        if (nidstrs[i] != NULL) {
                for (i = 0; i < ARRAY_SIZE(epstr); ++i) {
                        rc = nlx_core_ep_addr_decode(&dom.xd_core, epstr[i],
                                                     &tmaddr);
                        M0_UT_ASSERT(rc == 0);
                        M0_UT_ASSERT(ep_addr[i].cepa_pid == tmaddr.cepa_pid);
                        M0_UT_ASSERT(ep_addr[i].cepa_portal ==
                                     tmaddr.cepa_portal);
                        M0_UT_ASSERT(ep_addr[i].cepa_tmid == tmaddr.cepa_tmid);
                        nlx_core_ep_addr_encode(&dom.xd_core, &tmaddr, buf);
                        M0_UT_ASSERT(strcmp(buf, epstr[i]) == 0);
                }
        }
        nlx_core_nidstrs_put(&dom.xd_core, &nidstrs);
        M0_UT_ASSERT(nidstrs == NULL);

        for (i = 0; i < ARRAY_SIZE(failepstr); ++i) {
                rc = nlx_core_ep_addr_decode(&dom.xd_core, failepstr[i],
                                             &tmaddr);
                M0_UT_ASSERT(rc == -EINVAL);
        }
}
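
/*
 * The address format exercised above is "<nid>:<pid>:<portal>:<tmid>",
 * e.g. "127.0.0.1@tcp:12345:30:10", where a tmid of "*" requests a
 * dynamically assigned transfer machine identifier; decode must reject
 * non-numeric fields, negative TM ids and TM ids of 4096 or more.
 */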

static void ktest_enc_dec(void)
{
        uint32_t tmid;
        uint32_t portal;
        struct nlx_core_transfer_mc ctm;
        struct nlx_kcore_transfer_mc ktm = {
                .ktm_magic = M0_NET_LNET_KCORE_TM_MAGIC, /* field assumed */
        };

        /* TEST
           Check that hdr data decode reverses encode.
        */
        nlx_core_kmem_loc_set(&ktm.ktm_ctm_loc, virt_to_page(&ctm),
                              NLX_PAGE_OFFSET((unsigned long) &ctm));
        M0_UT_ASSERT(nlx_kcore_tm_invariant(&ktm)); /* to make this pass */

#define TEST_HDR_DATA_ENCODE(_p, _t)                                    \
        ktm.ktm_addr.cepa_tmid = (_t);                                  \
        ktm.ktm_addr.cepa_portal = (_p);                                \
        nlx_kcore_hdr_data_decode(nlx_kcore_hdr_data_encode(&ktm),      \
                                  &portal, &tmid);                      \
        M0_UT_ASSERT(portal == (_p));                                   \
        M0_UT_ASSERT(tmid == (_t))

        TEST_HDR_DATA_ENCODE(0, 0);
        TEST_HDR_DATA_ENCODE(30, 0);
        TEST_HDR_DATA_ENCODE(30, 4095);
        TEST_HDR_DATA_ENCODE(63, 0);
        TEST_HDR_DATA_ENCODE(63, 4095);

#undef TEST_HDR_DATA_ENCODE
}
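
/*
 * The round-trip above works because encode packs the portal and TM id into
 * disjoint bit fields of one 64-bit word; a sketch of an encoding consistent
 * with the limits tested (portal <= 63, tmid <= 4095 -- the exact field
 * layout of nlx_kcore_hdr_data_encode() is assumed):
 *
 *   uint64_t hdr_data = ((uint64_t)portal << 12) | tmid;
 *   portal = hdr_data >> 12;
 *   tmid   = hdr_data & 0xfff;
 */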

/* ktest_msg */
enum {
        UT_KMSG_OPS = 4, /* ops per message receive MD; value assumed */
};

static int ut_ktest_msg_LNetMDAttach(struct nlx_kcore_transfer_mc *kctm,
                                     struct nlx_core_buffer *lcbuf,
                                     struct nlx_kcore_buffer *kcb,
                                     lnet_md_t *umd)
{
        uint32_t tmid;
        uint64_t counter;

        NLXDBG(kctm, 1, printk("intercepted LNetMDAttach\n"));
        NLXDBG(kctm, 1, nlx_kprint_lnet_md("ktest_msg", umd));

        M0_UT_ASSERT(umd->options & LNET_MD_KIOV);
        M0_UT_ASSERT(umd->start == kcb->kb_kiov);
        M0_UT_ASSERT(umd->length == kcb->kb_kiov_len);

        M0_UT_ASSERT(umd->threshold == UT_KMSG_OPS);

        M0_UT_ASSERT(umd->options & LNET_MD_MAX_SIZE);
        M0_UT_ASSERT(umd->max_size == UT_MSG_SIZE);

        M0_UT_ASSERT(umd->options & LNET_MD_OP_PUT);
        M0_UT_ASSERT(umd->user_ptr == kcb);
        M0_UT_ASSERT(LNetHandleIsEqual(umd->eq_handle, kctm->ktm_eqh));

        nlx_kcore_match_bits_decode(lcbuf->cb_match_bits, &tmid, &counter);
        M0_UT_ASSERT(tmid == kctm->ktm_addr.cepa_tmid);
        M0_UT_ASSERT(counter == 0);

        kcb->kb_ktm = kctm;

        return 0;
}

static int ut_ktest_msg_LNetPut(struct nlx_kcore_transfer_mc *kctm,
                                struct nlx_core_buffer *lcbuf,
                                struct nlx_kcore_buffer *kcb,
                                lnet_md_t *umd)
{
        struct nlx_core_ep_addr *cepa;
        size_t len;
        unsigned last;

        NLXDBG(kctm, 1, printk("intercepted LNetPut\n"));
        NLXDBG(kctm, 1, nlx_kprint_lnet_md("ktest_msg", umd));

        M0_UT_ASSERT((lnet_kiov_t *) umd->start == kcb->kb_kiov);
        len = nlx_kcore_num_kiov_entries_for_bytes((lnet_kiov_t *) umd->start,
                                                   kcb->kb_kiov_len,
                                                   lcbuf->cb_length,
                                                   &last);
        M0_UT_ASSERT(umd->length == len);
        M0_UT_ASSERT(umd->options & LNET_MD_KIOV);
        M0_UT_ASSERT(umd->threshold == 1);
        M0_UT_ASSERT(umd->user_ptr == kcb);
        M0_UT_ASSERT(umd->max_size == 0);
        M0_UT_ASSERT(!(umd->options & (LNET_MD_OP_PUT | LNET_MD_OP_GET)));
        M0_UT_ASSERT(LNetHandleIsEqual(umd->eq_handle, kctm->ktm_eqh));

        /* destination address is conveyed in the core buffer */
        cepa = &lcbuf->cb_addr;
        M0_UT_ASSERT(nlx_core_ep_eq(cepa, &lcbuf->cb_addr));

        kcb->kb_ktm = kctm;

        return 0;
}

static struct m0_chan *ut_ktest_msg_buf_event_wait_delay_chan;

static int ut_ktest_msg_buf_event_wait(struct nlx_core_domain *lcdom,
                                       struct nlx_core_transfer_mc *lctm,
                                       m0_time_t timeout)
{
        /* the test body can stall delivery by setting the delay channel */
        if (ut_ktest_msg_buf_event_wait_delay_chan != NULL) {
                struct m0_clink cl;

                m0_clink_init(&cl, NULL);
                m0_clink_add_lock(ut_ktest_msg_buf_event_wait_delay_chan, &cl);
                if (!m0_chan_timedwait(&cl, timeout)) {
                        m0_clink_del_lock(&cl);
                        return -ETIMEDOUT;
                }
                m0_clink_del_lock(&cl);
        }
        return nlx_core_buf_event_wait(lcdom, lctm, timeout);
}

static bool ut_ktest_msg_ep_create_fail; /* set to inject ep create failure */

static int ut_ktest_msg_ep_create(struct m0_net_end_point **epp,
                                  struct m0_net_transfer_mc *tm,
                                  const struct nlx_core_ep_addr *cepa)
{
        if (ut_ktest_msg_ep_create_fail) {
                return -ENOMEM;
        }
        return nlx_ep_create(epp, tm, cepa);
}

static void ut_ktest_msg_put_event(struct nlx_kcore_buffer *kcb,
                                   unsigned mlength,
                                   unsigned offset,
                                   int status,
                                   int unlinked,
                                   struct nlx_core_ep_addr *addr)
{
        lnet_event_t ev;

        M0_SET0(&ev);
        ev.md.user_ptr = kcb;
        ev.type = LNET_EVENT_PUT;
        ev.mlength = mlength;
        ev.rlength = mlength;
        ev.offset = offset;
        ev.status = status;
        ev.unlinked = unlinked;
        ev.initiator.nid = addr->cepa_nid;
        ev.initiator.pid = addr->cepa_pid;
        ev.hdr_data = nlx_kcore_hdr_data_encode_raw(addr->cepa_tmid,
                                                    addr->cepa_portal);
        nlx_kcore_eq_cb(&ev);
}

static void ut_ktest_msg_send_event(struct nlx_kcore_buffer *kcb,
                                    unsigned mlength,
                                    int status)
{
        lnet_event_t ev;

        M0_SET0(&ev);
        ev.md.user_ptr = kcb;
        ev.type = LNET_EVENT_SEND;
        ev.mlength = mlength;
        ev.rlength = mlength;
        ev.offset = 0;
        ev.status = status;
        ev.unlinked = 1;
        ev.hdr_data = 0;
        nlx_kcore_eq_cb(&ev);
}

/* Scatter ACK events in all the test suites. They should be ignored. */
static void ut_ktest_ack_event(struct nlx_kcore_buffer *kcb)
{
        lnet_event_t ev;

        M0_SET0(&ev);
        ev.type = LNET_EVENT_ACK;
        ev.unlinked = 1;
        nlx_kcore_eq_cb(&ev);
}


/* memory duplicate only; no ref count increment */
static lnet_kiov_t *ut_ktest_kiov_mem_dup(const lnet_kiov_t *kiov, size_t len)
{
        lnet_kiov_t *k;
        size_t i;

        M0_ALLOC_ARR(k, len);
        M0_UT_ASSERT(k != NULL);
        if (k == NULL)
                return NULL;
        for (i = 0; i < len; ++i, ++kiov) {
                k[i] = *kiov;
        }
        return k;
}

/* inverse of mem_dup */
static void ut_ktest_kiov_mem_free(lnet_kiov_t *kiov)
{
        m0_free(kiov);
}

static bool ut_ktest_kiov_eq(const lnet_kiov_t *k1,
                             const lnet_kiov_t *k2,
                             size_t len)
{
        int i;

        for (i = 0; i < len; ++i, ++k1, ++k2)
                if (k1->kiov_page != k2->kiov_page ||
                    k1->kiov_len != k2->kiov_len ||
                    k1->kiov_offset != k2->kiov_offset)
                        return false;
        return true;
}

static unsigned ut_ktest_kiov_count(const lnet_kiov_t *k, size_t len)
{
        unsigned count;
        size_t i;

        for (i = 0, count = 0; i < len; ++i, ++k) {
                count += k->kiov_len;
        }
        return count;
}

static void ktest_msg_body(struct ut_data *td)
{
        struct m0_net_buffer *nb1 = &td->bufs1[0];
        struct nlx_xo_transfer_mc *tp1 = TM1->ntm_xprt_private;
        struct nlx_core_transfer_mc *lctm1 = &tp1->xtm_core;
        struct nlx_xo_buffer *bp1 = nb1->nb_xprt_private;
        struct nlx_core_buffer *lcbuf1 = &bp1->xb_core;
        struct nlx_kcore_transfer_mc *kctm1 = lctm1->ctm_kpvt;
        struct nlx_kcore_buffer *kcb1 = lcbuf1->cb_kpvt;
        struct nlx_core_ep_addr *cepa;
        struct nlx_core_ep_addr addr;
        lnet_md_t umd;
        lnet_kiov_t *kdup;
        int needed;
        unsigned len;
        unsigned offset;
        unsigned bevs_left;
        unsigned count;
        unsigned last;

        /* TEST
           Check that the lnet_md_t is properly constructed from a registered
           network buffer.
        */
        NLXDBGPnl(td,1,"TEST: net buffer to MD\n");

        nb1->nb_min_receive_size = UT_MSG_SIZE;
        nb1->nb_max_receive_msgs = 1;
        nb1->nb_qtype = M0_NET_QT_MSG_RECV;

        nlx_kcore_umd_init(kctm1, lcbuf1, kcb1, 1, 1, 0, false, &umd);
        M0_UT_ASSERT(umd.start == kcb1->kb_kiov);
        M0_UT_ASSERT(umd.length == kcb1->kb_kiov_len);
        M0_UT_ASSERT(umd.options & LNET_MD_KIOV);
        M0_UT_ASSERT(umd.user_ptr == kcb1);
        M0_UT_ASSERT(LNetHandleIsEqual(umd.eq_handle, kctm1->ktm_eqh));

        /* TEST
           Check the count of kiov elements required for different byte
           sizes.
        */
        NLXDBGPnl(td,1,"TEST: kiov size arithmetic\n");

#define KEB(b)                                                          \
        nlx_kcore_num_kiov_entries_for_bytes((lnet_kiov_t *) umd.start, \
                                             umd.length, (b), &last)

        M0_UT_ASSERT(KEB(td->buf_size1) == umd.length);
        M0_UT_ASSERT(last == PAGE_SIZE);

        M0_UT_ASSERT(KEB(1) == 1);
        M0_UT_ASSERT(last == 1);

        M0_UT_ASSERT(KEB(PAGE_SIZE - 1) == 1);
        M0_UT_ASSERT(last == PAGE_SIZE - 1);

        M0_UT_ASSERT(KEB(PAGE_SIZE) == 1);
        M0_UT_ASSERT(last == PAGE_SIZE);

        M0_UT_ASSERT(KEB(PAGE_SIZE + 1) == 2);
        M0_UT_ASSERT(last == 1);

        M0_UT_ASSERT(KEB(2 * PAGE_SIZE + PAGE_SIZE / 2) == 3);
        M0_UT_ASSERT(last == PAGE_SIZE / 2);

#undef KEB
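
/*
 * For the full-page kiov built above, KEB is a ceiling division with a
 * remainder; a sketch of the equivalent arithmetic (each kiov entry assumed
 * to cover one full page, as registered by the test framework):
 *
 *   entries = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
 *   last    = bytes - (entries - 1) * PAGE_SIZE; // bytes in the final entry
 */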

        /* TEST
           Ensure that the kiov adjustment and restoration logic works
           correctly.
        */
        NLXDBGPnl(td,1,"TEST: kiov adjustments for length\n");

        kdup = ut_ktest_kiov_mem_dup(kcb1->kb_kiov, kcb1->kb_kiov_len);
        if (kdup == NULL)
                goto done;
        M0_UT_ASSERT(ut_ktest_kiov_eq(kcb1->kb_kiov, kdup, kcb1->kb_kiov_len));

        /* init the UMD that will be adjusted */
        nlx_kcore_umd_init(kctm1, lcbuf1, kcb1, 1, 0, 0, false, &umd);
        M0_UT_ASSERT(kcb1->kb_kiov == umd.start);
        M0_UT_ASSERT(ut_ktest_kiov_count(umd.start,umd.length)
                     == td->buf_size1);
        M0_UT_ASSERT(UT_MSG_SIZE < td->buf_size1);

        /* Adjust for message size. This should not modify the kiov data. */
        {
                size_t size;
                lnet_kiov_t *k = kcb1->kb_kiov;
                size = kcb1->kb_kiov_len;
                nlx_kcore_kiov_adjust_length(kctm1, kcb1, &umd, UT_MSG_SIZE);
                M0_UT_ASSERT(kcb1->kb_kiov == k);
                M0_UT_ASSERT(kcb1->kb_kiov_len == size);
        }

        /* validate adjustments */
        M0_UT_ASSERT(ut_ktest_kiov_count(umd.start, umd.length) == UT_MSG_SIZE);
        M0_UT_ASSERT(umd.length == (UT_MSG_SIZE / PAGE_SIZE + 1));
        M0_UT_ASSERT(kcb1->kb_kiov_len != umd.length);
        M0_UT_ASSERT(kcb1->kb_kiov_adj_idx == umd.length - 1);
        M0_UT_ASSERT(!ut_ktest_kiov_eq(kcb1->kb_kiov, kdup, kcb1->kb_kiov_len));
        M0_UT_ASSERT(kcb1->kb_kiov[umd.length - 1].kiov_len !=
                     kdup[umd.length - 1].kiov_len);
        M0_UT_ASSERT(kcb1->kb_kiov_orig_len == kdup[umd.length - 1].kiov_len);

        /* validate restoration */
        nlx_kcore_kiov_restore_length(kcb1);
        M0_UT_ASSERT(ut_ktest_kiov_eq(kcb1->kb_kiov, kdup, kcb1->kb_kiov_len));
        M0_UT_ASSERT(ut_ktest_kiov_count(kcb1->kb_kiov, kcb1->kb_kiov_len)
                     == td->buf_size1);

        ut_ktest_kiov_mem_free(kdup);

        /* TEST
           Enqueue a buffer for message reception.
           Check that buf_msg_recv sends the correct arguments to LNet.
           Check that the needed count is correctly incremented.
           Intercept the utils sub to validate.
        */
        NLXDBGPnl(td,1,"TEST: receive queue logic\n");

        nb1->nb_min_receive_size = UT_MSG_SIZE;
        nb1->nb_max_receive_msgs = UT_KMSG_OPS;
        nb1->nb_qtype = M0_NET_QT_MSG_RECV;
        needed = lctm1->ctm_bev_needed;
        bevs_left = nb1->nb_max_receive_msgs;

        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + UT_KMSG_OPS);
        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 1);
        count = bev_cqueue_size(&lctm1->ctm_bevq);
        M0_UT_ASSERT(count >= lctm1->ctm_bev_needed);

        /* TEST
           Send a sequence of successful events.
           The buffer should not get dequeued until the last event.
           The length and offset reported should be as sent.
           The reference count on end points is as it should be.
        */
        NLXDBGPnl(td,1,"TEST: bevq delivery, single\n");

        ut_cbreset();
        cb_save_ep1 = true;
        cepa = nlx_ep_to_core(TM1->ntm_ep);
        addr.cepa_nid = cepa->cepa_nid; /* use real NID */
        addr.cepa_pid = 22; /* arbitrary */
        M0_UT_ASSERT(cepa->cepa_tmid > 10);
        addr.cepa_tmid = cepa->cepa_tmid - 10; /* arbitrarily different */
        addr.cepa_portal = 35; /* arbitrary */
        offset = 0;
        len = 1;
        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_msg_put_event(kcb1, len, offset, 0, 0, &addr);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_MSG_RECV);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_length1 == len);
        M0_UT_ASSERT(cb_offset1 == offset);
        M0_UT_ASSERT(cb_ep1 != NULL);
        cepa = nlx_ep_to_core(cb_ep1);
        M0_UT_ASSERT(nlx_core_ep_eq(cepa, &addr));
        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 2);
        /* do not release end point yet */
        cb_ep1 = NULL;

        ut_cbreset();
        cb_save_ep1 = true;
        offset += len;
        len = 10; /* arbitrary */
        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_msg_put_event(kcb1, len, offset, 0, 0, &addr);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_MSG_RECV);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_length1 == len);
        M0_UT_ASSERT(cb_offset1 == offset);
        M0_UT_ASSERT(cb_ep1 != NULL);
        cepa = nlx_ep_to_core(cb_ep1);
        M0_UT_ASSERT(nlx_core_ep_eq(cepa, &addr));
        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 2);
        m0_net_end_point_put(cb_ep1);
        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 1);
        cb_ep1 = NULL;

        ut_cbreset();
        cb_save_ep1 = true;
        M0_UT_ASSERT(addr.cepa_tmid > 12);
        addr.cepa_tmid -= 12;
        offset += len;
        len = 11;
        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_msg_put_event(kcb1, len, offset, 0, 1, &addr);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_MSG_RECV);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_length1 == len);
        M0_UT_ASSERT(cb_offset1 == offset);
        M0_UT_ASSERT(cb_ep1 != NULL);
        cepa = nlx_ep_to_core(cb_ep1);
        M0_UT_ASSERT(nlx_core_ep_eq(cepa, &addr));
        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 2);
        m0_net_end_point_put(cb_ep1);
        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 1);
        cb_ep1 = NULL;

        M0_UT_ASSERT(!(nb1->nb_flags & M0_NET_BUF_QUEUED));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        M0_UT_ASSERT(bev_cqueue_size(&lctm1->ctm_bevq) == count); /* !freed */

        /* TEST
           Send a sequence of successful events.
           Arrange that they build up in the circular queue, so
           that the "deliver_all" will have multiple events
           to process.
        */
        NLXDBGPnl(td,1,"TEST: bevq delivery, batched\n");

        /* enqueue buffer */
        nb1->nb_min_receive_size = UT_MSG_SIZE;
        nb1->nb_max_receive_msgs = UT_KMSG_OPS;
        nb1->nb_qtype = M0_NET_QT_MSG_RECV;
        needed = lctm1->ctm_bev_needed;
        bevs_left = nb1->nb_max_receive_msgs;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + UT_KMSG_OPS);

        /* stall event delivery */
        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);
        cb_save_ep1 = false;
        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        ut_ktest_msg_buf_event_wait_delay_chan = &TM2->ntm_chan;/* unused */
        ut_chan_timedwait(&td->tmwait1, 1);/* wait for acknowledgment */

        /* start pushing put events */
        count = 0;

        offset = 0;
        len = 5;
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_msg_put_event(kcb1, len, offset, 0, 0, &addr);
        count++;
        M0_UT_ASSERT(cb_called1 == 0);

        offset += len;
        len = 10;
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_msg_put_event(kcb1, len, offset, 0, 0, &addr);
        count++;
        M0_UT_ASSERT(cb_called1 == 0);

        offset += len;
        len = 15;
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_msg_put_event(kcb1, len, offset, 0, 1, &addr);
        count++;
        M0_UT_ASSERT(cb_called1 == 0);

        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 1);

        /* open the spigot ... */
        ut_ktest_msg_buf_event_wait_delay_chan = NULL;
        m0_chan_signal_lock(&TM2->ntm_chan);
        while (cb_called1 < count) {
                ut_chan_timedwait(&td->tmwait1,1);
        }
        m0_clink_del_lock(&td->tmwait1);

        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 1);
        M0_UT_ASSERT(!(nb1->nb_flags & M0_NET_BUF_QUEUED));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);

        /* TEST
           Enqueue a receive buffer. Send a sequence of events. Arrange for
           failure in the ep creation subroutine.
           The buffer should not get dequeued until the last event.
           The ep failure should not invoke the callback, but the failure
           statistics should be updated.
        */
        NLXDBGPnl(td,1,"TEST: EP failure during message receive\n");

        /* enqueue buffer */
        nb1->nb_min_receive_size = UT_MSG_SIZE;
        nb1->nb_max_receive_msgs = UT_KMSG_OPS;
        nb1->nb_qtype = M0_NET_QT_MSG_RECV;
        needed = lctm1->ctm_bev_needed;
        bevs_left = nb1->nb_max_receive_msgs;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + UT_KMSG_OPS);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);
        cb_save_ep1 = true;

        /* verify normal delivery */
        offset = 0;
        len = 5;
        M0_UT_ASSERT(bevs_left-- > 0);
        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        ut_ktest_msg_put_event(kcb1, len, offset, 0, 0, &addr);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_MSG_RECV);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_length1 == len);
        M0_UT_ASSERT(cb_offset1 == offset);
        M0_UT_ASSERT(cb_ep1 != NULL);
        cepa = nlx_ep_to_core(cb_ep1);
        M0_UT_ASSERT(nlx_core_ep_eq(cepa, &addr));
        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 2);
        m0_net_end_point_put(cb_ep1);
        cb_ep1 = NULL;

        /* arrange for ep creation failure */
        ut_ktest_msg_ep_create_fail = true;
        count = 0;
        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);
        cb_save_ep1 = true;

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);

        addr.cepa_portal += 1;
        offset += len;
        len = 15;
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_msg_put_event(kcb1, len, offset, 0, 0, &addr);
        count++;

        offset += len;
        len = 5;
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_msg_put_event(kcb1, len, offset, 0, 1, &addr);
        count++;

        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1); /* just one callback! */
        M0_UT_ASSERT(cb_status1 == -ENOMEM);
        zUT(m0_net_tm_stats_get(TM1, M0_NET_QT_MSG_RECV, &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_s_events + td->qs.nqs_num_f_events
                     == count + 1);
        M0_UT_ASSERT(cb_ep1 == NULL); /* no EP */
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 0);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 1);
        M0_UT_ASSERT(td->qs.nqs_num_f_events == count);

        M0_UT_ASSERT(m0_nep_tlist_length(&TM1->ntm_end_points) == 1);
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        ut_ktest_msg_ep_create_fail = false;

        /* TEST
           Enqueue a buffer for sending.
           Ensure that the destination address is correctly conveyed.
           Intercept the utils sub to validate.
           Ensure that the reference count of the ep is maintained correctly.
           Send a success event.
        */
        NLXDBGPnl(td,1,"TEST: send queue success logic\n");

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        { /* create a destination end point */
                char epstr[M0_NET_LNET_XEP_ADDR_LEN];
                sprintf(epstr, "%s:%d:%d:1024", /* fields assumed */
                        td->nidstrs1[0], STARTSTOP_PID, STARTSTOP_PORTAL);
                zUT(m0_net_end_point_create(&nb1->nb_ep,
                                            TM1, epstr));
        }

        nb1->nb_min_receive_size = 0;
        nb1->nb_max_receive_msgs = 0;
        nb1->nb_qtype = M0_NET_QT_MSG_SEND;
        nb1->nb_length = UT_MSG_SIZE;
        needed = lctm1->ctm_bev_needed;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);
        /* this transport does not pin the ep, but the network layer does */

        /* deliver the completion event */
        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_msg_send_event(kcb1, UT_MSG_SIZE, 0);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_MSG_SEND);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_ep1 == NULL);
        zUT(m0_net_tm_stats_get(TM1, M0_NET_QT_MSG_SEND, &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 0);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 1);
        M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);

        /* TEST
           Repeat previous test, but send a failure event.
        */
        NLXDBGPnl(td,1,"TEST: send queue failure logic\n");

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        { /* create a destination end point */
                char epstr[M0_NET_LNET_XEP_ADDR_LEN];
                sprintf(epstr, "%s:%d:%d:1024", /* fields assumed */
                        td->nidstrs1[0], STARTSTOP_PID, STARTSTOP_PORTAL);
                zUT(m0_net_end_point_create(&nb1->nb_ep,
                                            TM1, epstr));
        }

        nb1->nb_min_receive_size = 0;
        nb1->nb_max_receive_msgs = 0;
        nb1->nb_qtype = M0_NET_QT_MSG_SEND;
        nb1->nb_length = UT_MSG_SIZE;
        needed = lctm1->ctm_bev_needed;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);
        /* this transport does not pin the ep, but the network layer does */

        /* deliver the completion event */
        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        ut_ktest_msg_send_event(kcb1, UT_MSG_SIZE, -1);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_MSG_SEND);
        M0_UT_ASSERT(cb_status1 == -1);
        M0_UT_ASSERT(cb_ep1 == NULL);
        zUT(m0_net_tm_stats_get(TM1, M0_NET_QT_MSG_SEND, &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 0);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 0);
        M0_UT_ASSERT(td->qs.nqs_num_f_events == 1);

 done:
        cb_ep1 = NULL;
}

static void ktest_msg(void)
{
        ut_save_subs();

        /* intercept these before the TM starts */
        nlx_kcore_iv._nlx_kcore_LNetMDAttach = ut_ktest_msg_LNetMDAttach;
        nlx_kcore_iv._nlx_kcore_LNetPut = ut_ktest_msg_LNetPut;
        nlx_xo_iv._nlx_core_buf_event_wait = ut_ktest_msg_buf_event_wait;
        nlx_xo_iv._nlx_ep_create = ut_ktest_msg_ep_create;

        ut_test_framework(&ktest_msg_body, NULL, ut_verbose);

        ut_restore_subs();
        ut_ktest_msg_buf_event_wait_delay_chan = NULL;
}

static int ut_ktest_bulk_LNetMDAttach(struct nlx_kcore_transfer_mc *kctm,
                                      struct nlx_core_buffer *lcbuf,
                                      struct nlx_kcore_buffer *kcb,
                                      lnet_md_t *umd)
{
        uint32_t tmid;
        uint64_t counter;

        NLXDBG(kctm, 1, printk("intercepted LNetMDAttach (bulk)\n"));
        NLXDBG(kctm, 2, nlx_kprint_lnet_md("ktest_bulk", umd));

        M0_UT_ASSERT(umd->options & LNET_MD_KIOV);
        M0_UT_ASSERT(umd->start == kcb->kb_kiov);

        M0_UT_ASSERT(umd->threshold == 1);
        M0_UT_ASSERT(!(umd->options & LNET_MD_MAX_SIZE));

        if (lcbuf->cb_qtype == M0_NET_QT_PASSIVE_BULK_RECV) {
                M0_UT_ASSERT(umd->options & LNET_MD_OP_PUT);
                M0_UT_ASSERT(!(umd->options & LNET_MD_OP_GET));
                M0_UT_ASSERT(umd->length == kcb->kb_kiov_len);
        } else {
                size_t len;
                unsigned last;
                M0_UT_ASSERT(umd->options & LNET_MD_OP_GET);
                M0_UT_ASSERT(!(umd->options & LNET_MD_OP_PUT));
                len = nlx_kcore_num_kiov_entries_for_bytes(kcb->kb_kiov,
                                                           kcb->kb_kiov_len,
                                                           lcbuf->cb_length,
                                                           &last);
                M0_UT_ASSERT(umd->length == len);
        }

        M0_UT_ASSERT(umd->user_ptr == kcb);
        M0_UT_ASSERT(LNetHandleIsEqual(umd->eq_handle, kctm->ktm_eqh));

        nlx_kcore_match_bits_decode(lcbuf->cb_match_bits, &tmid, &counter);
        M0_UT_ASSERT(tmid == kctm->ktm_addr.cepa_tmid);
        M0_UT_ASSERT(counter != 0);

        /* fake the attach for bulk buffers; pass others to the real sub */
        if (lcbuf->cb_qtype != M0_NET_QT_MSG_RECV) {
                kcb->kb_ktm = kctm;
                return 0;
        }
        return nlx_kcore_LNetMDAttach(kctm, lcbuf, kcb, umd);
}

static int ut_ktest_bulk_LNetGet(struct nlx_kcore_transfer_mc *kctm,
                                 struct nlx_core_buffer *lcbuf,
                                 struct nlx_kcore_buffer *kcb,
                                 lnet_md_t *umd)
{
        size_t len;
        unsigned last;

        NLXDBG(kctm, 1, printk("intercepted LNetGet (bulk)\n"));
        NLXDBG(kctm, 2, nlx_kprint_lnet_md("ktest_bulk", umd));

        M0_UT_ASSERT((lnet_kiov_t *) umd->start == kcb->kb_kiov);
        len = nlx_kcore_num_kiov_entries_for_bytes(kcb->kb_kiov,
                                                   kcb->kb_kiov_len,
                                                   lcbuf->cb_length,
                                                   &last);
        M0_UT_ASSERT(umd->length == len);
        M0_UT_ASSERT(umd->options & LNET_MD_KIOV);
        M0_UT_ASSERT(umd->threshold == 2); /* note */
        M0_UT_ASSERT(umd->user_ptr == kcb);
        M0_UT_ASSERT(umd->max_size == 0);
        M0_UT_ASSERT(!(umd->options & (LNET_MD_OP_PUT | LNET_MD_OP_GET)));
        M0_UT_ASSERT(LNetHandleIsEqual(umd->eq_handle, kctm->ktm_eqh));

        kcb->kb_ktm = kctm;

        return 0;
}

static int ut_ktest_bulk_LNetPut(struct nlx_kcore_transfer_mc *kctm,
                                 struct nlx_core_buffer *lcbuf,
                                 struct nlx_kcore_buffer *kcb,
                                 lnet_md_t *umd)
{
        size_t len;
        unsigned last;

        NLXDBG(kctm, 1, printk("intercepted LNetPut (bulk)\n"));
        NLXDBG(kctm, 2, nlx_kprint_lnet_md("ktest_bulk", umd));

        M0_UT_ASSERT((lnet_kiov_t *) umd->start == kcb->kb_kiov);
        len = nlx_kcore_num_kiov_entries_for_bytes(kcb->kb_kiov,
                                                   kcb->kb_kiov_len,
                                                   lcbuf->cb_length,
                                                   &last);
        M0_UT_ASSERT(umd->length == len);
        M0_UT_ASSERT(umd->options & LNET_MD_KIOV);
        M0_UT_ASSERT(umd->threshold == 1);
        M0_UT_ASSERT(umd->user_ptr == kcb);
        M0_UT_ASSERT(umd->max_size == 0);
        M0_UT_ASSERT(!(umd->options & (LNET_MD_OP_PUT | LNET_MD_OP_GET)));
        M0_UT_ASSERT(LNetHandleIsEqual(umd->eq_handle, kctm->ktm_eqh));

        kcb->kb_ktm = kctm;

        return 0;
}

static void ut_ktest_bulk_put_event(struct nlx_kcore_buffer *kcb,
                                    unsigned mlength,
                                    int status)
{
        lnet_event_t ev;

        M0_SET0(&ev);
        ev.md.user_ptr = kcb;
        ev.type = LNET_EVENT_PUT;
        ev.mlength = mlength;
        ev.rlength = mlength;
        ev.status = status;
        ev.unlinked = 1;
        nlx_kcore_eq_cb(&ev);
}

static void ut_ktest_bulk_get_event(struct nlx_kcore_buffer *kcb,
                                    unsigned mlength,
                                    int status)
{
        lnet_event_t ev;

        M0_SET0(&ev);
        ev.md.user_ptr = kcb;
        ev.type = LNET_EVENT_GET;
        ev.mlength = mlength;
        ev.rlength = mlength;
        ev.offset = 0;
        ev.status = status;
        ev.unlinked = 1;
        nlx_kcore_eq_cb(&ev);
}

static void ut_ktest_bulk_send_event(struct nlx_kcore_buffer *kcb,
                                     unsigned mlength,
                                     int status,
                                     int unlinked,
                                     int threshold)
{
        lnet_event_t ev;

        M0_SET0(&ev);
        ev.md.user_ptr = kcb;
        ev.type = LNET_EVENT_SEND;
        ev.mlength = mlength;
        ev.rlength = mlength;
        ev.status = status;
        ev.unlinked = unlinked;
        ev.md.threshold = threshold;
        nlx_kcore_eq_cb(&ev);
}

static void ut_ktest_bulk_reply_event(struct nlx_kcore_buffer *kcb,
                                      unsigned mlength,
                                      int status,
                                      int unlinked,
                                      int threshold)
{
        lnet_event_t ev;

        M0_SET0(&ev);
        ev.md.user_ptr = kcb;
        ev.type = LNET_EVENT_REPLY;
        ev.mlength = mlength;
        ev.rlength = mlength;
        ev.status = status;
        ev.unlinked = unlinked;
        ev.md.threshold = threshold;
        nlx_kcore_eq_cb(&ev);
}

static void ut_ktest_bulk_unlink_event(struct nlx_kcore_buffer *kcb)
{
        lnet_event_t ev;

        M0_SET0(&ev);
        ev.md.user_ptr = kcb;
        ev.type = LNET_EVENT_UNLINK;
        ev.unlinked = 1;
        nlx_kcore_eq_cb(&ev);
}
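
/*
 * Event sequences fabricated by the tests below (derived from the test
 * bodies themselves):
 *
 *   passive receive: PUT (unlinked)                        -> success
 *   passive send:    GET (unlinked)                        -> success
 *   active receive:  SEND + REPLY, in either order         -> success
 *   active send:     SEND (unlinked)                       -> success
 *   cancellation:    UNLINK, alone or piggy-backed on SEND -> -ECANCELED
 *
 * A REPLY that arrives before its SEND is recorded in the kb_ooo_ fields
 * and reconciled when the SEND arrives; ACK events are always ignored.
 */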

static void ktest_bulk_body(struct ut_data *td)
{
        struct m0_net_buffer *nb1 = &td->bufs1[0];
        struct nlx_xo_transfer_mc *tp1 = TM1->ntm_xprt_private;
        struct nlx_core_transfer_mc *lctm1 = &tp1->xtm_core;
        struct nlx_xo_buffer *bp1 = nb1->nb_xprt_private;
        struct nlx_core_buffer *lcbuf1 = &bp1->xb_core;
        struct nlx_kcore_buffer *kcb1 = lcbuf1->cb_kpvt;
        int needed;
        unsigned bevs_left;
        struct m0_net_buf_desc nbd_recv;
        struct m0_net_buf_desc nbd_send;

        M0_SET0(&nbd_recv);
        M0_SET0(&nbd_send);

        /* sanity check */
        M0_UT_ASSERT(td->buf_size1 >= UT_BULK_SIZE);

        /* TEST
           Enqueue a passive receive buffer.
           Block the real MDAttach call.
           Send the expected LNet events to indicate that the buffer has
           been filled.
        */
        NLXDBGPnl(td, 1, "TEST: passive receive event delivery\n");

        nb1->nb_qtype = M0_NET_QT_PASSIVE_BULK_RECV;
        nb1->nb_length = UT_BULK_SIZE;
        nb1->nb_desc.nbd_len = 0;
        nb1->nb_desc.nbd_data = NULL;
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);
        M0_UT_ASSERT(nb1->nb_desc.nbd_len != 0);
        M0_UT_ASSERT(nb1->nb_desc.nbd_data != NULL);

        M0_UT_ASSERT(!m0_net_desc_copy(&nb1->nb_desc, &nbd_recv));

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_bulk_put_event(kcb1, UT_BULK_SIZE - 1, 0);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_PASSIVE_BULK_RECV);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_length1 == UT_BULK_SIZE - 1);
        M0_UT_ASSERT(cb_offset1 == 0);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        /* TEST
           Passive receive cancelled.
        */
        NLXDBGPnl(td, 1, "TEST: passive receive event delivery (UNLINK)\n");

        nb1->nb_qtype = M0_NET_QT_PASSIVE_BULK_RECV;
        nb1->nb_length = UT_BULK_SIZE;
        nb1->nb_desc.nbd_len = 0;
        nb1->nb_desc.nbd_data = NULL;
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);
        M0_UT_ASSERT(nb1->nb_desc.nbd_len != 0);
        M0_UT_ASSERT(nb1->nb_desc.nbd_data != NULL);

        M0_UT_ASSERT(!m0_net_desc_copy(&nb1->nb_desc, &nbd_recv));

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_bulk_unlink_event(kcb1);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_PASSIVE_BULK_RECV);
        M0_UT_ASSERT(cb_status1 == -ECANCELED);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        /* TEST
           Enqueue a passive send buffer.
           Block the real MDAttach call.
           Send the expected LNet events to indicate that the buffer has
           been consumed.
        */
        NLXDBGPnl(td, 1, "TEST: passive send event delivery\n");

        nb1->nb_qtype = M0_NET_QT_PASSIVE_BULK_SEND;
        nb1->nb_length = UT_BULK_SIZE;
        nb1->nb_desc.nbd_len = 0;
        nb1->nb_desc.nbd_data = NULL;
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);
        M0_UT_ASSERT(nb1->nb_desc.nbd_len != 0);
        M0_UT_ASSERT(nb1->nb_desc.nbd_data != NULL);

        M0_UT_ASSERT(!m0_net_desc_copy(&nb1->nb_desc, &nbd_send));

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_bulk_get_event(kcb1, UT_BULK_SIZE, 0);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_PASSIVE_BULK_SEND);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_offset1 == 0);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        /* TEST
           Passive send cancelled.
        */
        NLXDBGPnl(td, 1, "TEST: passive send event delivery (UNLINK)\n");

        nb1->nb_qtype = M0_NET_QT_PASSIVE_BULK_SEND;
        nb1->nb_length = UT_BULK_SIZE;
        nb1->nb_desc.nbd_len = 0;
        nb1->nb_desc.nbd_data = NULL;
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);
        M0_UT_ASSERT(nb1->nb_desc.nbd_len != 0);
        M0_UT_ASSERT(nb1->nb_desc.nbd_data != NULL);

        M0_UT_ASSERT(!m0_net_desc_copy(&nb1->nb_desc, &nbd_send));

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_bulk_unlink_event(kcb1);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_PASSIVE_BULK_SEND);
        M0_UT_ASSERT(cb_status1 == -ECANCELED);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        /* TEST
           Enqueue an active receive buffer.
           Block the real LNetGet call.
           Send the expected LNet events to indicate that the buffer has
           been filled.
        */
        NLXDBGPnl(td, 1, "TEST: active receive event delivery (SEND/REPLY)\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_send, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);
        ut_ktest_bulk_send_event(kcb1, UT_BULK_SIZE, 0, 0, 1);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_bulk_reply_event(kcb1, UT_BULK_SIZE, 0, 1, 0);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_RECV);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_length1 == UT_BULK_SIZE);
        M0_UT_ASSERT(cb_offset1 == 0);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        NLXDBGPnl(td, 1, "TEST: active receive event delivery (REPLY/SEND)\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_send, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);
        ut_ktest_bulk_reply_event(kcb1, UT_BULK_SIZE, 0, 0, 1);
        M0_UT_ASSERT(kcb1->kb_ooo_reply);
        M0_UT_ASSERT(kcb1->kb_ooo_status == 0);
        M0_UT_ASSERT(kcb1->kb_ooo_mlength == UT_BULK_SIZE);
        M0_UT_ASSERT(kcb1->kb_ooo_offset == 0);
        ut_ktest_bulk_send_event(kcb1, 0, 0, 1, 0); /* size is wrong */
        ut_ktest_ack_event(kcb1); /* bad event */
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_RECV);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_length1 == UT_BULK_SIZE); /* size must match REPLY */
        M0_UT_ASSERT(cb_offset1 == 0);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        /* TEST
           Test failure cases:
           - SEND failure in SEND/REPLY
           - REPLY failure in SEND/REPLY
           - REPLY failure in REPLY/SEND
        */
        NLXDBGPnl(td, 1, "TEST: active receive event delivery "
                  "(SEND failure [/no REPLY])\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_send, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);
        ut_ktest_bulk_send_event(kcb1, UT_BULK_SIZE, -EIO, 1, 1);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_RECV);
        M0_UT_ASSERT(cb_status1 == -EIO);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        NLXDBGPnl(td, 1, "TEST: active receive event delivery "
                  "(SEND success/REPLY failure)\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_send, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);
        ut_ktest_bulk_send_event(kcb1, UT_BULK_SIZE, 0, 0, 1);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_bulk_reply_event(kcb1, UT_BULK_SIZE, -EIO, 1, 0);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_RECV);
        M0_UT_ASSERT(cb_status1 == -EIO);
        M0_UT_ASSERT(cb_offset1 == 0);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        NLXDBGPnl(td, 1, "TEST: active receive event delivery "
                  "(REPLY failure/SEND success)\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_send, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);
        ut_ktest_bulk_reply_event(kcb1, UT_BULK_SIZE, -EIO, 0, 1);
        M0_UT_ASSERT(kcb1->kb_ooo_reply);
        M0_UT_ASSERT(kcb1->kb_ooo_status == -EIO);
        M0_UT_ASSERT(kcb1->kb_ooo_mlength == UT_BULK_SIZE);
        M0_UT_ASSERT(kcb1->kb_ooo_offset == 0);
        ut_ktest_bulk_send_event(kcb1, UT_BULK_SIZE, 0, 1, 0);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_RECV);
        M0_UT_ASSERT(cb_status1 == -EIO);
        M0_UT_ASSERT(cb_offset1 == 0);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        /* TEST
           Test cancellation cases:
           - UNLINK piggy-backed on SEND in a SEND/REPLY sequence.
           - UNLINK by itself.
        */
        NLXDBGPnl(td, 1, "TEST: active receive event delivery "
                  "(SEND + UNLINK [/no REPLY])\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_send, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_bulk_send_event(kcb1, UT_BULK_SIZE, 0, 1, 1);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_RECV);
        M0_UT_ASSERT(cb_status1 == -ECANCELED);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        NLXDBGPnl(td,1,"TEST: active receive event delivery (UNLINK)\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_send, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);
        ut_ktest_bulk_unlink_event(kcb1);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_RECV);
        M0_UT_ASSERT(cb_status1 == -ECANCELED);
        M0_UT_ASSERT(!kcb1->kb_ooo_reply);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        /* TEST
           Enqueue an active send buffer.
           Block the real LNetGet call.
           Send the expected LNet events to indicate that the buffer has
           been filled.
           The success case is indistinguishable from a piggy-backed UNLINK.
        */
        NLXDBGPnl(td, 1, "TEST: active send event delivery\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_recv, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_bulk_send_event(kcb1, UT_BULK_SIZE, 0, 1, 0);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_SEND);
        M0_UT_ASSERT(cb_status1 == 0);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        /* TEST
           Send failure situation.
        */
        NLXDBGPnl(td, 1, "TEST: active send event delivery (SEND failed)\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_recv, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_bulk_send_event(kcb1, UT_BULK_SIZE, -EIO, 1, 0);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_SEND);
        M0_UT_ASSERT(cb_status1 == -EIO);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        /* TEST
           Send cancellation.
        */
        NLXDBGPnl(td, 1, "TEST: active send event delivery (UNLINK)\n");

        nb1->nb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        nb1->nb_length = UT_BULK_SIZE;
        M0_UT_ASSERT(!m0_net_desc_copy(&nbd_recv, &nb1->nb_desc));
        needed = lctm1->ctm_bev_needed;
        bevs_left = 1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed + 1);

        ut_cbreset();
        M0_UT_ASSERT(cb_called1 == 0);

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        M0_UT_ASSERT(bevs_left-- > 0);
        ut_ktest_ack_event(kcb1); /* bad event */
        ut_ktest_bulk_unlink_event(kcb1);
        m0_chan_wait(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_qt1 == M0_NET_QT_ACTIVE_BULK_SEND);
        M0_UT_ASSERT(cb_status1 == -ECANCELED);

        M0_UT_ASSERT(lctm1->ctm_bev_needed == needed);
        m0_net_desc_free(&nb1->nb_desc);

        m0_net_desc_free(&nbd_recv);
        m0_net_desc_free(&nbd_send);
        return;
}

static void ktest_bulk(void)
{
        ut_save_subs();

        /* intercept these before the TM starts */
        nlx_kcore_iv._nlx_kcore_LNetMDAttach = ut_ktest_bulk_LNetMDAttach;
        nlx_kcore_iv._nlx_kcore_LNetGet = ut_ktest_bulk_LNetGet;
        nlx_kcore_iv._nlx_kcore_LNetPut = ut_ktest_bulk_LNetPut;

        ut_test_framework(&ktest_bulk_body, NULL, ut_verbose);

        ut_restore_subs();
}

#undef UT_BUFVEC_FREE
#undef UT_BUFVEC_ALLOC

static int ut_dev_opens;
static int ut_dev_closes;
static int ut_dev_cleanups;
static int ut_dev_dom_inits;
static int ut_dev_dom_finis;
static int ut_dev_tm_starts;
static int ut_dev_tm_stops;

static int ut_kcore_core_dom_init(struct nlx_kcore_domain *kd,
                                  struct nlx_core_domain *cd)
{
        int rc = nlx_kcore_core_dom_init(kd, cd);
        M0_UT_ASSERT(rc == 0);
        M0_UT_ASSERT(cd->cd_kpvt == kd);
        ut_dev_dom_inits++;
        return rc;
}

static void ut_kcore_core_dom_fini(struct nlx_kcore_domain *kd,
                                   struct nlx_core_domain *cd)
{
        M0_UT_ASSERT(drv_bufs_tlist_is_empty(&kd->kd_drv_bufs));
        M0_UT_ASSERT(drv_tms_tlist_is_empty(&kd->kd_drv_tms));
        nlx_kcore_core_dom_fini(kd, cd);
        M0_UT_ASSERT(cd->cd_kpvt == NULL);
        ut_dev_dom_finis++;
}

static int ut_kcore_tm_start(struct nlx_kcore_domain *kd,
                             struct nlx_core_transfer_mc *ctm,
                             struct nlx_kcore_transfer_mc *ktm)
{
        if (kd == NULL || ctm == NULL || ktm == NULL)
                return 1;

        /* Init just enough of ktm for driver UT (e.g. no use of ktm_addr).
           KCore UT already tested nlx_kcore_tm_start().
        */
        drv_tms_tlink_init(ktm);
        drv_bevs_tlist_init(&ktm->ktm_drv_bevs);
        spin_lock_init(&ktm->ktm_bevq_lock);
        init_waitqueue_head(&ktm->ktm_wq);
        ctm->ctm_kpvt = ktm;
        ut_dev_tm_starts++;
        return 0;
}

static void ut_kcore_tm_stop(struct nlx_core_transfer_mc *ctm,
                             struct nlx_kcore_transfer_mc *ktm)
{
        M0_UT_ASSERT(drv_bevs_tlist_is_empty(&ktm->ktm_drv_bevs));

        drv_bevs_tlist_fini(&ktm->ktm_drv_bevs);
        drv_tms_tlink_fini(ktm);
        ktm->ktm_magic = 0;
        if (nlx_core_tm_invariant(ctm))
                ctm->ctm_kpvt = NULL;
        ut_dev_tm_stops++;
}

static struct nlx_kcore_ops ut_kcore_ops = {
        .ko_dom_init         = ut_kcore_core_dom_init,
        .ko_dom_fini         = ut_kcore_core_dom_fini,
        .ko_buf_register     = nlx_kcore_buf_register,
        .ko_buf_deregister   = nlx_kcore_buf_deregister,
        .ko_tm_start         = ut_kcore_tm_start,
        .ko_tm_stop          = ut_kcore_tm_stop,
        .ko_buf_msg_recv     = nlx_kcore_buf_msg_recv,
        .ko_buf_msg_send     = nlx_kcore_buf_msg_send,
        .ko_buf_active_recv  = nlx_kcore_buf_active_recv,
        .ko_buf_active_send  = nlx_kcore_buf_active_send,
        .ko_buf_passive_recv = nlx_kcore_buf_passive_recv,
        .ko_buf_passive_send = nlx_kcore_buf_passive_send,
        .ko_buf_del          = nlx_kcore_LNetMDUnlink,
        .ko_buf_event_wait   = nlx_kcore_buf_event_wait,
};
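
/*
 * Note that ut_kcore_ops substitutes only the domain and transfer machine
 * entry points; the buffer operations fall through to the real nlx_kcore_
 * subs, so the driver path is exercised end to end while TM setup stays
 * under UT control.
 */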

static int ut_dev_open(struct inode *inode, struct file *file)
{
        struct nlx_kcore_domain *kd;
        int rc;

        rc = nlx_dev_open(inode, file);
        M0_UT_ASSERT(rc == 0 || rc == -EPERM);
        if (rc == 0) {
                M0_UT_ASSERT(file->private_data != NULL);
                ut_dev_opens++;
                kd = file->private_data;
                if (nlx_kcore_domain_invariant(kd)) {
                        M0_UT_ASSERT(nlx_core_kmem_loc_is_empty(
                                                            &kd->kd_cd_loc));
                        kd->kd_drv_ops = &ut_kcore_ops;
                }
        }
        return rc;
}

int ut_dev_close(struct inode *inode, struct file *file)
{
        struct nlx_kcore_domain *kd =
                (struct nlx_kcore_domain *) file->private_data;
        int rc;

        if (nlx_kcore_domain_invariant(kd) &&
            (!drv_bufs_tlist_is_empty(&kd->kd_drv_bufs) ||
             !drv_tms_tlist_is_empty(&kd->kd_drv_tms)))
                ut_dev_cleanups++;

        rc = nlx_dev_close(inode, file);
        M0_UT_ASSERT(rc == 0);
        M0_UT_ASSERT(file->private_data == NULL);
        ut_dev_closes++;
        m0_semaphore_up(&ktest_sem); /* let ktest_dev() drain opens */
        return rc;
}

bool user_helper_wait(int id)
{
        m0_time_t to = m0_time_from_now(60, 0); /* generous bound; assumed */
        bool ok = true;

        M0_UT_ASSERT(ktest_id == id);
        m0_mutex_lock(&ktest_mutex);
        while ((ktest_id == id && !ktest_user_failed && ok) ||
               (id == UT_TEST_MAX && !ktest_done && ok))
                ok = m0_cond_timedwait(&ktest_cond, to);
        m0_mutex_unlock(&ktest_mutex);
        M0_UT_ASSERT(ok);
        M0_UT_ASSERT(!ktest_user_failed);
        return ok;
}
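
/*
 * ktest_dev() below advances through the UT_TEST_ ids in lock step with the
 * user-space helper: the helper learns the current id by reading the proc
 * file (read_lnet_ut) and reports each outcome by writing it back
 * (write_lnet_ut), which signals ktest_cond and lets user_helper_wait()
 * proceed to the next id.
 */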

static void ktest_dev(void)
{
        bool ok;
        m0_time_t to;

#define USER_HELPER_WAIT(id)                    \
({                                              \
        ok = user_helper_wait((id));            \
        if (!ok)                                \
                goto restore_fops;              \
})

        ut_dev_opens = 0;
        nlx_dev_file_ops.release = ut_dev_close;
        nlx_dev_file_ops.open = ut_dev_open;

        /* initial handshake with the user program */
        m0_mutex_lock(&ktest_mutex);
        to = m0_time_from_now(60, 0); /* generous bound; assumed */
        if (ktest_id == UT_TEST_NONE)
                ok = m0_cond_timedwait(&ktest_cond, to);
        else
                ok = true;
        m0_mutex_unlock(&ktest_mutex);

        M0_UT_ASSERT(ok);
        if (!ok)
                goto restore_fops;

        /* UT_TEST_DEV: just wait for user program to verify device */
        USER_HELPER_WAIT(UT_TEST_DEV);

        /* UT_TEST_OPEN: wait for user program to open/close */
        M0_UT_ASSERT(ut_dev_opens == 0);
        USER_HELPER_WAIT(UT_TEST_OPEN);
        M0_UT_ASSERT(ut_dev_opens == 1);
        M0_UT_ASSERT(ut_dev_closes == 1);

        /* UT_TEST_RDWR: wait for user program to try read/write */
        USER_HELPER_WAIT(UT_TEST_RDWR);
        M0_UT_ASSERT(ut_dev_opens == 2);

        /* UT_TEST_BADIOCTL: wait for user program to try invalid ioctls */
        USER_HELPER_WAIT(UT_TEST_BADIOCTL);
        M0_UT_ASSERT(ut_dev_opens == 3);

        /* UT_TEST_DOMINIT: wait for user program dominit/fini */
        USER_HELPER_WAIT(UT_TEST_DOMINIT);
        M0_UT_ASSERT(ut_dev_opens == 4);
        M0_UT_ASSERT(ut_dev_dom_inits == 1);
        M0_UT_ASSERT(ut_dev_dom_finis == 1);

        /* UT_TEST_TMS */
        USER_HELPER_WAIT(UT_TEST_TMS);
        M0_UT_ASSERT(ut_dev_opens == 5);
        M0_UT_ASSERT(ut_dev_tm_starts > 0);
        M0_UT_ASSERT(ut_dev_tm_stops == ut_dev_tm_starts);

        /* UT_TEST_DUPTM */
        USER_HELPER_WAIT(UT_TEST_DUPTM);
        M0_UT_ASSERT(ut_dev_opens == 6);
        M0_UT_ASSERT(ut_dev_tm_stops == ut_dev_tm_starts);

        /* UT_TEST_TMCLEANUP */
        USER_HELPER_WAIT(UT_TEST_TMCLEANUP);
        M0_UT_ASSERT(ut_dev_opens == 7);
        M0_UT_ASSERT(ut_dev_cleanups > 0);

        /* final handshake before proc file is deregistered */
        m0_mutex_lock(&ktest_mutex);
        to = m0_time_from_now(60, 0);
        while (!ktest_done && !ktest_user_failed && ok)
                ok = m0_cond_timedwait(&ktest_cond, to);
        m0_mutex_unlock(&ktest_mutex);
        M0_UT_ASSERT(ok);
        M0_UT_ASSERT(!ktest_user_failed);

#undef USER_HELPER_WAIT

restore_fops:
        nlx_dev_file_ops.open = nlx_dev_open;
        /* drain any remaining opens before restoring the release hook */
        while (ut_dev_opens > ut_dev_closes)
                m0_semaphore_timeddown(&ktest_sem, m0_time_from_now(1, 0));
        nlx_dev_file_ops.release = nlx_dev_close;
}

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 79
 * scroll-step: 1
 * End:
 */
Definition: klnet_vec.c:302
uint64_t nqs_num_dels
Definition: net.h:769
uint64_t m0_time_t
Definition: time.h:37
static void nlx_kcore_umd_init(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, int threshold, int max_size, unsigned options, bool isLNetGetOp, lnet_md_t *umd)
Definition: klnet_utils.c:255
static void nlx_core_match_bits_decode(uint64_t mb, uint32_t *tmid, uint64_t *counter)
Definition: lnet_core.c:258
static bool nlx_kcore_tm_invariant(const struct nlx_kcore_transfer_mc *kctm)
Definition: klnet_core.c:929
#define NLXDBG(ptr, dbg, stmt)
Definition: lnet_main.c:877
#define M0_CASSERT(cond)
int(* _nlx_kcore_LNetGet)(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_core.c:868
#define NLX_PAGE_OFFSET(addr)
Definition: klnet_core.h:404
M0_INTERNAL int m0_net_tm_stats_get(struct m0_net_transfer_mc *tm, enum m0_net_queue_type qtype, struct m0_net_qstats *qs, bool reset)
Definition: tm.c:343
static struct m0_atomic64 ut_ktest_msg_ep_create_fail
Definition: klnet_ut.c:572
uint8_t * nbd_data
Definition: net_otw_types.h:38
static int ut_ktest_msg_LNetMDAttach(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_ut.c:482
struct m0_vec ov_vec
Definition: vec.h:147
static lnet_kiov_t * ut_ktest_kiov_mem_dup(const lnet_kiov_t *kiov, size_t len)
Definition: klnet_ut.c:639
wait_queue_head_t ktm_wq
Definition: klnet_core.h:148
static bool ut_bufvec_alloc(struct m0_bufvec *bv, size_t n)
Definition: klnet_ut.c:211
#define UT_BUFVEC_ALLOC(v, n)
Definition: klnet_ut.c:223
struct m0_bufvec data
Definition: di.c:40
m0_bcount_t nb_length
Definition: net.h:1334
static struct file_operations nlx_dev_file_ops
Definition: klnet_drv.c:1613
unsigned kb_ooo_mlength
Definition: klnet_core.h:214
uint64_t nb_flags
Definition: net.h:1489
#define KEB(b)
static bool ktest_user_failed
Definition: klnet_ut.c:47
static void ut_kcore_tm_stop(struct nlx_core_transfer_mc *ctm, struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_ut.c:2060
uint32_t cepa_pid
static m0_bcount_t cb_length1
Definition: lnet_ut.c:178
static int nlx_kcore_core_dom_init(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_core.c:1157
static void nlx_kcore_core_dom_fini(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_core.c:1200
uint64_t m0_bcount_t
Definition: types.h:77
static void ut_ktest_msg_put_event(struct nlx_kcore_buffer *kcb, unsigned mlength, unsigned offset, int status, int unlinked, struct nlx_core_ep_addr *addr)
Definition: klnet_ut.c:584
lnet_handle_md_t kb_mdh
Definition: klnet_core.h:206
#define PAGE_SIZE
Definition: lnet_ut.c:277
uint64_t kb_magic
Definition: klnet_core.h:160
m0_bcount_t nb_min_receive_size
Definition: net.h:1496
#define M0_SET0(obj)
Definition: misc.h:64
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
size_t kb_kiov_adj_idx
Definition: klnet_core.h:197
bool user_helper_wait(int id)
Definition: klnet_ut.c:2137
static int ut_dev_opens
Definition: klnet_ut.c:2007
static enum m0_net_queue_type cb_qt1
Definition: bulk_mem_ut.c:158
static int ut_kcore_core_dom_init(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_ut.c:2015
static int ktest_id
Definition: klnet_ut.c:46
void ** ov_buf
Definition: vec.h:149
uint32_t cepa_portal
char ** nidstrs1
Definition: lnet_ut.c:308
M0_INTERNAL void m0_cond_init(struct m0_cond *cond, struct m0_mutex *mutex)
Definition: cond.c:40
static int nlx_kcore_LNetMDAttach(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_utils.c:377
Definition: sock.c:887
static m0_bcount_t count
Definition: xcode.c:167
#define TM1
Definition: lnet_ut.c:320
struct inode * inode
Definition: dir.c:624
static int ut_dev_tm_stops
Definition: klnet_ut.c:2013
static bool ktest_done
Definition: klnet_ut.c:48
M0_INTERNAL int m0_bufvec_alloc(struct m0_bufvec *bufvec, uint32_t num_segs, m0_bcount_t seg_size)
Definition: vec.c:220
static void ut_restore_subs(void)
Definition: lnet_ut.c:52
static m0_bindex_t cb_offset1
Definition: lnet_ut.c:179
static int ut_dev_open(struct inode *inode, struct file *file)
Definition: klnet_ut.c:2091
static void ktest_buf_reg(void)
Definition: klnet_ut.c:278
M0_INTERNAL void m0_bufvec_free(struct m0_bufvec *bufvec)
Definition: vec.c:395
static int ut_ktest_bulk_LNetPut(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_ut.c:1288
static void nlx_kcore_buf_deregister(struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1282
int ut_ktest_msg_buf_event_wait(struct nlx_core_domain *lcdom, struct nlx_core_transfer_mc *lctm, m0_time_t timeout)
Definition: klnet_ut.c:555
static char * addr
Definition: node_k.c:37
static int ut_dev_cleanups
Definition: klnet_ut.c:2009
int i
Definition: dir.c:1033
static int ut_dev_dom_inits
Definition: klnet_ut.c:2010
static void ut_save_subs(void)
Definition: lnet_ut.c:41
static bool cb_save_ep1
Definition: lnet_ut.c:180
static struct nlx_core_ep_addr * nlx_ep_to_core(struct m0_net_end_point *ep)
Definition: lnet_pvt.h:46
static int close_lnet_ut(struct inode *inode, struct file *file)
Definition: klnet_ut.c:164
static int ut_ktest_msg_LNetPut(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_ut.c:518
int(* _nlx_kcore_LNetPut)(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_core.c:864
M0_INTERNAL void m0_net_lnet_tm_set_debug(struct m0_net_transfer_mc *tm, unsigned dbg)
Definition: lnet_main.c:992
enum m0_net_queue_type nb_qtype
Definition: net.h:1363
void * nd_xprt_private
Definition: net.h:393
static int ut_ktest_bulk_LNetMDAttach(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_ut.c:1205
uint32_t nb_max_receive_msgs
Definition: net.h:1502
#define M0_ASSERT(cond)
M0_INTERNAL int nlx_dev_close(struct inode *inode, struct file *file)
Definition: klnet_drv.c:1527
struct nlx_core_bev_cqueue ctm_bevq
M0_THREAD_ENTER
Definition: dir.c:336
#define TEST_HDR_DATA_ENCODE(_p, _t)
M0_INTERNAL void m0_cond_fini(struct m0_cond *cond)
Definition: cond.c:46
static int nlx_kcore_buf_passive_recv(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1538
static int open_lnet_ut(struct inode *inode, struct file *file)
Definition: klnet_ut.c:159
struct m0_atomic64 ref_cnt
Definition: refs.h:38
static int ut_dev_tm_starts
Definition: klnet_ut.c:2012
static int nlx_kcore_buf_msg_recv(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1348
static int counter
Definition: mutex.c:32
#define UT_PROC_NAME
Definition: lnet_drv_ut.h:50
struct nlx_core_domain xd_core
Definition: lnet_xo.h:77
static int ut_dev_closes
Definition: klnet_ut.c:2008
static struct m0_stob_domain * dom
Definition: storage.c:38
M0_INTERNAL void m0_cond_signal(struct m0_cond *cond)
Definition: cond.c:94
unsigned kb_ooo_offset
Definition: klnet_core.h:224
static unsigned ut_ktest_kiov_count(const lnet_kiov_t *k, size_t len)
Definition: klnet_ut.c:674
static int ut_ktest_msg_ep_create(struct m0_net_end_point **epp, struct m0_net_transfer_mc *tm, const struct nlx_core_ep_addr *cepa)
Definition: klnet_ut.c:573
size_t buf_size2
Definition: lnet_ut.c:305
#define UT_BUFVEC_FREE(v)
Definition: klnet_ut.c:229
M0_INTERNAL int m0_semaphore_init(struct m0_semaphore *semaphore, unsigned value)
Definition: semaphore.c:38
spinlock_t ktm_bevq_lock
Definition: klnet_core.h:145
M0_INTERNAL void m0_mutex_init(struct m0_mutex *mutex)
Definition: mutex.c:35
struct nlx_core_buffer xb_core
Definition: lnet_xo.h:117
void nlx_core_ep_addr_encode(struct nlx_core_domain *lcdom, const struct nlx_core_ep_addr *cepa, char buf[M0_NET_LNET_XEP_ADDR_LEN])
Definition: lnet_core.c:427
static void ktest_bulk_body(struct ut_data *td)
Definition: klnet_ut.c:1400
Definition: xcode.h:73
M0_INTERNAL void m0_net_desc_free(struct m0_net_buf_desc *desc)
Definition: net.c:87
uint32_t v_nr
Definition: vec.h:51
static void nlx_core_kmem_loc_set(struct nlx_core_kmem_loc *loc, struct page *pg, uint32_t off)
Definition: klnet_core.c:1127
M0_INTERNAL m0_bcount_t m0_net_domain_get_max_buffer_size(struct m0_net_domain *dom)
Definition: chan.h:229
static m0_bindex_t offset
Definition: dump.c:173
M0_INTERNAL int m0_net_desc_copy(const struct m0_net_buf_desc *from_desc, struct m0_net_buf_desc *to_desc)
Definition: net.c:74
m0_bcount_t * v_count
Definition: vec.h:53
#define LNetHandleIsEqual(h1, h2)
Definition: klnet_core.h:246
static struct nlx_xo_interceptable_subs nlx_xo_iv
Definition: lnet_xo.c:83
M0_INTERNAL void nlx_core_nidstrs_put(struct nlx_core_domain *lcdom, char ***nidary)
Definition: klnet_core.c:1774
static void ktest_bulk(void)
Definition: klnet_ut.c:1989
int(* _nlx_ep_create)(struct m0_net_end_point **epp, struct m0_net_transfer_mc *tm, const struct nlx_core_ep_addr *cepa)
Definition: lnet_xo.c:75
#define TM2
Definition: lnet_ut.c:321
static int nlx_ep_create(struct m0_net_end_point **epp, struct m0_net_transfer_mc *tm, const struct nlx_core_ep_addr *cepa)
Definition: lnet_ep.c:48
static struct m0_atomic64 ut_ktest_bulk_fake_LNetMDAttach
Definition: klnet_ut.c:1203
struct nlx_core_ep_addr ktm_addr
Definition: klnet_core.h:139
lnet_kiov_t * kb_kiov
Definition: klnet_core.h:188
M0_INTERNAL void m0_chan_signal_lock(struct m0_chan *chan)
Definition: chan.c:165
M0_INTERNAL m0_bcount_t m0_vec_count(const struct m0_vec *vec)
Definition: vec.c:53
static void nlx_kcore_kiov_adjust_length(struct nlx_kcore_transfer_mc *ktm, struct nlx_kcore_buffer *kcb, lnet_md_t *umd, m0_bcount_t bytes)
Definition: klnet_utils.c:314
static void ut_cbreset(void)
Definition: lnet_ut.c:252
struct nlx_kcore_transfer_mc * kb_ktm
Definition: klnet_core.h:166
struct m0_clink tmwait1
Definition: lnet_ut.c:295
static bool ut_ktest_kiov_eq(const lnet_kiov_t *k1, const lnet_kiov_t *k2, size_t len)
Definition: klnet_ut.c:660
static const uint64_t k2
Definition: hash_fnc.c:35
static struct m0_cond ktest_cond
Definition: klnet_ut.c:44
static bool ut_ktest_msg_LNetPut_called
Definition: klnet_ut.c:516
struct m0_ref nep_ref
Definition: net.h:491
static struct m0_net_end_point * cb_ep1
Definition: lnet_ut.c:181
static bool nlx_core_ep_eq(const struct nlx_core_ep_addr *cep1, const struct nlx_core_ep_addr *cep2)
Definition: lnet_core.h:537
static uint32_t timeout
Definition: console.c:52
static struct nlx_kcore_ops ut_kcore_ops
Definition: klnet_ut.c:2074
static int64_t m0_atomic64_get(const struct m0_atomic64 *a)
void m0_clink_add_lock(struct m0_chan *chan, struct m0_clink *link)
Definition: chan.c:255
int nlx_core_ep_addr_decode(struct nlx_core_domain *lcdom, const char *ep_addr, struct nlx_core_ep_addr *cepa)
Definition: lnet_core.c:386
uint64_t cb_match_bits
struct nlx_kcore_ops * kd_drv_ops
Definition: klnet_core.h:101
void m0_net_end_point_put(struct m0_net_end_point *ep)
Definition: ep.c:98
M0_INTERNAL void m0_cond_wait(struct m0_cond *cond)
Definition: cond.c:52
#define USER_HELPER_WAIT(id)
uint64_t n
Definition: fops.h:107
static void ut_ktest_kiov_mem_free(lnet_kiov_t *kiov)
Definition: klnet_ut.c:655
static struct m0_chan * ut_ktest_msg_buf_event_wait_delay_chan
Definition: klnet_ut.c:554
M0_INTERNAL void m0_net_buffer_deregister(struct m0_net_buffer *buf, struct m0_net_domain *dom)
Definition: buf.c:107
static void ut_kcore_core_dom_fini(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_ut.c:2027
static struct m0_semaphore ktest_sem
Definition: klnet_ut.c:45
static int ut_kcore_tm_start(struct nlx_kcore_domain *kd, struct nlx_core_transfer_mc *ctm, struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_ut.c:2037
int m0_net_domain_init(struct m0_net_domain *dom, const struct m0_net_xprt *xprt)
Definition: domain.c:36
static int nlx_kcore_buf_register(struct nlx_kcore_domain *kd, nlx_core_opaque_ptr_t buffer_id, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1251
struct m0_tl kd_drv_tms
Definition: klnet_core.h:107
#define zUT(x)
Definition: lnet_ut.c:266
static bool ut_ktest_bulk_LNetMDAttach_called
Definition: klnet_ut.c:1204
static int nlx_dev_open(struct inode *inode, struct file *file)
Definition: klnet_drv.c:1494
static struct file_operations proc_lnet_fops
Definition: klnet_ut.c:169
static void ut_ktest_bulk_send_event(struct nlx_kcore_buffer *kcb, unsigned mlength, int status, int unlinked, int threshold)
Definition: klnet_ut.c:1351
static void ktest_dev(void)
Definition: klnet_ut.c:2154
M0_INTERNAL bool m0_chan_timedwait(struct m0_clink *link, const m0_time_t abs_timeout)
Definition: chan.c:349
M0_INTERNAL void m0_semaphore_fini(struct m0_semaphore *semaphore)
Definition: semaphore.c:45
M0_INTERNAL int m0_net_buffer_add(struct m0_net_buffer *buf, struct m0_net_transfer_mc *tm)
Definition: buf.c:247
m0_time_t m0_time_from_now(uint64_t secs, long ns)
Definition: time.c:96
int ut_dev_close(struct inode *inode, struct file *file)
Definition: klnet_ut.c:2114
m0_bcount_t cb_length
Definition: addb2.c:200
static int nlx_kcore_buf_event_wait(struct nlx_core_transfer_mc *ctm, struct nlx_kcore_transfer_mc *ktm, m0_time_t timeout)
Definition: klnet_core.c:1658
size_t buf_size1
Definition: lnet_ut.c:298
static bool ut_ktest_msg_LNetMDAttach_called
Definition: klnet_ut.c:481
static int nlx_kcore_buf_active_send(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1488
static int32_t cb_status1
Definition: bulk_mem_ut.c:161
m0_bcount_t size
Definition: di.c:39
static int start(struct m0_fom *fom)
Definition: trigger_fom.c:321
M0_INTERNAL int nlx_core_buf_event_wait(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, m0_time_t timeout)
Definition: klnet_core.c:1685
M0_INTERNAL void m0_mutex_fini(struct m0_mutex *mutex)
Definition: mutex.c:42
M0_INTERNAL bool m0_cond_timedwait(struct m0_cond *cond, const m0_time_t abs_timeout)
Definition: cond.c:74
static bool ut_ktest_bulk_LNetPut_called
Definition: klnet_ut.c:1287
static ssize_t read_lnet_ut(struct file *file, char __user *buffer, size_t len, loff_t *offset)
Definition: klnet_ut.c:51
M0_INTERNAL int32_t m0_net_domain_get_max_buffer_segments(struct m0_net_domain *dom)
static int nlx_kcore_buf_msg_send(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1394
enum m0_net_queue_type cb_qtype
struct m0_net_buf_desc nb_desc
Definition: net.h:1412
static bool nlx_core_tm_invariant(const struct nlx_core_transfer_mc *lctm)
Definition: lnet_core.c:120
static unsigned done
Definition: storage.c:91
static uint64_t base
Definition: dump.c:1504
M0_INTERNAL void m0_semaphore_down(struct m0_semaphore *semaphore)
Definition: semaphore.c:49
static struct m0_atomic64 ut_ktest_msg_buf_event_wait_stall
Definition: klnet_ut.c:553
static void ut_ktest_bulk_put_event(struct nlx_kcore_buffer *kcb, unsigned mlength, int status)
Definition: klnet_ut.c:1318
M0_INTERNAL void m0_semaphore_up(struct m0_semaphore *semaphore)
Definition: semaphore.c:65
static int ut_dev_dom_finis
Definition: klnet_ut.c:2011
static bool ut_chan_timedwait(struct m0_clink *link, uint32_t secs)
Definition: lnet_ut.c:61
static bool nlx_core_kmem_loc_is_empty(const struct nlx_core_kmem_loc *loc)
Definition: lnet_pvt.h:91
struct m0_tl ktm_drv_bevs
Definition: klnet_core.h:136
int(* ko_dom_init)(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_core.h:275
static int ut_verbose
Definition: lnet_ut.c:33
struct nlx_core_kmem_loc ktm_ctm_loc
Definition: klnet_core.h:124
void m0_free(void *data)
Definition: memory.c:146
Definition: mutex.h:47
static void ut_ktest_ack_event(struct nlx_kcore_buffer *kcb)
Definition: klnet_ut.c:627
static struct m0_net_end_point * ut_ktest_msg_LNetPut_ep
Definition: klnet_ut.c:517
static int nlx_kcore_buf_active_recv(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1438
static unsigned cb_called1
Definition: lnet_ut.c:182
static void ktest_enc_dec(void)
Definition: klnet_ut.c:444
void * nb_xprt_private
Definition: net.h:1461
int(* _nlx_core_buf_event_wait)(struct nlx_core_domain *lcdom, struct nlx_core_transfer_mc *lctm, m0_time_t timeout)
Definition: lnet_xo.c:72
int32_t rc
Definition: trigger_fop.h:47
M0_INTERNAL int m0_net_end_point_create(struct m0_net_end_point **epp, struct m0_net_transfer_mc *tm, const char *addr)
Definition: ep.c:56
#define ARRAY_SIZE(a)
Definition: misc.h:45
#define M0_UT_ASSERT(a)
Definition: ut.h:46
static void ut_ktest_bulk_reply_event(struct nlx_kcore_buffer *kcb, unsigned mlength, int status, int unlinked, int threshold)
Definition: klnet_ut.c:1370
static struct m0_addb2_frame_header last
Definition: storage.c:93
M0_INTERNAL int nlx_core_nidstrs_get(struct nlx_core_domain *lcdom, char ***nidary)
Definition: klnet_core.c:1744
static void ktest_msg(void)
Definition: klnet_ut.c:1184
Definition: vec.h:145
struct m0_tl kd_drv_bufs
Definition: klnet_core.h:113
static bool nlx_kcore_domain_invariant(const struct nlx_kcore_domain *kd)
Definition: klnet_core.c:897
static void m0_atomic64_set(struct m0_atomic64 *a, int64_t num)
struct m0_net_end_point * nb_ep
Definition: net.h:1424
static void nlx_kcore_eq_cb(lnet_event_t *event)
Definition: klnet_core.c:1004
struct nlx_core_transfer_mc xtm_core
Definition: lnet_xo.h:104
static int nlx_kcore_LNetMDUnlink(struct nlx_kcore_transfer_mc *kctm, struct nlx_kcore_buffer *kcb)
Definition: klnet_utils.c:433
static bool ut_ktest_bulk_LNetGet_called
Definition: klnet_ut.c:1256
#define NLXDBGPnl(ptr, dbg, fmt,...)
Definition: lnet_main.c:880
uint32_t cepa_tmid
static void ut_ktest_msg_send_event(struct nlx_kcore_buffer *kcb, unsigned mlength, int status)
Definition: klnet_ut.c:608