lnet_ut.c
/* -*- C -*- */
/*
 * Copyright (c) 2013-2021 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */


#include "net/lnet/lnet_main.c"

#define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_UT
#include "lib/trace.h"

#include "lib/assert.h" /* M0_IMPOSSIBLE */
#include "lib/arith.h"  /* max64u */
#include "lib/thread.h" /* m0_thread_self */
#include "ut/ut.h"
static int ut_verbose = 0;

static int ut_subs_saved;
static struct nlx_xo_interceptable_subs saved_xo_subs;
#ifdef __KERNEL__
static struct nlx_kcore_interceptable_subs saved_kcore_subs;
#endif

static void ut_save_subs(void)
{
        if (ut_subs_saved > 0)
                return;
        ut_subs_saved = 1;
        saved_xo_subs = nlx_xo_iv;
#ifdef __KERNEL__
        saved_kcore_subs = nlx_kcore_iv;
#endif
}

static void ut_restore_subs(void)
{
        M0_ASSERT(ut_subs_saved > 0);
        nlx_xo_iv = saved_xo_subs;
#ifdef __KERNEL__
        nlx_kcore_iv = saved_kcore_subs;
#endif
}
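
/*
 * The two helpers above snapshot and restore the transport's
 * "interceptable subs" vectors so that an individual test can patch one
 * internal subroutine and the suite can undo the change globally.  A
 * minimal sketch of the intended usage; the member name .xs_some_sub is
 * hypothetical (real members are patched by the timeout test near the
 * end of this file):
 */
#if 0
static void some_test(void)
{
        ut_save_subs();                        /* snapshot once */
        nlx_xo_iv.xs_some_sub = my_test_stub;  /* hypothetical member */
        /* ... run the scenario that exercises the stub ... */
        ut_restore_subs();                     /* undo all patches */
}
#endif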

static bool ut_chan_timedwait(struct m0_clink *link, uint32_t secs)
{
        m0_time_t timeout = m0_time_from_now(secs, 0);

        return m0_chan_timedwait(link, timeout);
}
66 
67 /* write a pattern to a buffer */
68 static void ut_net_buffer_sign(struct m0_net_buffer *nb,
69  m0_bcount_t len,
70  unsigned char seed)
71 {
72  struct m0_bufvec_cursor cur;
73  m0_bcount_t i;
74  m0_bcount_t step;
75  unsigned char val;
76  unsigned char *p;
77 
78  val = (m0_bcount_t) seed + ((m0_bcount_t) seed - 1) * len;
80  i = 0;
81  do {
82  m0_bcount_t bytes = 0;
83  step = m0_bufvec_cursor_step(&cur);
85  for ( ; i < len && bytes < step; ++bytes, ++i, ++p, ++val) {
86  *p = val;
87  }
88  } while (i < len && !m0_bufvec_cursor_move(&cur, step));
89  M0_UT_ASSERT(i == len);
90  return;
91 }
92 
93 /* check the pattern in the buffer */
95  m0_bcount_t len,
97  unsigned char seed)
98 {
99  struct m0_bufvec_cursor cur;
100  m0_bcount_t i;
101  m0_bcount_t step;
102  unsigned char val;
103  unsigned char *p;
104 
105  if (nb == NULL)
106  return false;
107 
108  val = (m0_bcount_t) seed + ((m0_bcount_t) seed - 1) * len;
110  i = 0;
111  len += offset; /* range: offset <= i < len */
112  do {
113  m0_bcount_t bytes;
114  step = m0_bufvec_cursor_step(&cur);
115  if (i + step < offset) {
116  i += step;
117  continue;
118  }
120  if (i < offset) {
121  bytes = offset - i;
122  p += bytes;
123  i += bytes;
124  } else
125  bytes = 0;
126  for ( ; i < len && bytes < step; ++i, ++p, ++bytes, ++val) {
127  if (*p != val)
128  return false;
129  }
130  } while (i < len && !m0_bufvec_cursor_move(&cur, step));
131  return i == len;
132 }
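
/*
 * Both helpers derive the byte at logical offset k from the seed and
 * length alone, so the receiver can verify a region without sharing
 * state with the sender: the first byte is seed + (seed - 1) * len
 * (mod 256) and successive bytes increment by one.  A flat-buffer
 * equivalent of the cursor walk above, for illustration only:
 */
#if 0
static void ut_pattern_flat(unsigned char *p, m0_bcount_t len,
                            unsigned char seed)
{
        unsigned char val = (m0_bcount_t) seed +
                            ((m0_bcount_t) seed - 1) * len;
        m0_bcount_t   i;

        for (i = 0; i < len; ++i, ++val)
                p[i] = val; /* same sequence the cursor version writes */
}
#endif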

static enum m0_net_tm_ev_type ecb_evt;
static enum m0_net_tm_state ecb_tms;
static int32_t ecb_status;
static int ecb_count;
static void ecb_reset(void)
{
        ecb_evt = M0_NET_TEV_NR;
        ecb_tms = M0_NET_TM_UNDEFINED;
        ecb_status = 1;
        ecb_count = 0;
}

static void ut_tm_ecb(const struct m0_net_tm_event *ev)
{
        ecb_evt = ev->nte_type;
        ecb_tms = ev->nte_next_state;
        ecb_status = ev->nte_status;
        ecb_count++;
}

enum {
        /* ... */
        STARTSTOP_PID = 12345, /* same as LUSTRE_SRV_LNET_PID */
        /* ... */
};
#ifdef __KERNEL__
/*
 * The LUSTRE_SRV_LNET_PID macro is not available in user space.
 * Depending on the lustre version, it may be known as LNET_PID_LUSTRE
 * or LUSTRE_SRV_LNET_PID.
 */
#ifdef LNET_PID_LUSTRE
M0_BASSERT(STARTSTOP_PID == LNET_PID_LUSTRE);
#else
M0_BASSERT(STARTSTOP_PID == LUSTRE_SRV_LNET_PID);
#endif
#endif

static enum m0_net_queue_type cb_qt1;
static struct m0_net_buffer *cb_nb1;
static int32_t cb_status1;
static m0_bcount_t cb_length1;
static m0_bcount_t cb_offset1;
static bool cb_save_ep1; /* save ep next call only */
static struct m0_net_end_point *cb_ep1; /* QT_MSG_RECV only */
static unsigned cb_called1;
enum { UT_CB_INVALID_STATUS = 9999999 };

static void ut_buf_cb1(const struct m0_net_buffer_event *ev)
{
        /* nlx_print_net_buffer_event("ut_buf_cb1", ev); */
        cb_nb1 = ev->nbe_buffer;
        cb_qt1 = cb_nb1->nb_qtype;
        cb_status1 = ev->nbe_status;
        cb_length1 = ev->nbe_length;
        cb_offset1 = ev->nbe_offset;
        if (cb_qt1 == M0_NET_QT_MSG_RECV && cb_save_ep1 && ev->nbe_ep != NULL) {
                cb_ep1 = ev->nbe_ep;
                m0_net_end_point_get(cb_ep1);
        } else
                cb_ep1 = NULL;
        cb_save_ep1 = false;
        cb_called1++;
}

static void ut_cbreset1(void)
{
        cb_nb1 = NULL;
        cb_qt1 = M0_NET_QT_NR;
        cb_status1 = UT_CB_INVALID_STATUS;
        cb_length1 = 0;
        cb_offset1 = 0;
        M0_ASSERT(cb_ep1 == NULL); /* be harsh */
        cb_save_ep1 = false;
        cb_called1 = 0;
}

static enum m0_net_queue_type cb_qt2;
static struct m0_net_buffer *cb_nb2;
static int32_t cb_status2;
static m0_bcount_t cb_length2;
static m0_bcount_t cb_offset2;
static bool cb_save_ep2; /* save ep next call only */
static struct m0_net_end_point *cb_ep2; /* QT_MSG_RECV only */
static unsigned cb_called2;

static void ut_buf_cb2(const struct m0_net_buffer_event *ev)
{
        /* nlx_print_net_buffer_event("ut_buf_cb2", ev); */
        cb_nb2 = ev->nbe_buffer;
        cb_qt2 = cb_nb2->nb_qtype;
        cb_status2 = ev->nbe_status;
        cb_length2 = ev->nbe_length;
        cb_offset2 = ev->nbe_offset;
        if (cb_qt2 == M0_NET_QT_MSG_RECV && cb_save_ep2 && ev->nbe_ep != NULL) {
                cb_ep2 = ev->nbe_ep;
                m0_net_end_point_get(cb_ep2);
        } else
                cb_ep2 = NULL;
        cb_save_ep2 = false;
        cb_called2++;
}

static void ut_cbreset2(void)
{
        cb_nb2 = NULL;
        cb_qt2 = M0_NET_QT_NR;
        cb_status2 = UT_CB_INVALID_STATUS;
        cb_length2 = 0;
        cb_offset2 = 0;
        M0_ASSERT(cb_ep2 == NULL); /* be harsh */
        cb_save_ep2 = false;
        cb_called2 = 0;
}

static void ut_cbreset(void)
{
        ut_cbreset1();
        ut_cbreset2();
}

#define zvUT(x, expRC) \
do { \
        int rc = x; \
        int erc = expRC; \
        if (rc != erc) \
                M0_ASSERT_INFO(rc == erc, "%d != %d (%s : %s)", \
                               rc, erc, #x, #expRC); \
} while (0)
#define zUT(x) zvUT(x, 0)
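
/*
 * zvUT() evaluates the expression once and fails the UT with a message
 * showing both the expression and the expected return code; zUT() is
 * the common expect-success case.  Typical uses, taken from the tests
 * later in this file:
 */
#if 0
        zUT(m0_net_buffer_add(nb1, TM1));          /* expect 0 */
        zvUT(m0_net_buffer_add(nb2, TM2), -EPERM); /* expect failure */
#endif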

#ifndef PAGE_SHIFT
  #ifdef CONFIG_X86_64
  #define PAGE_SHIFT 12
  #elif defined (CONFIG_AARCH64)
  #define PAGE_SHIFT 16
  /* the page size on aarch64 is 64KB, hence a page shift of 16 */
  #else
  #error "The platform is not supported"
  #endif
  #define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif

enum {
        UT_BUFS1 = 2,
        /* ... */
        UT_BUFS2 = 1,
        /* ... */
        UT_PAGE_SHIFT = PAGE_SHIFT
};

struct ut_data {
        int                             _debug_;
        struct m0_net_tm_callbacks      tmcb;
        struct m0_net_domain            dom1;
        struct m0_net_transfer_mc       tm1;
        struct m0_clink                 tmwait1;
        struct m0_net_buffer_callbacks  buf_cb1;
        struct m0_net_buffer            bufs1[UT_BUFS1];
        size_t                          buf_size1;
        m0_bcount_t                     buf_seg_size1;
        struct m0_net_domain            dom2;
        struct m0_net_transfer_mc       tm2;
        struct m0_clink                 tmwait2;
        struct m0_net_buffer_callbacks  buf_cb2;
        struct m0_net_buffer            bufs2[UT_BUFS2];
        size_t                          buf_size2;
        m0_bcount_t                     buf_seg_size2;
        char                          **nidstrs1;
        char                          **nidstrs2;
        struct m0_net_qstats            qs;
        struct nlx_core_buf_desc        cbd1;
        struct nlx_core_buf_desc        cbd2;
};

#ifdef __KERNEL__
M0_BASSERT(UT_PAGE_SHIFT == PAGE_SHIFT);
#endif

#define DOM1 (&td->dom1)
#define DOM2 (&td->dom2)
#define TM1 (&td->tm1)
#define TM2 (&td->tm2)
#define CBD1 (&td->cbd1)
#define CBD2 (&td->cbd2)

typedef void (*ut_test_fw_body_t)(struct ut_data *td);
typedef void (*ut_test_fw_prestart_cb_t)(struct ut_data *td, int which);

static void ut_test_framework_dom_cleanup(struct ut_data *td,
                                          struct m0_net_domain *dom)
{
        struct m0_clink cl;
        struct m0_net_buffer *nb;
        struct m0_net_transfer_mc *tm;
        size_t len;
        int qt;
        int i;

        m0_clink_init(&cl, NULL);

        m0_list_for_each_entry(&dom->nd_tms, tm,
                               struct m0_net_transfer_mc, ntm_dom_linkage) {
                /* iterate over buffers in each queue */
                for (qt = M0_NET_QT_MSG_RECV; qt < M0_NET_QT_NR; ++qt) {
                        len = m0_tlist_length(&m0_net_tm_tl, &tm->ntm_q[qt]);
                        /* best effort; can't say if this will always work */
                        for (i = 0; i < len; ++i) {
                                nb = m0_net_tm_tlist_head(&tm->ntm_q[qt]);
                                m0_clink_add_lock(&tm->ntm_chan, &cl);
                                NLXDBGP(td, 2,
                                        "Cleanup/DEL D:%p T:%p Q:%d B:%p\n",
                                        dom, tm, qt, nb);
                                m0_net_buffer_del(nb, tm);
                                if (tm->ntm_bev_auto_deliver)
                                        ut_chan_timedwait(&cl, 10);
                                else {
                                        int j;
                                        m0_net_buffer_event_notify(tm,
                                                        &tm->ntm_chan);
                                        for (j = 0; j < 10; ++j) {
                                                ut_chan_timedwait(&cl, 1);
                                                m0_net_buffer_event_deliver_all
                                                        (tm);
                                        }
                                }
                                m0_clink_del_lock(&cl);
                        }
                        len = m0_tlist_length(&m0_net_tm_tl, &tm->ntm_q[qt]);
                        if (len != 0) {
                                NLXDBGP(td, 0,
                                        "Cleanup D:%p T:%p Q:%d B failed\n",
                                        dom, tm, qt);
                        }
                }
                /* iterate over end points */
                if (m0_nep_tlist_length(&tm->ntm_end_points) > 1) {
                        struct m0_net_end_point *ep;

                        m0_tl_for(m0_nep, &tm->ntm_end_points, ep) {
                                if (ep == tm->ntm_ep)
                                        continue;
                                while (m0_atomic64_get(&ep->nep_ref.ref_cnt) >=
                                       1) {
                                        NLXDBGP(td, 2,
                                                "Cleanup/PUT D:%p T:%p E:%p\n",
                                                dom, tm, ep);
                                        m0_net_end_point_put(ep);
                                }
                        } m0_tl_endfor;
                }
                if (m0_nep_tlist_length(&tm->ntm_end_points) > 1)
                        NLXDBGP(td, 0, "Cleanup D:%p T:%p E failed\n",
                                dom, tm);
        }

        m0_clink_fini(&cl);
}

#ifdef NLX_DEBUG
static void ut_describe_buf(const struct m0_net_buffer *nb)
{
#ifdef __KERNEL__
        struct nlx_xo_buffer *bp = nb->nb_xprt_private;
        struct nlx_core_buffer *lcbuf = &bp->xb_core;
        struct nlx_kcore_buffer *kcb = lcbuf->cb_kpvt;

        NLXP("\txo:%p lcbuf:%p kcb:%p\n",
             (void *) bp, (void *) lcbuf, (void *) kcb);
#endif
}

static void ut_describe_tm(const struct m0_net_transfer_mc *tm)
{
#ifdef __KERNEL__
        struct nlx_xo_transfer_mc *tp = tm->ntm_xprt_private;
        struct nlx_core_transfer_mc *lctm = &tp->xtm_core;
        struct nlx_kcore_transfer_mc *kctm = lctm->ctm_kpvt;

        NLXP("\txo:%p lctm:%p kctm:%p\n",
             (void *) tp, (void *) lctm, (void *) kctm);
        if (kctm != NULL) {
                nlx_kprint_lnet_handle("\tEQ1", kctm->ktm_eqh);
        }
#endif
}
#endif /* NLX_DEBUG */

static void ut_test_framework(ut_test_fw_body_t body,
                              ut_test_fw_prestart_cb_t ps_cb,
                              int dbg)
{
        struct ut_data *td;
        int i;
        int rc;

        /*
           Setup.
         */
        M0_ALLOC_PTR(td);
        M0_UT_ASSERT(td != NULL);
        if (td == NULL)
                return;
        td->_debug_ = dbg;

        m0_clink_init(&td->tmwait1, NULL);
        m0_clink_init(&td->tmwait2, NULL);
        td->tmcb.ntc_event_cb = ut_tm_ecb;
        TM1->ntm_callbacks = &td->tmcb;
        TM2->ntm_callbacks = &td->tmcb;
        for (i = M0_NET_QT_MSG_RECV; i < M0_NET_QT_NR; ++i) {
                td->buf_cb1.nbc_cb[i] = ut_buf_cb1;
                td->buf_cb2.nbc_cb[i] = ut_buf_cb2;
        }

#define SETUP_DOM(which) \
do { \
        struct m0_net_domain *dom = &td->dom ## which; \
        struct m0_net_transfer_mc *tm = &td->tm ## which; \
        char ***nidstrs = &td->nidstrs ## which; \
        M0_UT_ASSERT(!m0_net_domain_init(dom, &m0_net_lnet_xprt)); \
        M0_UT_ASSERT(!m0_net_lnet_ifaces_get(dom, nidstrs)); \
        M0_UT_ASSERT(*nidstrs != NULL && **nidstrs != NULL); \
        { \
                char epstr[M0_NET_LNET_XEP_ADDR_LEN]; \
                m0_bcount_t max_seg_size; \
                struct m0_net_buffer *nb; \
 \
                max_seg_size = m0_net_domain_get_max_buffer_segment_size(dom);\
                M0_UT_ASSERT(max_seg_size > 0); \
                M0_UT_ASSERT(max_seg_size >= UT_MSG_SIZE); \
                td->buf_size ## which = max_seg_size * UT_BUFSEGS ## which; \
                td->buf_seg_size ## which = max_seg_size; \
                for (i = 0; i < UT_BUFS ## which; ++i) { \
                        nb = &td->bufs ## which [i]; \
                        rc = m0_bufvec_alloc_aligned(&nb->nb_buffer, \
                                                     UT_BUFSEGS ## which, \
                                                     max_seg_size, \
                                                     UT_PAGE_SHIFT); \
                        M0_UT_ASSERT(rc == 0); \
                        if (rc != 0) { \
                                M0_IMPOSSIBLE("aborting: buf alloc failed"); \
                                goto dereg ## which; \
                        } \
                        rc = m0_net_buffer_register(nb, dom); \
                        if (rc != 0) { \
                                M0_IMPOSSIBLE("aborting: buf reg failed"); \
                                goto dereg ## which; \
                        } \
                        M0_UT_ASSERT(nb->nb_flags & M0_NET_BUF_REGISTERED); \
                        nb->nb_callbacks = &td->buf_cb ## which; \
                        NLXDBGPnl(td, 2, "[%d] D:%p T:%p B:%p [%u,%d]=%lu\n", \
                                  which, dom, tm, nb, (unsigned) max_seg_size,\
                                  UT_BUFSEGS ## which, \
                                  (unsigned long) td->buf_size ## which); \
                        NLXDBGnl(td, 2, ut_describe_buf(nb)); \
                } \
 \
                M0_UT_ASSERT(!m0_net_tm_init(tm, dom)); \
                if (ps_cb != NULL) \
                        (*ps_cb)(td, which); \
 \
                sprintf(epstr, "%s:%d:%d:*", \
                        **nidstrs, STARTSTOP_PID, STARTSTOP_PORTAL); \
                m0_clink_add_lock(&tm->ntm_chan, &td->tmwait ## which); \
                M0_UT_ASSERT(!m0_net_tm_start(tm, epstr)); \
                m0_chan_wait(&td->tmwait ## which); \
                m0_clink_del_lock(&td->tmwait ## which); \
                M0_UT_ASSERT(tm->ntm_state == M0_NET_TM_STARTED); \
                if (tm->ntm_state == M0_NET_TM_FAILED) { \
                        M0_IMPOSSIBLE("aborting: tm" #which " startup failed");\
                        goto fini ## which; \
                } \
                NLXDBGPnl(td, 2, "[%d] D:%p T:%p E:%s\n", which, dom, tm, \
                          tm->ntm_ep->nep_addr); \
                NLXDBGnl(td, 2, ut_describe_tm(tm)); \
        } \
} while (0)

#define TEARDOWN_DOM(which) \
do { \
        struct m0_net_domain *dom; \
        struct m0_net_transfer_mc *tm = &td->tm ## which; \
        m0_clink_add_lock(&tm->ntm_chan, &td->tmwait ## which); \
        M0_UT_ASSERT(!m0_net_tm_stop(tm, false)); \
        m0_chan_wait(&td->tmwait ## which); \
        m0_clink_del_lock(&td->tmwait ## which); \
        M0_UT_ASSERT(tm->ntm_state == M0_NET_TM_STOPPED); \
 fini ## which: \
        tm = &td->tm ## which; \
        m0_net_tm_fini(tm); \
 dereg ## which: \
        dom = &td->dom ## which; \
        for (i = 0; i < UT_BUFS ## which; ++i) { \
                struct m0_net_buffer *nb; \
                nb = &td->bufs ## which [i]; \
                if (nb->nb_buffer.ov_vec.v_nr == 0) \
                        continue; \
                m0_net_buffer_deregister(nb, dom); \
                m0_bufvec_free_aligned(&nb->nb_buffer, UT_PAGE_SHIFT); \
        } \
        if (td->nidstrs ## which != NULL) \
                m0_net_lnet_ifaces_put(dom, &td->nidstrs ## which); \
        M0_UT_ASSERT(td->nidstrs ## which == NULL); \
        m0_net_domain_fini(dom); \
} while (0)

        SETUP_DOM(1);
        SETUP_DOM(2);

        (*body)(td);

        ut_test_framework_dom_cleanup(td, DOM2);
        ut_test_framework_dom_cleanup(td, DOM1);

        TEARDOWN_DOM(2);
        TEARDOWN_DOM(1);

        m0_clink_fini(&td->tmwait1);
        m0_clink_fini(&td->tmwait2);
        m0_free(td);

#undef TEARDOWN_DOM
#undef SETUP_DOM

        return;
}
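
/*
 * ut_test_framework() brackets a test body with two fully started
 * domains/TMs and registered buffers, optionally invoking a pre-start
 * callback per TM (used by the synchronous-delivery test below).  A
 * minimal sketch of a new test written against it (my_test_body and
 * my_test are illustrative names, not part of this suite):
 */
#if 0
static void my_test_body(struct ut_data *td)
{
        /* TM1/TM2 and bufs1/bufs2 are registered and started here */
        NLXDBGPnl(td, 1, "TEST: my scenario\n");
}

static void my_test(void)
{
        ut_test_framework(&my_test_body, NULL, ut_verbose);
}
#endif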

/* ############################################################## */

#ifdef __KERNEL__
/* ... */
#endif

static int test_lnet_init(void)
{
        int rc;

#ifdef __KERNEL__
        rc = ktest_lnet_init();
#else
        {
                struct stat st;

                /* fail the entire suite if the LNet device is not present */
                rc = stat("/dev/" M0_LNET_DEV, &st);
                if (rc != 0)
                        rc = -errno;
                else if (!S_ISCHR(st.st_mode))
                        rc = -ENODEV;
        }
#endif
        if (rc == 0)
                ut_save_subs();
        return rc;
}

static int test_lnet_fini(void)
{
        ut_restore_subs();
#ifdef __KERNEL__
        ktest_lnet_fini();
#endif
        return 0;
}

static void ntc_event_callback(const struct m0_net_tm_event *ev)
{
}

#ifndef __KERNEL__
static void test_fail(void)
{
        static struct m0_net_domain dom = {
                .nd_xprt = NULL
        };
        struct nlx_xo_domain *dp;
        struct nlx_core_kmem_loc loc = { .kl_checksum = 0 };
        struct nlx_core_ep_addr cepa;
        const char *sav = nlx_ucore_dev_name;

        nlx_ucore_dev_name = "/dev/no such device";
        M0_UT_ASSERT(m0_net_domain_init(&dom, &m0_net_lnet_xprt) != 0);
        nlx_ucore_dev_name = sav;

        /* ... */

        M0_UT_ASSERT(!m0_net_domain_init(&dom, &m0_net_lnet_xprt));
        dp = dom.nd_xprt_private;
        M0_UT_ASSERT(nlx_core_ep_addr_decode(&dp->xd_core, "0@lo:xpid:0:0",
                                             &cepa) == -EINVAL);
        M0_UT_ASSERT(nlx_core_ep_addr_decode(&dp->xd_core, "0@lo:12345:xptl:0",
                                             &cepa) == -EINVAL);
        M0_UT_ASSERT(nlx_core_ep_addr_decode(&dp->xd_core, "0@lo:12345:33:xtm",
                                             &cepa) == -EINVAL);
        M0_UT_ASSERT(nlx_core_ep_addr_decode(&dp->xd_core, "0@lo:12345:33:0",
                                             &cepa) == -EINVAL);
        M0_UT_ASSERT(nlx_core_ep_addr_decode(&dp->xd_core, "0@lo:12345:33:1",
                                             &cepa) == 0);
        m0_net_domain_fini(&dom);
}
#endif

static void test_tm_initfini(void)
{
        static struct m0_net_domain dom1 = {
                .nd_xprt = NULL
        };
        const struct m0_net_tm_callbacks cbs1 = {
                .ntc_event_cb = ntc_event_callback,
        };
        struct m0_net_transfer_mc d1tm1 = {
                .ntm_callbacks = &cbs1,
                .ntm_state = M0_NET_TM_UNDEFINED
        };
        static char *n1t0 = "10.72.49.14@o2ib0:12345:31:0";
        static char *n1t1 = "10.72.49.14@o2ib0:12345:31:1";
        static char *n1ts = "10.72.49.14@o2ib0:12345:31:*";
        static char *n2t0 = "192.168.96.128@tcp1:12345:31:0";
        static char *n2t1 = "192.168.96.128@tcp1:12345:31:1";
        static char *n2ts = "192.168.96.128@tcp1:12345:31:*";

        /* TEST
           Network name comparison.
         */
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t0, n1t0) == 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t1, n1t1) == 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1ts, n1ts) == 0);

        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t0, n1t1) == 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t0, n1ts) == 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t1, n1ts) == 0);

        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t0, n2t0) == 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t1, n2t1) == 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2ts, n2ts) == 0);

        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t0, n2t1) == 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t0, n2ts) == 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t1, n2ts) == 0);

        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t0, n2t0) < 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t1, n2t0) < 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1ts, n2t0) < 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t0, n1t0) > 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t0, n1t1) > 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t0, n1ts) > 0);

        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t0, n2t1) < 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t1, n2t1) < 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1ts, n2t1) < 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t1, n1t0) > 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t1, n1t1) > 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2t1, n1ts) > 0);

        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t0, n2ts) < 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1t1, n2ts) < 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1ts, n2ts) < 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2ts, n1t0) > 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2ts, n1t1) > 0);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n2ts, n1ts) > 0);

        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp(n1ts, "foo") == -1);
        M0_UT_ASSERT(m0_net_lnet_ep_addr_net_cmp("foo", n1ts) == -1);

        /* TEST
           Domain setup.
         */
        M0_UT_ASSERT(!m0_net_domain_init(&dom1, &m0_net_lnet_xprt));
        M0_UT_ASSERT(!m0_net_tm_init(&d1tm1, &dom1));

        /* should be able to fini it immediately */
        m0_net_tm_fini(&d1tm1);
        M0_UT_ASSERT(d1tm1.ntm_state == M0_NET_TM_UNDEFINED);

        /* should be able to init it again */
        M0_UT_ASSERT(!m0_net_tm_init(&d1tm1, &dom1));
        /* ... */

        /* fini */
        m0_net_tm_fini(&d1tm1);
        m0_net_domain_fini(&dom1);
}
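
/*
 * The endpoint strings above follow the LNet transport address format
 * used throughout these tests, "nid:pid:portal:tmid", where the NID is
 * "address@network" (e.g. "10.72.49.14@o2ib0") and a tmid of "*"
 * requests dynamic transfer-machine id assignment.
 * m0_net_lnet_ep_addr_net_cmp() compares only the network portion of
 * the NID, which is why addresses differing only in tmid compare equal
 * above.  For illustration:
 */
#if 0
        /* equal: same network, different tmid */
        m0_net_lnet_ep_addr_net_cmp("10.72.49.14@o2ib0:12345:31:0",
                                    "10.72.49.14@o2ib0:12345:31:*"); /* == 0 */
        /* different networks: o2ib0 vs tcp1 */
        m0_net_lnet_ep_addr_net_cmp("10.72.49.14@o2ib0:12345:31:0",
                                    "192.168.96.128@tcp1:12345:31:0"); /* < 0 */
#endif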

#ifdef __KERNEL__
/* this test only applies in user space, see ulnet_core.c */
static unsigned nlx_ucore_nidstrs_thunk = 0;
#endif

static void test_tm_startstop(void)
{
        struct m0_net_domain *dom;
        struct m0_net_transfer_mc *tm;
        const struct m0_net_tm_callbacks cbs1 = {
                .ntc_event_cb = ut_tm_ecb,
        };
        static struct m0_clink tmwait1;
        char **nidstrs;
        const char *nid_to_use;
        char epstr[M0_NET_LNET_XEP_ADDR_LEN];
        char badportal_epstr[M0_NET_LNET_XEP_ADDR_LEN];
        char dyn_epstr[M0_NET_LNET_XEP_ADDR_LEN];
        char save_epstr[M0_NET_LNET_XEP_ADDR_LEN];
        struct m0_bitmap procs;
        unsigned thunk;
        int i;

        M0_ALLOC_PTR(dom);
        M0_ALLOC_PTR(tm);
        M0_UT_ASSERT(dom != NULL && tm != NULL);
        tm->ntm_callbacks = &cbs1;
        ecb_reset();

        /* also walk the realloc block in nlx_ucore_nidstrs_get */
        thunk = nlx_ucore_nidstrs_thunk;
        nlx_ucore_nidstrs_thunk = 1;
        M0_UT_ASSERT(!m0_net_domain_init(dom, &m0_net_lnet_xprt));
        M0_UT_ASSERT(!m0_net_lnet_ifaces_get(dom, &nidstrs));
        nlx_ucore_nidstrs_thunk = thunk;
        M0_UT_ASSERT(nidstrs != NULL && nidstrs[0] != NULL);
        nid_to_use = nidstrs[0];
        for (i = 0; nidstrs[i] != NULL; ++i) {
                if (strstr(nidstrs[i], "@lo") != NULL)
                        continue;
                nid_to_use = nidstrs[i];
                break;
        }
        sprintf(epstr, "%s:%d:%d:101",
                nid_to_use, STARTSTOP_PID, STARTSTOP_PORTAL);
        sprintf(badportal_epstr, "%s:%d:99:101", nid_to_use, STARTSTOP_PID);
        sprintf(dyn_epstr, "%s:%d:%d:*",
                nid_to_use, STARTSTOP_PID, STARTSTOP_PORTAL);
        m0_net_lnet_ifaces_put(dom, &nidstrs);
        M0_UT_ASSERT(nidstrs == NULL);

        /* test a couple of invalid cases first */
        M0_UT_ASSERT(!m0_net_tm_init(tm, dom));
        M0_UT_ASSERT(m0_net_tm_start(tm, "invalid") == -EINVAL);
        m0_net_tm_fini(tm);

        M0_UT_ASSERT(!m0_net_tm_init(tm, dom));
        m0_clink_init(&tmwait1, NULL);
        m0_clink_add_lock(&tm->ntm_chan, &tmwait1);
        M0_UT_ASSERT(!m0_net_tm_start(tm, badportal_epstr));
        m0_chan_wait(&tmwait1);
        m0_clink_del_lock(&tmwait1);
        M0_UT_ASSERT(ecb_count == 1);
        M0_UT_ASSERT(ecb_status == -EINVAL);
        M0_UT_ASSERT(tm->ntm_state == M0_NET_TM_FAILED);
        m0_net_tm_fini(tm);
        ecb_reset();

        M0_UT_ASSERT(!m0_net_tm_init(tm, dom));

        m0_clink_init(&tmwait1, NULL);
        m0_clink_add_lock(&tm->ntm_chan, &tmwait1);
        M0_UT_ASSERT(!m0_net_tm_start(tm, epstr));
        m0_chan_wait(&tmwait1);
        m0_clink_del_lock(&tmwait1);
        M0_UT_ASSERT(ecb_count == 1);
        M0_UT_ASSERT(ecb_evt == M0_NET_TEV_STATE_CHANGE);
        M0_UT_ASSERT(ecb_status == 0);
        M0_UT_ASSERT(ecb_tms == M0_NET_TM_STARTED);
        if (tm->ntm_state == M0_NET_TM_FAILED) {
                /* skip the rest of this test, else M0_ASSERT will occur */
                m0_net_tm_fini(tm);
                m0_net_domain_fini(dom);
                m0_free(tm);
                m0_free(dom);
                M0_IMPOSSIBLE("aborting test case, endpoint in-use?");
                return;
        }
        M0_UT_ASSERT(strcmp(tm->ntm_ep->nep_addr, epstr) == 0);

        m0_clink_add_lock(&tm->ntm_chan, &tmwait1);
        M0_UT_ASSERT(!m0_net_tm_stop(tm, true));
        m0_chan_wait(&tmwait1);
        m0_clink_del_lock(&tmwait1);
        M0_UT_ASSERT(tm->ntm_state == M0_NET_TM_STOPPED);
        m0_net_tm_fini(tm);
        m0_net_domain_fini(dom);
        m0_free(tm);
        m0_free(dom);

        /*
         * test the combination of dynamic endpoint, start with confine, and
         * multiple domains and TMs
         */
        M0_ALLOC_ARR(dom, STARTSTOP_DOM_NR);
        M0_ALLOC_ARR(tm, STARTSTOP_DOM_NR);
        M0_UT_ASSERT(dom != NULL && tm != NULL);

        for (i = 0; i < STARTSTOP_DOM_NR; ++i) {
                tm[i].ntm_callbacks = &cbs1;
                M0_UT_ASSERT(!m0_net_domain_init(&dom[i], &m0_net_lnet_xprt));
                M0_UT_ASSERT(!m0_net_tm_init(&tm[i], &dom[i]));
                M0_UT_ASSERT(m0_bitmap_init(&procs, 1) == 0);
                m0_bitmap_set(&procs, 0, true);
                M0_UT_ASSERT(m0_net_tm_confine(&tm[i], &procs) == 0);
                /* 2x, to walk the re-confine path */
                M0_UT_ASSERT(m0_net_tm_confine(&tm[i], &procs) == 0);
                m0_bitmap_fini(&procs);

                ecb_reset();
                m0_clink_add_lock(&tm[i].ntm_chan, &tmwait1);
                M0_UT_ASSERT(!m0_net_tm_start(&tm[i], dyn_epstr));
                m0_chan_wait(&tmwait1);
                m0_clink_del_lock(&tmwait1);
                M0_UT_ASSERT(tm[i].ntm_state == M0_NET_TM_STARTED);
                M0_UT_ASSERT(strcmp(tm[i].ntm_ep->nep_addr, dyn_epstr) != 0);
                if (i > 0)
                        M0_UT_ASSERT(strcmp(tm[i].ntm_ep->nep_addr,
                                            tm[i-1].ntm_ep->nep_addr) < 0);
        }

        /* subtest: dynamic TMID reuse using the middle TM */
        strcpy(save_epstr, tm[1].ntm_ep->nep_addr);
        m0_clink_add_lock(&tm[1].ntm_chan, &tmwait1);
        M0_UT_ASSERT(!m0_net_tm_stop(&tm[1], false));
        m0_chan_wait(&tmwait1);
        m0_clink_del_lock(&tmwait1);
        M0_UT_ASSERT(tm[1].ntm_state == M0_NET_TM_STOPPED);
        m0_net_tm_fini(&tm[1]);
        M0_UT_ASSERT(!m0_net_tm_init(&tm[1], &dom[1]));

        m0_clink_add_lock(&tm[1].ntm_chan, &tmwait1);
        M0_UT_ASSERT(!m0_net_tm_start(&tm[1], dyn_epstr));
        m0_chan_wait(&tmwait1);
        m0_clink_del_lock(&tmwait1);
        M0_UT_ASSERT(tm[1].ntm_state == M0_NET_TM_STARTED);
        M0_UT_ASSERT(strcmp(tm[1].ntm_ep->nep_addr, save_epstr) == 0);

        for (i = 0; i < STARTSTOP_DOM_NR; ++i) {
                m0_clink_add_lock(&tm[i].ntm_chan, &tmwait1);
                M0_UT_ASSERT(!m0_net_tm_stop(&tm[i], false));
                m0_chan_wait(&tmwait1);
                m0_clink_del_lock(&tmwait1);
                M0_UT_ASSERT(tm[i].ntm_state == M0_NET_TM_STOPPED);
                m0_net_tm_fini(&tm[i]);
                m0_net_domain_fini(&dom[i]);
        }
        m0_free(tm);
        m0_free(dom);
        m0_clink_fini(&tmwait1);
}
893 
894 /* test_msg_body */
895 enum {
897 };

/*
 * Sub to send messages from TM2 to TM1 until the latter's buffer is
 * expected to fill.  TM1 is primed with the specified number of receive
 * buffers.
 */
static bool test_msg_send_loop(struct ut_data *td,
                               uint32_t num_recv_bufs,
                               uint32_t recv_max_msgs,
                               struct m0_net_end_point *ep2,
                               m0_bcount_t send_len_first,
                               m0_bcount_t send_len_rest,
                               bool space_exhausted)
{
        struct m0_net_buffer *nb1;
        struct m0_net_buffer *nb2;
        m0_bcount_t msg_size;
        m0_bcount_t offset;
        m0_bcount_t space_left;
        unsigned bevs_left;
        unsigned char seed;
        int msg_num;
        bool rc = false;
        uint32_t rb_num;
        m0_bcount_t total_bytes_sent;

        ut_cbreset();
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM1, M0_NET_QT_MSG_RECV,
                                          &td->qs, true));
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM2, M0_NET_QT_MSG_SEND,
                                          &td->qs, true));

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        m0_clink_add_lock(&TM2->ntm_chan, &td->tmwait2);

        /* UT sanity check: messages within buffer bounds */
        if (send_len_first > td->buf_size1 || send_len_first > td->buf_size2 ||
            send_len_rest > td->buf_size1 || send_len_rest > td->buf_size2) {
                M0_UT_ASSERT(!(send_len_first > td->buf_size1 ||
                               send_len_first > td->buf_size2 ||
                               send_len_rest > td->buf_size1 ||
                               send_len_rest > td->buf_size2));
        }

        for (rb_num = 0; rb_num < num_recv_bufs && rb_num < UT_BUFS1; ++rb_num){
                nb1 = &td->bufs1[rb_num];
                nb1->nb_min_receive_size = max64u(send_len_first,send_len_rest);
                nb1->nb_max_receive_msgs = recv_max_msgs;
                nb1->nb_qtype = M0_NET_QT_MSG_RECV;
                zUT(m0_net_buffer_add(nb1, TM1));
        }
        if (rb_num != num_recv_bufs) {
                M0_UT_ASSERT(rb_num == num_recv_bufs);
        }

#define RESET_RECV_COUNTERS() \
        do { \
                offset = 0; \
                bevs_left = recv_max_msgs; \
                /* 1 buf only as all recv space not used */ \
                space_left = td->buf_size1; \
        } while (0)

        RESET_RECV_COUNTERS();
        rb_num = 1;

        total_bytes_sent = 0;
        msg_size = send_len_first;
        msg_num = 0;
        seed = 'a';
        nb2 = &td->bufs2[0];
        while (msg_size <= space_left && bevs_left > 0) {
                msg_num++;
                nb1 = &td->bufs1[rb_num-1];

                ut_net_buffer_sign(nb2, msg_size, seed);
                M0_UT_ASSERT(ut_net_buffer_authenticate(nb2, msg_size, 0,seed));
                nb2->nb_qtype = M0_NET_QT_MSG_SEND;
                nb2->nb_length = msg_size;
                nb2->nb_ep = ep2;

                NLXDBGPnl(td, 2, "\t%s S%d %lu bytes -> %s\n",
                          TM2->ntm_ep->nep_addr, msg_num,
                          (unsigned long) msg_size, ep2->nep_addr);
                cb_save_ep1 = true;
                zUT(m0_net_buffer_add(nb2, TM2));

                m0_chan_wait(&td->tmwait2);
                M0_UT_ASSERT(cb_called2 == msg_num);
                M0_UT_ASSERT(cb_qt2 == M0_NET_QT_MSG_SEND);
                M0_UT_ASSERT(cb_nb2 == nb2);
                M0_UT_ASSERT(cb_status2 == 0);
                M0_UT_ASSERT(!m0_net_tm_stats_get(TM2, M0_NET_QT_MSG_SEND,
                                                  &td->qs, false));
                M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
                M0_UT_ASSERT(td->qs.nqs_num_s_events == msg_num);
                M0_UT_ASSERT(td->qs.nqs_num_adds == msg_num);
                M0_UT_ASSERT(td->qs.nqs_num_dels == 0);
                total_bytes_sent += msg_size;

                m0_chan_wait(&td->tmwait1);
                space_left -= cb_length1;
                NLXDBGPnl(td, 2,
                          "\t%s R%d %lu bytes <- %s off %lu left %lu/%d\n",
                          cb_nb1->nb_tm->ntm_ep->nep_addr, (unsigned) rb_num,
                          (unsigned long) cb_length1,
                          cb_ep1 != NULL ? cb_ep1->nep_addr : "<null>",
                          (unsigned long) offset,
                          (unsigned long) space_left, bevs_left);
                M0_UT_ASSERT(cb_called1 == msg_num);
                M0_UT_ASSERT(cb_qt1 == M0_NET_QT_MSG_RECV);
                M0_UT_ASSERT(cb_nb1 == nb1);
                M0_UT_ASSERT(cb_status1 == 0);
                M0_UT_ASSERT(cb_offset1 == offset);
                offset += cb_length1;
                M0_UT_ASSERT(cb_length1 == msg_size);
                M0_UT_ASSERT(ut_net_buffer_authenticate(nb1, msg_size,
                                                        cb_offset1, seed));
                M0_UT_ASSERT(cb_ep1 != NULL &&
                             strcmp(TM2->ntm_ep->nep_addr,cb_ep1->nep_addr)==0);
                M0_UT_ASSERT(!m0_net_tm_stats_get(TM1, M0_NET_QT_MSG_RECV,
                                                  &td->qs, false));
                M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
                M0_UT_ASSERT(td->qs.nqs_num_s_events == msg_num);
                M0_UT_ASSERT(td->qs.nqs_num_adds == num_recv_bufs);
                M0_UT_ASSERT(td->qs.nqs_num_dels == 0);

                msg_size = send_len_rest;
                ++seed;
                --bevs_left;

                if (!(cb_nb1->nb_flags & M0_NET_BUF_QUEUED)) {
                        /* next receive buffer */
                        ++rb_num;
                        if (rb_num <= num_recv_bufs)
                                RESET_RECV_COUNTERS();
                        else
                                break;
                }
        }
        if (space_exhausted) {
                M0_UT_ASSERT(msg_size > space_left);
        } else
                M0_UT_ASSERT(bevs_left == 0);

        M0_UT_ASSERT(total_bytes_sent >= (num_recv_bufs - 1) * td->buf_size1);

        M0_UT_ASSERT(rb_num == num_recv_bufs + 1);
        M0_UT_ASSERT(cb_nb1 == &td->bufs1[num_recv_bufs - 1]);
        for (rb_num = 0; rb_num < num_recv_bufs; ++rb_num) {
                nb1 = &td->bufs1[rb_num];
                M0_UT_ASSERT(!(nb1->nb_flags & M0_NET_BUF_QUEUED));
        }

        while (msg_num-- > 0)
                m0_net_end_point_put(cb_ep1);
        cb_ep1 = NULL;

        rc = true;
        m0_clink_del_lock(&td->tmwait2);
        m0_clink_del_lock(&td->tmwait1);
        return rc;

#undef RESET_RECV_COUNTERS
}
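
/*
 * The loop above terminates either because the next message no longer
 * fits in the remaining receive space (space_exhausted == true) or
 * because the receiver's per-buffer message quota ran out.  Callers
 * choose the mode through the argument pair; both calls below appear in
 * test_msg_body():
 */
#if 0
        /* stop on the message quota: small first message, ample space */
        test_msg_send_loop(td, 1, UT_MSG_OPS, ep2,
                           UT_MSG_SIZE / 3, UT_MSG_SIZE, false);
        /* stop on space: quota doubled, every message full-sized */
        test_msg_send_loop(td, 1, UT_MSG_OPS * 2, ep2,
                           UT_MSG_SIZE, UT_MSG_SIZE, true);
#endif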

static void test_msg_body(struct ut_data *td)
{
        struct m0_net_buffer *nb1;
        struct m0_net_buffer *nb2;
        struct m0_net_end_point *ep2;
        m0_bcount_t msg_size;
        unsigned char seed;

        nb1 = &td->bufs1[0];
        nb2 = &td->bufs2[0];

        /* TEST
           Add a buffer for message receive then cancel it.
         */
        nb1->nb_min_receive_size = UT_MSG_SIZE;
        nb1->nb_max_receive_msgs = 1;
        nb1->nb_qtype = M0_NET_QT_MSG_RECV;

        NLXDBGPnl(td, 1, "TEST: add/del on the receive queue\n");

        ut_cbreset();
        zUT(m0_net_buffer_add(nb1, TM1));

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        m0_net_buffer_del(nb1, TM1);
        ut_chan_timedwait(&td->tmwait1, 10);
        m0_clink_del_lock(&td->tmwait1);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_status1 == -ECANCELED);
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM1, M0_NET_QT_MSG_RECV,
                                          &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_f_events == 1);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 0);
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 1);

        /* TEST
           Add a buffer for receive in TM1 and send multiple messages
           from TM2.
         */

        /* check that sign/authenticate work */
        msg_size = UT_MSG_SIZE;
        seed = 'a';
        ut_net_buffer_sign(nb2, msg_size, seed);
        M0_UT_ASSERT(ut_net_buffer_authenticate(nb2, msg_size, 0, seed));
        M0_UT_ASSERT(!ut_net_buffer_authenticate(nb2, msg_size - 1, 0, seed));
        M0_UT_ASSERT(!ut_net_buffer_authenticate(nb2, msg_size, 0, seed + 1));

        /* sanity check */
        if (UT_MSG_SIZE >= td->buf_seg_size1) {
                M0_UT_ASSERT(UT_MSG_SIZE < td->buf_seg_size1);
                goto done;
        }

        /* get the destination TM address */
        zUT(m0_net_end_point_create(&ep2, TM2, TM1->ntm_ep->nep_addr));

        /* send until max receive messages is reached */
        NLXDBGPnl(td, 1, "TEST: send until max receive messages reached "
                  "(1 receive buffer)\n");
        M0_UT_ASSERT(test_msg_send_loop(td, 1, UT_MSG_OPS, ep2,
                                        UT_MSG_SIZE / 3, UT_MSG_SIZE, false));

        NLXDBGPnl(td, 1, "TEST: send until max receive messages reached "
                  "(2 receive buffers, > 1 seg)\n");
        M0_UT_ASSERT(test_msg_send_loop(td, 2, UT_MSG_OPS, ep2,
                                        td->buf_seg_size1 + UT_MSG_SIZE,
                                        UT_MSG_SIZE, false));

        /* send until space is exhausted */
        NLXDBGPnl(td, 1, "TEST: send until receive space exhausted "
                  "(1 receive buffer)\n");
        M0_UT_ASSERT(test_msg_send_loop(td, 1, UT_MSG_OPS * 2, ep2,
                                        UT_MSG_SIZE, UT_MSG_SIZE, true));

        NLXDBGPnl(td, 1, "TEST: send until receive space exhausted "
                  "(2 receive buffers, > 1 seg)\n");
        M0_UT_ASSERT(test_msg_send_loop(td, 2, UT_MSG_OPS * 2, ep2,
                                        td->buf_seg_size1 + UT_MSG_SIZE,
                                        td->buf_seg_size1 + UT_MSG_SIZE,
                                        true));

        /* TEST
           Send a message when there is no receive buffer.
         */
        NLXDBGPnl(td, 1, "TEST: send/no receive buffer - no error expected\n");

        ut_cbreset();
        m0_clink_add_lock(&TM2->ntm_chan, &td->tmwait2);
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM2, M0_NET_QT_MSG_SEND,
                                          &td->qs, true));

        nb2->nb_qtype = M0_NET_QT_MSG_SEND;
        nb2->nb_length = UT_MSG_SIZE;
        nb2->nb_ep = ep2;
        NLXDBGPnl(td, 2, "\t%s S%d %lu bytes -> %s\n",
                  TM2->ntm_ep->nep_addr, 1,
                  (unsigned long) UT_MSG_SIZE, ep2->nep_addr);
        zUT(m0_net_buffer_add(nb2, TM2));

        m0_chan_wait(&td->tmwait2);
        M0_UT_ASSERT(cb_called2 == 1);
        M0_UT_ASSERT(cb_qt2 == M0_NET_QT_MSG_SEND);
        M0_UT_ASSERT(cb_nb2 == nb2);
        M0_UT_ASSERT(cb_status2 != 1); /* send doesn't see the error */
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM2, M0_NET_QT_MSG_SEND,
                                          &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 1);
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 0);

        m0_net_end_point_put(ep2);
        ep2 = NULL;

        /* TEST
           Send a message to a non-existent TM address.
         */
        NLXDBGPnl(td, 1, "TEST: send / non-existent TM - no error expected\n");

        { /* create a destination end point */
                char epstr[M0_NET_LNET_XEP_ADDR_LEN];
                /* verify that a dynamic end point is not allowed here */
                sprintf(epstr, "%s:%d:%d:*",
                        **td->nidstrs2, STARTSTOP_PID, STARTSTOP_PORTAL);
                zvUT(m0_net_end_point_create(&ep2, TM2, epstr), -EINVAL);
                sprintf(epstr, "%s:%d:%d:1024",
                        **td->nidstrs2, STARTSTOP_PID, STARTSTOP_PORTAL);
                zUT(m0_net_end_point_create(&ep2, TM2, epstr));
        }

        ut_cbreset();

        nb2->nb_qtype = M0_NET_QT_MSG_SEND;
        nb2->nb_length = UT_MSG_SIZE;
        nb2->nb_ep = ep2;
        NLXDBGPnl(td, 2, "\t%s S%d %lu bytes -> %s\n",
                  TM2->ntm_ep->nep_addr, 1,
                  (unsigned long) UT_MSG_SIZE, ep2->nep_addr);
        zUT(m0_net_buffer_add(nb2, TM2));

        m0_chan_wait(&td->tmwait2);
        M0_UT_ASSERT(cb_called2 == 1);
        M0_UT_ASSERT(cb_qt2 == M0_NET_QT_MSG_SEND);
        M0_UT_ASSERT(cb_nb2 == nb2);
        M0_UT_ASSERT(cb_status2 == 0); /* send doesn't see the error */
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM2, M0_NET_QT_MSG_SEND,
                                          &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 1);
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 0);

        m0_net_end_point_put(ep2);
        ep2 = NULL;

        m0_clink_del_lock(&td->tmwait2);
 done:
        return;
}
static void test_msg(void)
{
        ut_test_framework(&test_msg_body, NULL, ut_verbose);
}

static void test_buf_desc_body(struct ut_data *td)
{
        struct nlx_xo_transfer_mc *tp1 = TM1->ntm_xprt_private;
        struct nlx_core_transfer_mc *lctm1 = &tp1->xtm_core;
        struct m0_net_buffer *nb1 = &td->bufs1[0];
        struct nlx_xo_buffer *bp1 = nb1->nb_xprt_private;
        struct nlx_core_buffer *lcbuf1 = &bp1->xb_core;
        struct nlx_xo_transfer_mc *tp2 = TM2->ntm_xprt_private;
        struct nlx_core_transfer_mc *lctm2 = &tp2->xtm_core;
        struct m0_net_buffer *nb2 = &td->bufs2[0];
        struct nlx_xo_buffer *bp2 = nb2->nb_xprt_private;
        struct nlx_core_buffer *lcbuf2 = &bp2->xb_core;
        uint32_t tmid;
        uint64_t counter;
        int rc;

        /* TEST
           Check that match bit decode reverses encode.
         */
        NLXDBGPnl(td, 1, "TEST: match bit encoding\n");
#define TEST_MATCH_BIT_ENCODE(_t, _c) \
        nlx_core_match_bits_decode(nlx_core_match_bits_encode((_t),(_c)), \
                                   &tmid, &counter); \
        M0_UT_ASSERT(tmid == (_t)); \
        M0_UT_ASSERT(counter == (_c))

        TEST_MATCH_BIT_ENCODE(0, 0);
        /* ... */

#undef TEST_MATCH_BIT_ENCODE
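
        /*
         * The encode/decode pair under test packs the transfer-machine
         * id and a per-TM buffer counter into the single 64-bit LNet
         * match-bits value, so decode must invert encode exactly for
         * any (tmid, counter) pair.  A sketch of such a packing; the
         * COUNTER_BITS width here is hypothetical, the real field
         * widths are defined by the transport:
         */
#if 0
        enum { COUNTER_BITS = 52 };           /* hypothetical width */
        uint64_t bits = ((uint64_t) tmid << COUNTER_BITS) | counter;
        uint32_t tmid_out    = bits >> COUNTER_BITS;
        uint64_t counter_out = bits & ((1ULL << COUNTER_BITS) - 1);
#endif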


        /* TEST
           Check that conversion to and from the external opaque descriptor
           form preserves the internal descriptor.
         */
        NLXDBGPnl(td, 1, "TEST: buffer descriptor S(export, import)\n");
        memset(CBD1, 0xf7, sizeof *CBD1); /* arbitrary pattern */
        memset(CBD2, 0xab, sizeof *CBD2); /* different arbitrary pattern */
        M0_UT_ASSERT(sizeof *CBD1 == sizeof *CBD2);
        M0_UT_ASSERT(memcmp(CBD1, CBD2, sizeof *CBD1) != 0);

        zUT(nlx_xo__nbd_allocate(TM1, CBD1, &nb1->nb_desc));
        M0_UT_ASSERT(nb1->nb_desc.nbd_len == sizeof *CBD1);
        M0_UT_ASSERT(nlx_xo__nbd_recover(TM1, &nb1->nb_desc, CBD2) == 0);
        M0_UT_ASSERT(memcmp(CBD1, CBD2, sizeof *CBD1) == 0);

        /* TEST
           Check that an invalid descriptor length will fail to convert to
           internal form, and will not modify the supplied internal desc.
         */
        NLXDBGPnl(td, 1, "TEST: buffer descriptor F(import invalid size)\n");
        nb1->nb_desc.nbd_len++;
        memset(CBD1, 0x35, sizeof *CBD1); /* arbitrary pattern, not the desc */
        memcpy(CBD2, CBD1, sizeof *CBD2); /* same pattern */
        M0_UT_ASSERT(nlx_xo__nbd_recover(TM1, &nb1->nb_desc, CBD2) == -EINVAL);
        M0_UT_ASSERT(memcmp(CBD1, CBD2, sizeof *CBD1) == 0); /* unchanged */

        m0_net_desc_free(&nb1->nb_desc);

#define VALIDATE_MATCH_BITS(mb, s_lctm) \
        nlx_core_match_bits_decode(mb, &tmid, &counter); \
        M0_UT_ASSERT(tmid == s_lctm->ctm_addr.cepa_tmid); \
        M0_UT_ASSERT(counter == s_lctm->ctm_mb_counter - 1)

        /* TEST
           Passive send buffer descriptor.
           Ensure that the match bits get properly set, that the
           descriptor properly encodes buffer and end point data,
           and that the data can be properly decoded.
           Test with an exact size receive buffer and a larger one.
         */
        NLXDBGPnl(td, 1, "TEST: encode buffer descriptor S(PS1)\n");

        lcbuf1->cb_qtype = M0_NET_QT_PASSIVE_BULK_SEND;
        lcbuf1->cb_length = td->buf_size1; /* arbitrary */
        M0_SET0(&lcbuf1->cb_addr);
        m0_mutex_lock(&TM1->ntm_mutex);
        nlx_core_buf_desc_encode(lctm1, lcbuf1, CBD1);
        m0_mutex_unlock(&TM1->ntm_mutex);
        VALIDATE_MATCH_BITS(lcbuf1->cb_match_bits, lctm1);

        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor S(AR2 == PS1)\n");
        M0_SET0(&lcbuf2->cb_addr); /* clear target areas of buf2 */
        M0_UT_ASSERT(!nlx_core_ep_eq(&lctm1->ctm_addr, &lcbuf2->cb_addr));
        lcbuf2->cb_match_bits = 0;
        M0_UT_ASSERT(lcbuf2->cb_match_bits != lcbuf1->cb_match_bits);
        /* decode - buf2 on the right queue and of adequate size */
        lcbuf2->cb_length = lcbuf1->cb_length; /* same size as send buffer */
        lcbuf2->cb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        m0_mutex_lock(&TM2->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm2, lcbuf2, CBD1);
        m0_mutex_unlock(&TM2->ntm_mutex);
        M0_UT_ASSERT(rc == 0);
        /* buf2 target address set to TM1, and size/bits set to buf1 */
        M0_UT_ASSERT(nlx_core_ep_eq(&lctm1->ctm_addr, &lcbuf2->cb_addr));
        M0_UT_ASSERT(lcbuf2->cb_length == lcbuf1->cb_length);
        M0_UT_ASSERT(lcbuf2->cb_match_bits == lcbuf1->cb_match_bits);

        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor F(corrupt)\n");
        /* decode - everything correct, like above, but corrupt descriptor */
        counter = CBD1->cbd_data[2]; /* save some location */
        CBD1->cbd_data[2]++; /* modify, arbitrarily different */
        m0_mutex_lock(&TM2->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm2, lcbuf2, CBD1);
        m0_mutex_unlock(&TM2->ntm_mutex);
        M0_UT_ASSERT(rc == -EINVAL);
        CBD1->cbd_data[2] = counter; /* restore */

        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor S(AR2 > PS1)\n");
        M0_SET0(&lcbuf2->cb_addr); /* clear target areas of buf2 */
        M0_UT_ASSERT(!nlx_core_ep_eq(&lctm1->ctm_addr, &lcbuf2->cb_addr));
        lcbuf2->cb_match_bits = 0;
        M0_UT_ASSERT(lcbuf2->cb_match_bits != lcbuf1->cb_match_bits);
        /* decode - buf2 on the right queue and > send buffer size */
        lcbuf2->cb_length = lcbuf1->cb_length + 1;
        lcbuf2->cb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        m0_mutex_lock(&TM2->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm2, lcbuf2, CBD1);
        m0_mutex_unlock(&TM2->ntm_mutex);
        M0_UT_ASSERT(rc == 0);
        /* buf2 target address set to TM1, and size/bits set to buf1 */
        M0_UT_ASSERT(nlx_core_ep_eq(&lctm1->ctm_addr, &lcbuf2->cb_addr));
        M0_UT_ASSERT(lcbuf2->cb_length == lcbuf1->cb_length); /* passive size */
        M0_UT_ASSERT(lcbuf2->cb_match_bits == lcbuf1->cb_match_bits);

        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor F(AR2 < PS1)\n");
        lcbuf2->cb_length = lcbuf1->cb_length - 1; /* < send buffer size */
        lcbuf2->cb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        m0_mutex_lock(&TM2->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm2, lcbuf2, CBD1);
        m0_mutex_unlock(&TM2->ntm_mutex);
        M0_UT_ASSERT(rc == -EFBIG);

        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor F(AS2 == PS1)\n");
        lcbuf1->cb_length = lcbuf1->cb_length; /* same size as send buffer */
        lcbuf1->cb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        m0_mutex_lock(&TM1->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm1, lcbuf1, CBD1);
        m0_mutex_unlock(&TM1->ntm_mutex);
        M0_UT_ASSERT(rc == -EPERM);

        /* TEST
           Passive receive buffer descriptor.
           Ensure that the match bits get properly set, that the
           descriptor properly encodes buffer and end point data,
           and that the data can be properly decoded.
           Test with an exact size send buffer and a smaller one.
         */
        M0_UT_ASSERT(td->buf_size1 >= td->buf_size2); /* sanity check */
        NLXDBGPnl(td, 1, "TEST: encode buffer descriptor S(PR2)\n");

        lcbuf2->cb_qtype = M0_NET_QT_PASSIVE_BULK_RECV;
        M0_SET0(&lcbuf2->cb_addr);
        lcbuf2->cb_length = td->buf_size2; /* arbitrary */
        m0_mutex_lock(&TM2->ntm_mutex);
        nlx_core_buf_desc_encode(lctm2, lcbuf2, CBD2);
        m0_mutex_unlock(&TM2->ntm_mutex);
        VALIDATE_MATCH_BITS(lcbuf2->cb_match_bits, lctm2);

        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor S(AS1 == PR2)\n");
        M0_SET0(&lcbuf1->cb_addr); /* clear target areas of buf1 */
        M0_UT_ASSERT(!nlx_core_ep_eq(&lctm2->ctm_addr, &lcbuf1->cb_addr));
        lcbuf1->cb_match_bits = 0;
        M0_UT_ASSERT(lcbuf1->cb_match_bits != lcbuf2->cb_match_bits);
        /* decode - buf1 on the right queue and of adequate size */
        lcbuf1->cb_length = lcbuf2->cb_length; /* same size as receive buffer */
        lcbuf1->cb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        m0_mutex_lock(&TM1->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm1, lcbuf1, CBD2);
        m0_mutex_unlock(&TM1->ntm_mutex);
        M0_UT_ASSERT(rc == 0);
        /* buf1 target address set to TM2, and size/bits set to buf2 */
        M0_UT_ASSERT(nlx_core_ep_eq(&lctm2->ctm_addr, &lcbuf1->cb_addr));
        M0_UT_ASSERT(lcbuf1->cb_length == lcbuf2->cb_length);
        M0_UT_ASSERT(lcbuf1->cb_match_bits == lcbuf2->cb_match_bits);

        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor S(AS1 < PR2)\n");
        M0_SET0(&lcbuf1->cb_addr); /* clear target areas of buf1 */
        M0_UT_ASSERT(!nlx_core_ep_eq(&lctm2->ctm_addr, &lcbuf1->cb_addr));
        lcbuf1->cb_match_bits = 0;
        M0_UT_ASSERT(lcbuf1->cb_match_bits != lcbuf2->cb_match_bits);
        /* decode - buf1 on the right queue and < receive buffer size */
        lcbuf1->cb_length = lcbuf2->cb_length - 1;
        lcbuf1->cb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        m0_mutex_lock(&TM1->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm1, lcbuf1, CBD2);
        m0_mutex_unlock(&TM1->ntm_mutex);
        M0_UT_ASSERT(rc == 0);
        /* buf1 target address set to TM2, and size/bits set to buf2 */
        M0_UT_ASSERT(nlx_core_ep_eq(&lctm2->ctm_addr, &lcbuf1->cb_addr));
        M0_UT_ASSERT(lcbuf1->cb_length == lcbuf2->cb_length - 1);/* active sz */
        M0_UT_ASSERT(lcbuf1->cb_match_bits == lcbuf2->cb_match_bits);

#undef VALIDATE_MATCH_BITS

        /* TEST
           Failure tests for this setup: invalid usage, wrong sizes.
         */
        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor F(AS1 > PR2)\n");
        lcbuf1->cb_length = lcbuf2->cb_length + 1; /* > receive buffer size */
        lcbuf1->cb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        m0_mutex_lock(&TM1->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm1, lcbuf1, CBD2);
        m0_mutex_unlock(&TM1->ntm_mutex);
        M0_UT_ASSERT(rc == -EFBIG);

        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor F(AR1 == PR2)\n");
        lcbuf1->cb_length = lcbuf2->cb_length; /* same size as receive buffer */
        lcbuf1->cb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        m0_mutex_lock(&TM1->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm1, lcbuf1, CBD2);
        m0_mutex_unlock(&TM1->ntm_mutex);
        M0_UT_ASSERT(rc == -EPERM);

        /* TEST
           Invalid descriptor cases.
         */
        *CBD1 = *CBD2;

        NLXDBGPnl(td, 1, "TEST: decode buffer descriptor F(corrupt)\n");
        CBD2->cbd_match_bits++; /* invalidates the checksum */
        lcbuf1->cb_length = lcbuf2->cb_length; /* same size as receive buffer */
        lcbuf1->cb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        m0_mutex_lock(&TM1->ntm_mutex);
        rc = nlx_core_buf_desc_decode(lctm1, lcbuf1, CBD2);
        m0_mutex_unlock(&TM1->ntm_mutex);
        M0_UT_ASSERT(rc == -EINVAL);

        /* TEST
           Check that the match bit counter wraps.
         */
        NLXDBGPnl(td, 1, "TEST: match bit counter wraps\n");
        lctm1->ctm_mb_counter = M0_NET_LNET_BUFFER_ID_MAX;
        lcbuf1->cb_qtype = M0_NET_QT_PASSIVE_BULK_SEND;
        lcbuf1->cb_length = td->buf_size1; /* arbitrary */
        m0_mutex_lock(&TM1->ntm_mutex);
        nlx_core_buf_desc_encode(lctm1, lcbuf1, CBD1);
        m0_mutex_unlock(&TM1->ntm_mutex);
        nlx_core_match_bits_decode(lcbuf1->cb_match_bits, &tmid, &counter);
        M0_UT_ASSERT(tmid == lctm1->ctm_addr.cepa_tmid);
        M0_UT_ASSERT(counter == M0_NET_LNET_BUFFER_ID_MAX);

        lcbuf1->cb_qtype = M0_NET_QT_PASSIVE_BULK_RECV; /* qt doesn't matter */
        m0_mutex_lock(&TM1->ntm_mutex);
        nlx_core_buf_desc_encode(lctm1, lcbuf1, CBD1);
        m0_mutex_unlock(&TM1->ntm_mutex);
        nlx_core_match_bits_decode(lcbuf1->cb_match_bits, &tmid, &counter);
        M0_UT_ASSERT(tmid == lctm1->ctm_addr.cepa_tmid);
        M0_UT_ASSERT(counter == M0_NET_LNET_BUFFER_ID_MIN);

        return;
}

static void test_buf_desc(void)
{
        ut_test_framework(&test_buf_desc_body, NULL, ut_verbose);
}

static int test_bulk_passive_send(struct ut_data *td)
{
        struct m0_net_buffer *nb1 = &td->bufs1[0]; /* passive */
        struct m0_net_buffer *nb2 = &td->bufs2[0]; /* active */
        struct m0_net_buffer *nb2s = NULL;
        unsigned char seed = 's';
        m0_bcount_t pBytes = UT_BULK_SIZE;
        int rc = -1;

        ut_cbreset();

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        m0_clink_add_lock(&TM2->ntm_chan, &td->tmwait2);

        /* stage the passive send buffer */
        M0_UT_ASSERT(td->buf_size1 >= pBytes);
        if (td->buf_size1 < pBytes)
                goto failed;
        nb1->nb_length = pBytes;
        ut_net_buffer_sign(nb1, nb1->nb_length, seed);
        nb1->nb_qtype = M0_NET_QT_PASSIVE_BULK_SEND;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(nb1->nb_desc.nbd_len != 0);
        M0_UT_ASSERT(nb1->nb_desc.nbd_data != NULL);

        zUT(m0_net_desc_copy(&nb1->nb_desc, &nb2->nb_desc));

        /* ensure that an active send fails */
        NLXDBGPnl(td, 1, "TEST: bulk transfer F(PS !~ AS)\n");
        nb2->nb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        nb2->nb_length = pBytes;
        zvUT(m0_net_buffer_add(nb2, TM2), -EPERM);

        /* try to receive into a smaller buffer */
        NLXDBGPnl(td, 1, "TEST: bulk transfer F(PS > AR)\n");
        M0_UT_ASSERT(pBytes > td->buf_seg_size2); /* sanity */
        zUT((M0_ALLOC_PTR(nb2s) == NULL));
        zUT(m0_bufvec_alloc_aligned(&nb2s->nb_buffer, 1, td->buf_seg_size2,
                                    UT_PAGE_SHIFT));
        zUT(m0_net_buffer_register(nb2s, DOM2));
        zUT(m0_net_desc_copy(&nb1->nb_desc, &nb2s->nb_desc));
        nb2s->nb_length = td->buf_seg_size2;
        nb2s->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        zvUT(m0_net_buffer_add(nb2s, TM2), -EFBIG);

        /* success case */
        NLXDBGPnl(td, 1, "TEST: bulk transfer S(PS ~ AR)\n");
        nb2->nb_length = td->buf_size2;
        nb2->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        zUT(m0_net_buffer_add(nb2, TM2));
        ut_chan_timedwait(&td->tmwait2, 10);

        ut_chan_timedwait(&td->tmwait2, 10);
        M0_UT_ASSERT(cb_called2 == 1);
        M0_UT_ASSERT(cb_status2 == 0);
        M0_UT_ASSERT(cb_nb2 == nb2);
        M0_UT_ASSERT(ut_net_buffer_authenticate(nb2, pBytes, 0, seed));
        M0_UT_ASSERT(cb_length2 == pBytes);
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM2, M0_NET_QT_ACTIVE_BULK_RECV,
                                          &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 1);
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 0);

        ut_chan_timedwait(&td->tmwait1, 10);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_length1 == pBytes);
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM1, M0_NET_QT_PASSIVE_BULK_SEND,
                                          &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 1);
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 0);

        rc = 0;
 failed:
        m0_net_desc_free(&nb1->nb_desc);
        m0_net_desc_free(&nb2->nb_desc);
        if (nb2s != NULL) {
                if (nb2s->nb_flags & M0_NET_BUF_QUEUED) {
                        NLXDBGP(td, 3, "\tcancelling nb2s\n");
                        m0_net_buffer_del(nb2s, nb2s->nb_tm);
                        ut_chan_timedwait(&td->tmwait2, 10);
                        if (nb2s->nb_flags & M0_NET_BUF_QUEUED)
                                NLXP("Unable to cancel nb2s\n");
                }
                if (nb2s->nb_flags & M0_NET_BUF_REGISTERED) {
                        NLXDBGP(td, 3, "\tderegistering nb2s\n");
                        m0_net_buffer_deregister(nb2s, DOM2);
                }
                if (nb2s->nb_buffer.ov_vec.v_nr != 0)
                        m0_bufvec_free_aligned(&nb2s->nb_buffer,
                                               UT_PAGE_SHIFT);
                m0_net_desc_free(&nb2s->nb_desc);
                m0_free(nb2s);
        }
        m0_clink_del_lock(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait2);
        return rc;
}
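
/*
 * The sequence above is the canonical passive-send handoff: the passive
 * side queues its buffer first, which generates the network buffer
 * descriptor; the descriptor is copied to the active side out of band
 * (here via m0_net_desc_copy()); and only a matching active-receive
 * queueing consumes it.  Condensed to its happy path:
 */
#if 0
        nb1->nb_qtype = M0_NET_QT_PASSIVE_BULK_SEND;
        zUT(m0_net_buffer_add(nb1, TM1));                /* creates nb_desc */
        zUT(m0_net_desc_copy(&nb1->nb_desc, &nb2->nb_desc));
        nb2->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        zUT(m0_net_buffer_add(nb2, TM2));                /* pulls the data */
#endif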

static int test_bulk_passive_recv(struct ut_data *td)
{
        struct m0_net_buffer *nb1 = &td->bufs1[0]; /* passive */
        struct m0_net_buffer *nb2 = &td->bufs2[0]; /* active */
        struct m0_net_buffer *nb2l = NULL;
        unsigned char seed = 'r';
        m0_bcount_t aBytes = UT_BULK_SIZE;
        int rc = -1;

        ut_cbreset();

        m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
        m0_clink_add_lock(&TM2->ntm_chan, &td->tmwait2);

        /* stage the passive recv buffer */
        nb1->nb_qtype = M0_NET_QT_PASSIVE_BULK_RECV;
        nb1->nb_length = td->buf_size1;
        zUT(m0_net_buffer_add(nb1, TM1));
        M0_UT_ASSERT(nb1->nb_desc.nbd_len != 0);
        M0_UT_ASSERT(nb1->nb_desc.nbd_data != NULL);

        zUT(m0_net_desc_copy(&nb1->nb_desc, &nb2->nb_desc));
        M0_UT_ASSERT(td->buf_size2 >= aBytes);
        if (td->buf_size2 < aBytes)
                goto failed;
        nb2->nb_length = aBytes;
        ut_net_buffer_sign(nb2, nb2->nb_length, seed);

        /* ensure that an active receive fails */
        NLXDBGPnl(td, 1, "TEST: bulk transfer F(PR !~ AR)\n");
        nb2->nb_qtype = M0_NET_QT_ACTIVE_BULK_RECV;
        zvUT(m0_net_buffer_add(nb2, TM2), -EPERM);

        /* try to send a larger buffer */
        NLXDBGPnl(td, 1, "TEST: bulk transfer F(PR < AS)\n");
        zUT((M0_ALLOC_PTR(nb2l) == NULL));
        zUT(m0_bufvec_alloc_aligned(&nb2l->nb_buffer, UT_BUFSEGS1 + 1,
                                    td->buf_seg_size1, UT_PAGE_SHIFT));
        zUT(m0_net_buffer_register(nb2l, DOM2));
        zUT(m0_net_desc_copy(&nb1->nb_desc, &nb2l->nb_desc));
        nb2l->nb_length = td->buf_seg_size1 * (UT_BUFSEGS1 + 1);
        nb2l->nb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        zvUT(m0_net_buffer_add(nb2l, TM2), -EFBIG);

        /* now try the success case */
        NLXDBGPnl(td, 1, "TEST: bulk transfer S(PR ~ AS)\n");
        nb2->nb_qtype = M0_NET_QT_ACTIVE_BULK_SEND;
        zUT(m0_net_buffer_add(nb2, TM2));

        ut_chan_timedwait(&td->tmwait2, 10);
        M0_UT_ASSERT(cb_called2 == 1);
        M0_UT_ASSERT(cb_status2 == 0);
        M0_UT_ASSERT(cb_nb2 == nb2);
        M0_UT_ASSERT(cb_length2 == aBytes);
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM2, M0_NET_QT_ACTIVE_BULK_SEND,
                                          &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 1);
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 0);

        ut_chan_timedwait(&td->tmwait1, 10);
        M0_UT_ASSERT(cb_called1 == 1);
        M0_UT_ASSERT(cb_status1 == 0);
        M0_UT_ASSERT(cb_nb1 == nb1);
        M0_UT_ASSERT(cb_length1 == aBytes);
        M0_UT_ASSERT(ut_net_buffer_authenticate(nb1, aBytes, 0, seed));
        M0_UT_ASSERT(!m0_net_tm_stats_get(TM1, M0_NET_QT_PASSIVE_BULK_RECV,
                                          &td->qs, true));
        M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
        M0_UT_ASSERT(td->qs.nqs_num_s_events == 1);
        M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
        M0_UT_ASSERT(td->qs.nqs_num_dels == 0);

        rc = 0;
 failed:
        m0_net_desc_free(&nb1->nb_desc);
        m0_net_desc_free(&nb2->nb_desc);
        if (nb2l != NULL) {
                if (nb2l->nb_flags & M0_NET_BUF_QUEUED) {
                        NLXDBGP(td, 3, "\tcancelling nb2l\n");
                        m0_net_buffer_del(nb2l, nb2l->nb_tm);
                        ut_chan_timedwait(&td->tmwait2, 10);
                        if (nb2l->nb_flags & M0_NET_BUF_QUEUED)
                                NLXP("Unable to cancel nb2l\n");
                }
                if (nb2l->nb_flags & M0_NET_BUF_REGISTERED) {
                        NLXDBGP(td, 3, "\tderegistering nb2l\n");
                        m0_net_buffer_deregister(nb2l, DOM2);
                }
                if (nb2l->nb_buffer.ov_vec.v_nr != 0)
                        m0_bufvec_free_aligned(&nb2l->nb_buffer,
                                               UT_PAGE_SHIFT);
                m0_net_desc_free(&nb2l->nb_desc);
                m0_free(nb2l);
        }
        m0_clink_del_lock(&td->tmwait1);
        m0_clink_del_lock(&td->tmwait2);
        return rc;
}

static void test_bulk_body(struct ut_data *td)
{
        struct m0_net_buffer *nb1 = &td->bufs1[0];
        int i;

        /* TEST
           Add buffers on the passive queues and then cancel them.
           Check that descriptors are present after enqueuing.
         */
        NLXDBGPnl(td, 1, "TEST: add/del on the passive queues\n");
        for (i = M0_NET_QT_PASSIVE_BULK_RECV;
             i <= M0_NET_QT_PASSIVE_BULK_SEND; ++i) {
                nb1->nb_length = td->buf_size1;
                nb1->nb_qtype = i;

                ut_cbreset();
                M0_SET0(&nb1->nb_desc);
                zUT(m0_net_buffer_add(nb1, TM1));
                M0_UT_ASSERT(nb1->nb_desc.nbd_len == sizeof *CBD1);
                M0_UT_ASSERT(nb1->nb_desc.nbd_data != NULL);

                m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
                m0_net_buffer_del(nb1, TM1);
                ut_chan_timedwait(&td->tmwait1, 10);
                m0_clink_del_lock(&td->tmwait1);
                M0_UT_ASSERT(cb_called1 == 1);
                M0_UT_ASSERT(cb_nb1 == nb1);
                M0_UT_ASSERT(!(cb_nb1->nb_flags & M0_NET_BUF_QUEUED));
                M0_UT_ASSERT(cb_qt1 == i);
                M0_UT_ASSERT(cb_status1 == -ECANCELED);
                M0_UT_ASSERT(!m0_net_tm_stats_get(TM1, i, &td->qs, true));
                M0_UT_ASSERT(td->qs.nqs_num_f_events == 1);
                M0_UT_ASSERT(td->qs.nqs_num_s_events == 0);
                M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
                M0_UT_ASSERT(td->qs.nqs_num_dels == 1);

                /* explicitly free the descriptor */
                M0_UT_ASSERT(nb1->nb_desc.nbd_data != NULL);
                m0_net_desc_free(&nb1->nb_desc);
                M0_UT_ASSERT(nb1->nb_desc.nbd_data == NULL);
                M0_UT_ASSERT(nb1->nb_desc.nbd_len == 0);
        }

        /* sanity check */
        /* ... */

        /* TEST
           Test a passive send.  Ensure that an active send cannot be
           issued for the network buffer descriptor.
           Also test size issues.
         */
        zUT(test_bulk_passive_send(td));

        /* TEST
           Test a passive receive.  Ensure that an active receive cannot
           be issued for the network buffer descriptor.
           Also test size issues.
         */
        zUT(test_bulk_passive_recv(td));

        return;
}

static void test_bulk(void)
{
#ifdef NLX_DEBUG
        nlx_debug._debug_ = 0;
#endif
        ut_test_framework(&test_bulk_body, NULL, ut_verbose);
#ifdef NLX_DEBUG
        nlx_debug._debug_ = 0;
#endif
}
1808 
1810 
1811 /* replacement for ut_buf_cb2 for this test */
1812 static void test_sync_msg_send_cb2(const struct m0_net_buffer_event *ev)
1813 {
1814  /* async callback on background thread */
1816 
1817  ut_buf_cb2(ev);
1818 }
1819 
1820 /* replacement for ut_buf_cb1 for this test */
1821 static void test_sync_msg_recv_cb1(const struct m0_net_buffer_event *ev)
1822 {
1823  /* synchronous callback on application thread */
1825 
1826  cb_save_ep1 = true;
1827  ut_buf_cb1(ev);
1829  M0_UT_ASSERT(cb_status1 == 0);
1830 }
1831 
1832 static void test_sync_body(struct ut_data *td)
1833 {
1834  struct m0_net_buffer *nb1 = &td->bufs1[0];
1835  struct m0_net_buffer *nb2 = &td->bufs2[0];
1836  struct nlx_xo_transfer_mc *tp1 = TM1->ntm_xprt_private;
1837  struct m0_net_end_point *ep2 = NULL;
1838  int num_msgs;
1839  int initial_len;
1840  int len;
1841  int offset;
1842  int i;
1843 
1845 
1846  m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
1847  m0_clink_add_lock(&TM2->ntm_chan, &td->tmwait2);
1848 
1849  /* TEST
1850  No-op calls
1851  */
1852  NLXDBGPnl(td, 1, "TEST: no-op sync calls\n");
1853  ut_cbreset();
1854  M0_UT_ASSERT(tp1->xtm_ev_chan == NULL);
1857  M0_UT_ASSERT(cb_called1 == 0);
1858 
1859  /* TEST
1860  Test synchronous delivery of buffer events under control of the
1861  application.
1862  No events must be delivered until fetched.
1863  Normal event delivery guarantees the use of a separate thread.
1864  Synchronous event delivery guarantees the use of the application
1865  thread.
1866 
1867  Note that this test is not about the content of the event (tested
1868  elsewhere) but about the control over delivery.
1869  I use the M0_NET_QT_MSG_RECV queue for this test to make it easier
1870  to generate multiple events, but any queue would have sufficied.
1871  */
1872  NLXDBGPnl(td, 1, "TEST: sync delivery of buffer events\n");
1873 
1874  ut_cbreset();
1875 
1876  test_sync_ut_handle = m0_thread_self(); /* save thread handle */
1877 
1878  num_msgs = 4;
1879  initial_len = 256;
1880  nb1->nb_length = td->buf_size1;
1881  ut_net_buffer_sign(nb1, nb1->nb_length, 0);
1883  nb1->nb_max_receive_msgs = num_msgs;
1886  zUT(m0_net_buffer_add(nb1, TM1));
1888 
1890  M0_UT_ASSERT(tp1->xtm_ev_chan == NULL);
1891  m0_net_buffer_event_notify(TM1, &TM1->ntm_chan);
1892  M0_UT_ASSERT(tp1->xtm_ev_chan == &TM1->ntm_chan);
1893 
1894  /* get a TM2 end point for TM1's address */
1895  zUT(m0_net_end_point_create(&ep2, TM2, TM1->ntm_ep->nep_addr));
1897 
1899  for (i = 1; i <= num_msgs; ++i) {
1900  len = initial_len * i;
1901  M0_UT_ASSERT(len < td->buf_size2);
1902  ut_net_buffer_sign(nb2, len, i);
1904  nb2->nb_length = len;
1905  nb2->nb_ep = ep2;
1906  zUT(m0_net_buffer_add(nb2, TM2));
1907  ut_chan_timedwait(&td->tmwait2, 10);
1910  M0_UT_ASSERT(cb_status2 == 0);
1911  M0_UT_ASSERT(cb_nb2 == nb2);
1914  &td->qs, false));
1915  M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
1917  M0_UT_ASSERT(td->qs.nqs_num_adds == i);
1918  M0_UT_ASSERT(td->qs.nqs_num_dels == 0);
1919  }
1920 
1921  M0_UT_ASSERT(cb_called1 == 0);
1922  M0_UT_ASSERT(ut_chan_timedwait(&td->tmwait1, 10));/* got notification */
1923  M0_UT_ASSERT(tp1->xtm_ev_chan == NULL);
1925  M0_UT_ASSERT(cb_called1 == 0);
1927 
1928  /* event is still pending */
1930 
1931  m0_net_buffer_event_deliver_all(TM1); /* get events */
1932 
1933  M0_UT_ASSERT(cb_called1 == num_msgs);
1935  M0_UT_ASSERT(cb_ep1 != NULL &&
1936  strcmp(TM2->ntm_ep->nep_addr, cb_ep1->nep_addr) == 0);
1938  for (i = 1, len = 0, offset = 0; i <= num_msgs; ++i) {
1939  offset += len;
1940  len = initial_len * i;
1943  }
1944  cb_ep1 = NULL;
1946  &td->qs, false));
1947  M0_UT_ASSERT(td->qs.nqs_num_f_events == 0);
1948  M0_UT_ASSERT(td->qs.nqs_num_s_events == num_msgs);
1949  M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
1950  M0_UT_ASSERT(td->qs.nqs_num_dels == 0);
1951 
1952  m0_clink_del_lock(&td->tmwait2);
1953  m0_clink_del_lock(&td->tmwait1);
1954  if (ep2 != NULL)
1955  m0_net_end_point_put(ep2);
1956  return;
1957 }
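
/*
 * The body above exercises the synchronous buffer-event delivery
 * contract: once m0_net_buffer_event_deliver_synchronously() is in
 * effect (see test_sync_prestart() below), no callback runs until the
 * application asks for events.  The canonical application loop being
 * validated, in sketch form (tm and clink are assumed to be set up by
 * the caller):
 */
#if 0
        m0_net_buffer_event_notify(tm, &tm->ntm_chan); /* arm the channel */
        m0_chan_wait(&clink);                  /* notification only, no cbs */
        if (m0_net_buffer_event_pending(tm))
                m0_net_buffer_event_deliver_all(tm); /* callbacks run here,
                                                        on this thread */
#endif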

static void test_sync_prestart(struct ut_data *td, int which)
{
        if (which == 2)
                return;
        /* use synchronous event delivery in TM1 */
        zUT(m0_net_buffer_event_deliver_synchronously(TM1));
}

static void test_sync(void)
{
        ut_test_framework(&test_sync_body, &test_sync_prestart, ut_verbose);
}
1971 
1972 
1973 /* replacement for ut_buf_cb1 used in this test */
1974 static void test_timeout_msg_recv_cb1(const struct m0_net_buffer_event *ev)
1975 {
1976  ut_buf_cb1(ev);
1978  M0_UT_ASSERT(cb_status1 == -ETIMEDOUT);
1979 }
1980 
1981 /* intercepted sub */
1982 static m0_time_t
 1983 test_timeout_tm_get_buffer_timeout_tick(const struct m0_net_transfer_mc *tm)
 1984 {
1985  return m0_time(1, 0) >> 1; /* 500ms */
1986 }
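 /* Added note: m0_time_t holds nanoseconds in a uint64_t, so m0_time(1, 0)
  * is 10^9 ns and the right shift halves it to 500ms.  The short tick makes
  * the ~1s buffer deadlines used below expire after only a few scanner
  * passes.
  */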
1987 
 1988 /* intercepted sub */
 1989 static struct m0_atomic64 test_timeout_ttb_called;
 1990 static struct m0_atomic64 test_timeout_ttb_retval; /* last non-zero return */
 1991 static int test_timeout_tm_timeout_buffers(struct m0_net_transfer_mc *tm,
 1992  m0_time_t now)
 1993 {
 1994  int rc;
 1995  m0_atomic64_inc(&test_timeout_ttb_called);
 1996  rc = nlx_tm_timeout_buffers(tm, now);
 1997  if (rc)
 1998  m0_atomic64_set(&test_timeout_ttb_retval, rc);
 1999  return rc;
2000 }
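 /* Added note: nlx_xo_iv is the transport's vector of interceptable subs,
  * saved and restored around this test by ut_save_subs()/ut_restore_subs().
  * Redirecting nlx_xo_iv._nlx_tm_get_buffer_timeout_tick and
  * nlx_xo_iv._nlx_tm_timeout_buffers at the wrappers above leaves the real
  * nlx_tm_timeout_buffers() in place while recording its invocation count
  * and last non-zero return value in the atomics for later inspection.
  */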
2001 
2002 static void test_timeout_body(struct ut_data *td)
2003 {
2004  struct m0_net_buffer *nb1 = &td->bufs1[0];
2005  int qts[3] = {
 2006  M0_NET_QT_MSG_RECV,
 2007  M0_NET_QT_PASSIVE_BULK_RECV,
 2008  M0_NET_QT_PASSIVE_BULK_SEND,
 2009  };
2010  int qt;
2011  int i;
2012  m0_time_t abs_timeout;
2013  m0_time_t buf_add_time;
2014  uint64_t timeout_secs = 1;
2015 
2016  m0_clink_add_lock(&TM1->ntm_chan, &td->tmwait1);
2017 
2018  /* TEST
2019  Enqueue non-active buffers one at a time on different queues,
 2020  and let them time out.
2021  Cannot test with active buffers.
2022  */
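 /* Added sketch (assumption): the per-buffer deadline idiom used in the
  * loop below is simply
  *
  *     nb->nb_timeout = m0_time_from_now(timeout_secs, 0);
  *     zUT(m0_net_buffer_add(nb, TM1));
  *     ... no traffic arrives ...
  *     completion callback fires with nbe_status == -ETIMEDOUT
  *
  * whereas a buffer left at the default nb_timeout of M0_TIME_NEVER is
  * never expired (see the mixed-timeout test further down).
  */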
2024  nb1->nb_length = td->buf_size1;
2025  for (i = 0; i < ARRAY_SIZE(qts); ++i) {
2026  qt = qts[i];
2027  NLXDBGPnl(td, 1, "TEST: buffer single timeout: %d\n", (int) qt);
2028  ut_cbreset();
2030  if (qt == M0_NET_QT_MSG_RECV) {
2032  nb1->nb_max_receive_msgs = 1;
2033  } else {
2034  nb1->nb_min_receive_size = 0;
2035  nb1->nb_max_receive_msgs = 0;
2036  }
2037  nb1->nb_qtype = qt;
2038  abs_timeout = m0_time_from_now(timeout_secs, 0);
2040  nb1->nb_timeout = abs_timeout;
2041  zUT(m0_net_buffer_add(nb1, TM1));
2043  buf_add_time = m0_time_now();
2044 
2045  ut_chan_timedwait(&td->tmwait1, 2 * timeout_secs);
 2046  M0_UT_ASSERT(m0_atomic64_get(&test_timeout_ttb_called) >=
 2047  2 * timeout_secs); /* 0.5s tick */
2049  M0_UT_ASSERT(cb_called1 == 1);
2050  M0_UT_ASSERT(cb_status1 == -ETIMEDOUT);
2053  M0_UT_ASSERT(!m0_net_tm_stats_get(TM1, qt, &td->qs, true));
2054  M0_UT_ASSERT(td->qs.nqs_num_f_events == 1);
2055  M0_UT_ASSERT(td->qs.nqs_num_s_events == 0);
2056  M0_UT_ASSERT(td->qs.nqs_num_adds == 1);
2057  M0_UT_ASSERT(td->qs.nqs_num_dels == 0);
 2058  M0_UT_ASSERT(td->qs.nqs_time_in_queue >=
 2059  abs_timeout - buf_add_time);
2060 
2061  if (qt != M0_NET_QT_MSG_RECV)
2062  m0_net_desc_free(&nb1->nb_desc);
2063  }
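 /* Added note: nb_desc is freed for the two passive bulk queues only,
  * presumably because enqueueing on those queues encodes a network buffer
  * descriptor while M0_NET_QT_MSG_RECV creates none.
  */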
2064 
2065  /* TEST
2066  Enqueue multiple non-active buffers on a single queue and let
 2067  them time out at the same time.
2068  Cannot test with active buffers.
2069  */
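 /* Added note: a single channel signal may cover several completions, so
  * the code below waits in a loop bounded by UT_BUFS1 iterations instead
  * of expecting one wakeup per timed-out buffer.
  */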
2072  NLXDBGPnl(td, 1, "TEST: buffer multiple timeout\n");
2074  abs_timeout = m0_time_from_now(timeout_secs, 0);
2075  ut_cbreset();
2077  for (i = 0; i < UT_BUFS1; ++i) {
2078  nb1 = &td->bufs1[i];
2079  nb1->nb_qtype = qt;
2081  nb1->nb_max_receive_msgs = 1;
2083  nb1->nb_timeout = abs_timeout;
2084  zUT(m0_net_buffer_add(nb1, TM1));
2086  }
2087  buf_add_time = m0_time_now();
2088 
2089  i = 0;
2090  while (cb_called1 != UT_BUFS1 && i <= UT_BUFS1) {
2091  ut_chan_timedwait(&td->tmwait1, 2 * timeout_secs);
2092  ++i;
2093  }
 2094  M0_UT_ASSERT(cb_called1 == UT_BUFS1);
 2095  M0_UT_ASSERT(m0_atomic64_get(&test_timeout_ttb_called) >=
 2096  2 * timeout_secs); /* 0.5s tick */
2098  M0_UT_ASSERT(!m0_net_tm_stats_get(TM1, qt, &td->qs, true));
 2099  M0_UT_ASSERT(td->qs.nqs_num_f_events == UT_BUFS1);
 2100  M0_UT_ASSERT(td->qs.nqs_num_s_events == 0);
 2101  M0_UT_ASSERT(td->qs.nqs_num_adds == UT_BUFS1);
 2102  M0_UT_ASSERT(td->qs.nqs_num_dels == 0);
 2103  M0_UT_ASSERT(td->qs.nqs_time_in_queue >=
 2104  (abs_timeout - buf_add_time) * UT_BUFS1);
2105 
2106  for (i = 0; i < UT_BUFS1; ++i) {
2107  nb1 = &td->bufs1[i];
2109  }
2110 
2111  /* TEST
2112  Enqueue multiple non-active buffers onto a single queue.
2113  Set a timeout only on one buffer.
2114  Cannot test with active buffers.
2115  */
2118  NLXDBGPnl(td, 1, "TEST: buffer mixed timeout\n");
2120  abs_timeout = m0_time_from_now(timeout_secs, 0);
2121  ut_cbreset();
2123  for (i = 0; i < UT_BUFS1; ++i) {
2124  nb1 = &td->bufs1[i];
2125  nb1->nb_qtype = qt;
2127  nb1->nb_max_receive_msgs = 1;
2129  if (i == 0)
2130  nb1->nb_timeout = abs_timeout;
2131  zUT(m0_net_buffer_add(nb1, TM1));
2132  if (i == 0)
2133  buf_add_time = m0_time_now();
2135  }
2136 
2137  i = 0;
2138  while (cb_called1 != 1 && i <= UT_BUFS1) {
2139  ut_chan_timedwait(&td->tmwait1, 2 * timeout_secs);
2140  ++i;
2141  }
2142  M0_UT_ASSERT(cb_called1 == 1);
 2143  M0_UT_ASSERT(m0_atomic64_get(&test_timeout_ttb_called) >=
 2144  2 * timeout_secs); /* 0.5s tick */
2146  M0_UT_ASSERT(!m0_net_tm_stats_get(TM1, qt, &td->qs, true));
2147  M0_UT_ASSERT(td->qs.nqs_num_f_events == 1);
2148  M0_UT_ASSERT(td->qs.nqs_num_s_events == 0);
 2149  M0_UT_ASSERT(td->qs.nqs_num_adds == UT_BUFS1);
 2150  M0_UT_ASSERT(td->qs.nqs_num_dels == 0);
 2151  M0_UT_ASSERT(td->qs.nqs_time_in_queue >=
 2152  abs_timeout - buf_add_time);
2153 
2154  /* restore the callback sub and then cancel the other buffer */
2155  td->buf_cb1.nbc_cb[qt] = ut_buf_cb1;
2156  m0_net_buffer_del(nb1, TM1);
2157  ut_chan_timedwait(&td->tmwait1, 3);
2158  M0_UT_ASSERT(cb_called1 == 2);
2159  M0_UT_ASSERT(cb_status1 == -ECANCELED);
2160 
2161  /* TEST
2162  Enqueue a buffer, no timeout.
2163  Force set the M0_NET_BUF_TIMED_OUT flag in the buffer.
2164  Construct a core buffer event structure and set the status to 0
2165  in the structure, indicating that the buffer operation completed
 2166  successfully, coincident with the timeout.
2167  Call nlx_xo_core_bev_to_net_bev() to construct the net buffer
2168  event structure.
2169  The status should be 0 in the buffer event structure and the
2170  M0_NET_BUF_TIMED_OUT flag should be cleared.
2171 
2172  This situation is quite probable when using synchronous buffer event
2173  delivery when the cancel is issued while the completion event is
2174  already present in the circular buffer but not yet harvested.
2175  */
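 /* Added sketch (assumption): the invariant checked below is that a
  * successful completion wins the race with a buffer timeout, i.e. for a
  * core event carrying cbe_status == 0,
  *
  *     nlx_xo_core_bev_to_net_bev(TM1, &lcbev, &nbev);
  *     => nbev.nbe_status == 0
  *     => nb1->nb_flags no longer contains M0_NET_BUF_TIMED_OUT
  */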
2177  nb1 = &td->bufs1[0];
2178  nb1->nb_length = td->buf_size1;
2180  nb1->nb_max_receive_msgs = 1;
2181  nb1->nb_timeout = M0_TIME_NEVER;
2182  nb1->nb_qtype = M0_NET_QT_PASSIVE_BULK_RECV; /* doesn't involve EPs */
2183  zUT(m0_net_buffer_add(nb1, TM1));
2185 
2186  {
2187  struct nlx_core_buffer_event lcbev = {
 2188  .cbe_buffer_id = (nlx_core_opaque_ptr_t) nb1,
 2189  .cbe_status = 0,
2190  .cbe_length = 10,
2191  .cbe_unlinked = true,
2192  };
2193  struct m0_net_buffer_event nbev;
 2194 
 2195  nb1->nb_flags |= M0_NET_BUF_TIMED_OUT;
 2196 
2197  m0_mutex_lock(&TM1->ntm_mutex);
2198  zUT(nlx_xo_core_bev_to_net_bev(TM1, &lcbev, &nbev));
2199  m0_mutex_unlock(&TM1->ntm_mutex);
 2200  M0_UT_ASSERT(!(nb1->nb_flags & M0_NET_BUF_TIMED_OUT));
 2201  M0_UT_ASSERT(nbev.nbe_status == 0);
2202  M0_UT_ASSERT(nbev.nbe_length == lcbev.cbe_length);
2203  }
2204 
2205  /* cancel the buffer */
2206  m0_net_buffer_del(nb1, TM1);
2207  ut_chan_timedwait(&td->tmwait1, 3);
2208  M0_UT_ASSERT(cb_called1 == 3);
2209  M0_UT_ASSERT(cb_status1 == -ECANCELED);
2210  m0_net_desc_free(&nb1->nb_desc);
2211 
2212  m0_clink_del_lock(&td->tmwait1);
2213 }
2214 
2215 static void test_timeout(void)
2216 {
2217  ut_save_subs();
2218 
 2219  /* intercept the timeout subs */
 2220  nlx_xo_iv._nlx_tm_get_buffer_timeout_tick =
 2221  test_timeout_tm_get_buffer_timeout_tick;
 2222  nlx_xo_iv._nlx_tm_timeout_buffers = test_timeout_tm_timeout_buffers;
 2223  m0_atomic64_set(&test_timeout_ttb_called, 0);
 2224  m0_atomic64_set(&test_timeout_ttb_retval, 0);
 2225 
 2226  ut_test_framework(&test_timeout_body, NULL, ut_verbose);
 2227 
2228  ut_restore_subs();
2229 }
2230 
 2231 struct m0_ut_suite m0_net_lnet_ut = {
 2232  .ts_name = "net-lnet",
2233  .ts_init = test_lnet_init,
2234  .ts_fini = test_lnet_fini,
2235  .ts_tests = {
2236 #ifdef __KERNEL__
2237  { "net_lnet_buf_shape (K)", ktest_buf_shape },
2238  { "net_lnet_buf_reg (K)", ktest_buf_reg },
2239  { "net_lnet_ep_addr (K)", ktest_core_ep_addr },
2240  { "net_lnet_enc_dec (K)", ktest_enc_dec },
2241  { "net_lnet_msg (K)", ktest_msg },
2242  { "net_lnet_bulk (K)", ktest_bulk },
2243  { "net_lnet_device", ktest_dev },
2244 #else
2245  { "net_lnet_fail", test_fail },
2246 #endif
2247  { "net_lnet_tm_initfini", test_tm_initfini },
2248  { "net_lnet_tm_startstop", test_tm_startstop },
2249  { "net_lnet_msg", test_msg },
2250  { "net_lnet_buf_desc", test_buf_desc },
2251  { "net_lnet_bulk", test_bulk },
2252  { "net_lnet_sync", test_sync },
2253  { "net_lnet_timeout", test_timeout },
2254  { NULL, NULL }
2255  }
2256 };
2257 M0_EXPORTED(m0_net_lnet_ut);
2258 
2259 #undef M0_TRACE_SUBSYSTEM
2260 
2261 /*
2262  * Local variables:
2263  * c-indentation-style: "K&R"
2264  * c-basic-offset: 8
2265  * tab-width: 8
2266  * fill-column: 80
2267  * scroll-step: 1
2268  * End:
2269  */