Motr (M0) — lnet_xo.c: LNet transport operations layer (Doxygen source listing; many source lines were collapsed out of this export).
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2012-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
28 static bool nlx_dom_invariant(const struct m0_net_domain *dom)
29 {
30  const struct nlx_xo_domain *dp = dom->nd_xprt_private;
31  return _0C(dp != NULL) && _0C(dp->xd_dom == dom) &&
32  _0C(dom->nd_xprt == &m0_net_lnet_xprt);
33 }
34 
35 static bool nlx_ep_invariant(const struct m0_net_end_point *ep)
36 {
37  const struct nlx_xo_ep *xep;
38  if (ep == NULL)
39  return false;
40  xep = container_of(ep, struct nlx_xo_ep, xe_ep);
41  return _0C(xep->xe_magic == M0_NET_LNET_XE_MAGIC) &&
42  _0C(xep->xe_ep.nep_addr == &xep->xe_addr[0]);
43 }
44 
/**
 * Checks a network buffer's transport-private state: the xo buffer
 * exists, back-points to the net buffer, its core buffer id round-trips
 * through the opaque pointer, its domain is valid, and a queued buffer
 * is bound to a valid transfer machine.
 * NOTE(review): this listing lost the final conjunct during extraction -
 * the expression ends in a dangling "&&"; restore from version control.
 */
static bool nlx_buffer_invariant(const struct m0_net_buffer *nb)
{
	const struct nlx_xo_buffer *bp = nb->nb_xprt_private;

	return _0C(bp != NULL) && _0C(bp->xb_nb == nb) && _0C(nlx_dom_invariant(nb->nb_dom)) &&
	       _0C(bp->xb_core.cb_buffer_id == (nlx_core_opaque_ptr_t) nb) &&
	       _0C(ergo(nb->nb_flags & M0_NET_BUF_QUEUED,
			nb->nb_tm != NULL && nlx_tm_invariant(nb->nb_tm))) &&
}
55 
56 static bool nlx_tm_invariant(const struct m0_net_transfer_mc *tm)
57 {
58  const struct nlx_xo_transfer_mc *tp = tm->ntm_xprt_private;
59  return tp != NULL && tp->xtm_tm == tm && nlx_dom_invariant(tm->ntm_dom);
60 }
61 
73  struct nlx_core_transfer_mc *lctm,
76  struct m0_net_transfer_mc *tm,
77  const struct nlx_core_ep_addr *cepa);
79  m0_net_transfer_mc *tm);
81  m0_time_t now);
82 };
84 #define _NLXIS(s) ._##s = s
85 
90 
91 #undef _NLXI
92 };
93 
/*
 * Indirection macros: each forwards through the corresponding function
 * pointer in nlx_xo_iv (struct nlx_xo_interceptable_subs) - apparently
 * so unit tests can substitute alternate implementations; confirm
 * against the UT suite.
 */
#define NLX_core_buf_event_wait(lcdom, lctm, timeout) \
	(*nlx_xo_iv._nlx_core_buf_event_wait)(lcdom, lctm, timeout)
#define NLX_ep_create(epp, tm, cepa) \
	(*nlx_xo_iv._nlx_ep_create)(epp, tm, cepa)
#define NLX_tm_get_buffer_timeout_tick(tm) \
	(*nlx_xo_iv._nlx_tm_get_buffer_timeout_tick)(tm)
#define NLX_tm_timeout_buffers(tm, now) \
	(*nlx_xo_iv._nlx_tm_timeout_buffers)(tm, now)
102 
/**
 * Initialises the LNet transport private data of a network domain and
 * then the underlying LNet core domain.
 * @pre dom->nd_xprt_private == NULL
 * @retval 0 on success, -ENOMEM when the private structure cannot be
 *         allocated, otherwise the nlx_core_dom_init() result.
 * NOTE(review): the allocation of dp, the failure-path free, and the
 * else-branch statement are missing from this extraction; as extracted,
 * the final return is parsed as the "else" body.
 */
static int nlx_xo_dom_init(const struct m0_net_xprt *xprt,
			   struct m0_net_domain *dom)
{
	struct nlx_xo_domain *dp;
	int rc;

	M0_ENTRY();

	M0_PRE(dom->nd_xprt_private == NULL);
	if (dp == NULL)
		return M0_RC(-ENOMEM);
	dom->nd_xprt_private = dp;
	dp->xd_dom = dom;

	rc = nlx_core_dom_init(dom, &dp->xd_core);
	if (rc != 0) {
		/* failure: detach private data from the domain */
		dom->nd_xprt_private = NULL;
	} else

	return M0_RC(rc);
}
129 
/**
 * Finalises the transport private data of a network domain.
 * NOTE(review): the statements between the declaration of dp and the
 * reset of nd_xprt_private (core domain fini and the free of dp,
 * presumably) are missing from this extraction - dp is otherwise
 * unused here.
 */
static void nlx_xo_dom_fini(struct m0_net_domain *dom)
{
	struct nlx_xo_domain *dp = dom->nd_xprt_private;

	dom->nd_xprt_private = NULL;
}
139 
141 {
142  struct nlx_xo_domain *dp = dom->nd_xprt_private;
143 
146 }
147 
150 {
151  struct nlx_xo_domain *dp = dom->nd_xprt_private;
152 
155 }
156 
157 static int32_t nlx_xo_get_max_buffer_segments(const struct m0_net_domain *dom)
158 {
159  struct nlx_xo_domain *dp = dom->nd_xprt_private;
160 
163 }
164 
166  struct m0_net_transfer_mc *tm,
167  const char *addr)
168 {
169  struct nlx_xo_domain *dp;
170  struct nlx_core_ep_addr cepa;
171  int rc;
172 
174  dp = tm->ntm_dom->nd_xprt_private;
175  rc = nlx_core_ep_addr_decode(&dp->xd_core, addr, &cepa);
176  if (rc == 0 && cepa.cepa_tmid == M0_NET_LNET_TMID_INVALID)
177  rc = M0_ERR(-EINVAL);
178  if (rc != 0) {
179  return M0_RC(rc);
180  }
181 
182  return nlx_ep_create(epp, tm, &cepa);
183 }
184 
/**
 * Validates a buffer's m0_bufvec: every segment must be non-empty,
 * non-NULL and no larger than the domain's maximum segment size.
 * NOTE(review): the guard condition before the first "return false"
 * (checking the segment count against the domain maximum, presumably)
 * is missing from this extraction.
 */
static bool nlx_xo_buffer_bufvec_invariant(const struct m0_net_buffer *nb)
{
	const struct m0_vec *v = &nb->nb_buffer.ov_vec;
	const struct m0_bufvec *bv = &nb->nb_buffer;
	m0_bcount_t max_seg_size;
	int i;

		return false;
	max_seg_size = nlx_xo_get_max_buffer_segment_size(nb->nb_dom);
	/* each segment must fit within the domain's per-segment limit */
	for (i = 0; i < v->v_nr; ++i)
		if (v->v_count[i] == 0 || v->v_count[i] > max_seg_size ||
		    bv->ov_buf[i] == NULL)
			return false;
	return true;
}
202 
/**
 * Registers a network buffer with the LNet core: allocates the
 * transport-private nlx_xo_buffer, links it to the net buffer and
 * registers the buffer vector with the core domain.
 * @pre nb->nb_xprt_private == NULL
 * NOTE(review): the allocation of bp and the head of the registering
 * call (nlx_core_buf_register, per the core buffer id and bufvec
 * continuation line) are missing from this extraction, as is the
 * failure-path free.
 */
static int nlx_xo_buf_register(struct m0_net_buffer *nb)
{
	struct nlx_xo_domain *dp;
	struct nlx_xo_buffer *bp;
	int rc;

	M0_PRE(nb->nb_dom != NULL && nlx_dom_invariant(nb->nb_dom));
	M0_PRE(nb->nb_xprt_private == NULL);

	dp = nb->nb_dom->nd_xprt_private;
	if (bp == NULL)
		return M0_ERR(-ENOMEM);
	nb->nb_xprt_private = bp;
	bp->xb_nb = nb;

		&nb->nb_buffer, &bp->xb_core);
	if (rc != 0) {
		/* failure: detach private data from the buffer */
		nb->nb_xprt_private = NULL;
	}
	return M0_RC(rc);
}
229 
/**
 * Deregisters a buffer: finalises the core buffer state and detaches
 * the transport-private nlx_xo_buffer from the net buffer.
 * NOTE(review): a second precondition line and the free of bp appear to
 * be missing from this extraction.
 */
static void nlx_xo_buf_deregister(struct m0_net_buffer *nb)
{
	struct nlx_xo_domain *dp;
	struct nlx_xo_buffer *bp = nb->nb_xprt_private;

	M0_PRE(nb->nb_dom != NULL && nlx_dom_invariant(nb->nb_dom));
	dp = nb->nb_dom->nd_xprt_private;

	nlx_core_buf_deregister(&dp->xd_core, &bp->xb_core);
	nb->nb_xprt_private = NULL;
	return;
}
244 
251  const struct nlx_core_buf_desc *cbd,
252  struct m0_net_buf_desc *nbd)
253 {
254  nbd->nbd_len = sizeof *cbd;
255  NLX_ALLOC(nbd->nbd_data, nbd->nbd_len);
256  if (nbd->nbd_data == NULL) {
257  nbd->nbd_len = 0; /* for m0_net_desc_free() safety */
258  return M0_ERR(-ENOMEM);
259  }
260  memcpy(nbd->nbd_data, cbd, nbd->nbd_len);
261 
262  return 0;
263 }
264 
269  const struct m0_net_buf_desc *nbd,
270  struct nlx_core_buf_desc *cbd)
271 {
272  if (nbd->nbd_len != sizeof *cbd) {
273  return M0_RC(M0_ERR(-EINVAL));
274  }
275  memcpy(cbd, nbd->nbd_data, nbd->nbd_len);
276 
277  return 0;
278 }
279 
/**
 * Queues a buffer for the operation implied by its queue type.
 * Provisions internal buffer-event structures for the maximum number of
 * expected completions, fills in the core buffer fields, dispatches to
 * the queue-type specific core operation, and releases the provisioned
 * events if the operation fails.
 * Called with the TM mutex held; buffer offsets and the RETAIN flag are
 * not supported.
 * NOTE(review): four case labels (the passive/active bulk recv/send
 * queue types, per the calls in each arm) and some error-path
 * statements are missing from this extraction; marked inline below.
 */
static int nlx_xo_buf_add(struct m0_net_buffer *nb)
{
	struct nlx_xo_domain *dp;
	struct nlx_xo_transfer_mc *tp;
	struct nlx_xo_buffer *bp = nb->nb_xprt_private;
	struct nlx_core_domain *cd;
	struct nlx_core_transfer_mc *ctp;
	struct nlx_core_buffer *cbp;
	struct nlx_core_buf_desc cbd;
	m0_bcount_t bufsize;
	size_t need;
	int rc;

	M0_PRE_EX(nlx_buffer_invariant(nb) && nb->nb_tm != NULL);
	M0_PRE(m0_mutex_is_locked(&nb->nb_tm->ntm_mutex));
	M0_PRE(nb->nb_offset == 0); /* do not support an offset during add */
	M0_PRE((nb->nb_flags & M0_NET_BUF_RETAIN) == 0);
	dp = nb->nb_dom->nd_xprt_private;
	tp = nb->nb_tm->ntm_xprt_private;
	cd = &dp->xd_core;
	ctp = &tp->xtm_core;
	cbp = &bp->xb_core;

	NLXDBGP(tp, 1, "%p: nlx_xo_buf_add(%p, %d)\n", tp, nb, nb->nb_qtype);

	/* Provision the required number of internal buffer event structures for
	   the maximum expected completion notifications.
	   Release is done in nlx_xo_bev_deliver_all().
	 */
	need = nb->nb_qtype == M0_NET_QT_MSG_RECV ? nb->nb_max_receive_msgs : 1;
	rc = nlx_core_bevq_provision(cd, ctp, need);
	if (rc != 0)
		return M0_RC(rc);
	cbp->cb_max_operations = need;

	bufsize = m0_vec_count(&nb->nb_buffer.ov_vec);
	cbp->cb_length = bufsize; /* default for receive cases */

	cbp->cb_qtype = nb->nb_qtype;
	switch (nb->nb_qtype) {
	case M0_NET_QT_MSG_RECV:
		/* NOTE(review): a statement is missing here in this
		   extraction (a cb_min_receive_size assignment,
		   presumably - confirm against version control). */
		rc = nlx_core_buf_msg_recv(&dp->xd_core, ctp, cbp);
		break;

	case M0_NET_QT_MSG_SEND:
		M0_ASSERT(nb->nb_length <= bufsize);
		cbp->cb_length = nb->nb_length;
		cbp->cb_addr = *nlx_ep_to_core(nb->nb_ep); /* dest addr */
		rc = nlx_core_buf_msg_send(&dp->xd_core, ctp, cbp);
		break;

	/* NOTE(review): the passive-bulk-receive case label is missing
	   from this extraction. */
		nlx_core_buf_desc_encode(ctp, cbp, &cbd);
		rc = nlx_xo__nbd_allocate(nb->nb_tm, &cbd, &nb->nb_desc);
		if (rc == 0)
			rc = nlx_core_buf_passive_recv(&dp->xd_core, ctp, cbp);
		if (rc != 0)
		/* NOTE(review): the failure cleanup statement is missing;
		   as extracted, "break" parses as the "if" body. */
		break;

	/* NOTE(review): the passive-bulk-send case label is missing. */
		M0_ASSERT(nb->nb_length <= bufsize);
		cbp->cb_length = nb->nb_length;
		nlx_core_buf_desc_encode(ctp, cbp, &cbd);
		rc = nlx_xo__nbd_allocate(nb->nb_tm, &cbd, &nb->nb_desc);
		if (rc == 0)
			rc = nlx_core_buf_passive_send(&dp->xd_core, ctp, cbp);
		if (rc != 0)
		/* NOTE(review): failure cleanup statement missing here too. */
		break;

	/* NOTE(review): the active-bulk-receive case label is missing. */
		rc = nlx_xo__nbd_recover(nb->nb_tm, &nb->nb_desc, &cbd);
		if (rc == 0)
			rc = nlx_core_buf_desc_decode(ctp, cbp, &cbd);
		if (rc == 0) /* remote addr and size decoded */
			rc = nlx_core_buf_active_recv(&dp->xd_core, ctp, cbp);
		break;

	/* NOTE(review): the active-bulk-send case label is missing. */
		M0_ASSERT(nb->nb_length <= bufsize);
		cbp->cb_length = nb->nb_length;
		rc = nlx_xo__nbd_recover(nb->nb_tm, &nb->nb_desc, &cbd);
		if (rc == 0) /* remote addr and size decoded */
			rc = nlx_core_buf_desc_decode(ctp, cbp, &cbd);
		if (rc == 0)
			rc = nlx_core_buf_active_send(&dp->xd_core, ctp, cbp);
		break;

	default:
		M0_IMPOSSIBLE("invalid queue type");
		break;
	}
	/* undo the event provisioning on any failure */
	if (rc != 0)
		nlx_core_bevq_release(ctp, need);
	return M0_RC(rc);
}
378 
379 static void nlx_xo_buf_del(struct m0_net_buffer *nb)
380 {
381  struct nlx_xo_domain *dp;
382  struct nlx_xo_transfer_mc *tp;
383  struct nlx_xo_buffer *bp = nb->nb_xprt_private;
384 
385  M0_PRE_EX(nlx_buffer_invariant(nb) && nb->nb_tm != NULL);
386  dp = nb->nb_dom->nd_xprt_private;
387  tp = nb->nb_tm->ntm_xprt_private;
388  NLXDBGP(tp, 1, "%p: nlx_xo_buf_del(%p, %lX)\n", tp, nb,
389  (unsigned long) nb->nb_flags);
390  nlx_core_buf_del(&dp->xd_core, &tp->xtm_core, &bp->xb_core);
391 }
392 
/**
 * Initialises the transport private data of a transfer machine: the
 * event condition variable and the back-pointer to the TM.  Processor
 * affinity, the worker thread and the core TM are set up later, at
 * confine/start time.
 * NOTE(review): the allocation of tp (and possibly a precondition and
 * postcondition) are missing from this extraction.
 */
static int nlx_xo_tm_init(struct m0_net_transfer_mc *tm)
{
	struct nlx_xo_transfer_mc *tp;

	M0_PRE(tm->ntm_xprt_private == NULL);

	if (tp == NULL)
		return M0_ERR(-ENOMEM);
	tm->ntm_xprt_private = tp;

	/* defer init of processors, thread and xtm_core to TM confine/start */
	m0_cond_init(&tp->xtm_ev_cond, &tm->ntm_mutex);
	tp->xtm_tm = tm;

	return 0;
}
412 
/**
 * Finalises the transport private data of a transfer machine.
 * @pre tm->ntm_callback_counter == 0
 * NOTE(review): this extraction lost the bodies of both "if" statements
 * (a thread join when the worker ran, and a bitmap fini when processors
 * were confined, presumably), plus the final free of tp; as extracted
 * the two "if" statements nest unintentionally.
 */
static void nlx_xo_tm_fini(struct m0_net_transfer_mc *tm)
{
	struct nlx_xo_transfer_mc *tp = tm->ntm_xprt_private;

	M0_PRE(tm->ntm_callback_counter == 0);

	if (tp->xtm_ev_thread.t_state != TS_PARKED)

	if (tp->xtm_processors.b_words != NULL)
		tm->ntm_xprt_private = NULL;
}
430 
/**
 * Starts the transfer machine: records its end-point address in the
 * core TM and spawns the event worker (nlx_tm_ev_worker) thread; the
 * two steps are chained with the GNU "?:" short-circuit so the thread
 * is only created when the first step succeeds.
 * Called with the TM mutex held.
 * NOTE(review): the heads of both expressions assigning rc (the
 * address-decode/core-start call and the M0_THREAD_INIT invocation,
 * presumably) are missing from this extraction; only their
 * continuation lines survive.
 */
static int nlx_xo_tm_start(struct m0_net_transfer_mc *tm, const char *addr)
{
	struct nlx_xo_domain *dp;
	struct nlx_xo_transfer_mc *tp;
	int rc;

	M0_PRE(m0_mutex_is_locked(&tm->ntm_mutex));

	dp = tm->ntm_dom->nd_xprt_private;
	tp = tm->ntm_xprt_private;

		&tp->xtm_core.ctm_addr) ?:
		NULL, &nlx_tm_ev_worker, tm, "m0_nlx_tm");
	return M0_RC(rc);
}
449 
/**
 * Stops the transfer machine, optionally cancelling all queued buffers
 * first.  Called with the TM mutex held.
 * NOTE(review): a statement using tp (a cond-signal waking the event
 * worker, presumably - tp is otherwise unused) is missing from this
 * extraction.
 */
static int nlx_xo_tm_stop(struct m0_net_transfer_mc *tm, bool cancel)
{
	struct nlx_xo_transfer_mc *tp = tm->ntm_xprt_private;

	M0_PRE(m0_mutex_is_locked(&tm->ntm_mutex));

	if (cancel)
		m0_net__tm_cancel(tm);
	return 0;
}
462 
/**
 * Records the processor affinity bitmap for the TM's event worker by
 * copying the caller's bitmap into xtm_processors.  Called with the TM
 * mutex held.
 * NOTE(review): the body of the first "if" (finalising a previously set
 * bitmap, presumably) is missing from this extraction; as extracted,
 * the m0_bitmap_init call parses as that "if" body.
 */
static int nlx_xo_tm_confine(struct m0_net_transfer_mc *tm,
			     const struct m0_bitmap *processors)
{
	struct nlx_xo_transfer_mc *tp = tm->ntm_xprt_private;
	int rc;

	M0_PRE(m0_mutex_is_locked(&tm->ntm_mutex));
	M0_PRE(processors != NULL);
	if (tp->xtm_processors.b_words != NULL)
	rc = m0_bitmap_init(&tp->xtm_processors, processors->b_nr);
	if (rc == 0)
		m0_bitmap_copy(&tp->xtm_processors, processors);
	return M0_RC(rc);
}
479 
/**
 * Drains all pending buffer events from the core queue, converts each
 * to a m0_net_buffer_event and delivers it to the application with the
 * TM mutex dropped around the delivery (signalling on the TM channel is
 * suppressed by bumping the callback counter).  Provisioned internal
 * event structures are released when LNet reports a buffer unlinked.
 * Called with the TM mutex held.
 * NOTE(review): the function signature line itself, the expected-qtype
 * operand of the first M0_ASSERT, the event-post call and the final
 * channel broadcast are all missing from this extraction; consult
 * version control for the authoritative text.
 */
{
	struct nlx_xo_transfer_mc *tp = tm->ntm_xprt_private;
	struct nlx_core_buffer_event cbev;
	int num_events = 0;

	M0_PRE(m0_mutex_is_locked(&tm->ntm_mutex));

	while (nlx_core_buf_event_get(&tp->xtm_core, &cbev)) {
		struct m0_net_buffer_event nbev;
		int rc;

		rc = nlx_xo_core_bev_to_net_bev(tm, &cbev, &nbev);
		if (rc != 0) {
			/* Failure can only happen for receive message events
			   when end point creation fails due to lack
			   of memory.
			   We can ignore the event unless LNet also indicates
			   that it has unlinked the buffer; in the latter case
			   we will deliver a failure buffer operation to the
			   application.
			   We will increment the failure counters for the
			   cases where we eat the event. Note that LNet still
			   knows about the buffer.
			 */
			M0_ASSERT(nbev.nbe_buffer->nb_qtype ==
			/* NOTE(review): right-hand operand missing from
			   this extraction */
			M0_ASSERT(rc == -ENOMEM);
			if (!cbev.cbe_unlinked) {
				struct m0_net_qstats *q;
				q = &tm->ntm_qstats[nbev.nbe_buffer->nb_qtype];
				q->nqs_num_f_events++;
				NLXDBGP(tp, 1, "%p: skipping event\n", tp);
				continue;
			}
			NLXDBGP(tp, 1, "%p: event conversion failed\n", tp);
		}

		/* Release provisioned internal buffer event structures. Done
		   on unlink only rather than piece-meal with each
		   nlx_core_buf_event_get() so as to not lose count due to
		   premature termination on failure and cancellation.
		 */
		if (cbev.cbe_unlinked) {
			struct nlx_xo_buffer *bp;
			struct nlx_core_buffer *cbp;
			size_t need;
			bp = nbev.nbe_buffer->nb_xprt_private;
			cbp = &bp->xb_core;
			need = cbp->cb_max_operations;
			NLXDBGP(tp, 3, "%p: reducing need by %d\n",
				tp, (int) need);
			nlx_core_bevq_release(&tp->xtm_core, need);
		}
		NLXDBGP(tp, 1, "%p: event buf:%p qt:%d status:%d flags:%lx\n",
			tp, nbev.nbe_buffer, (int) nbev.nbe_buffer->nb_qtype,
			(int) nbev.nbe_status,
			(unsigned long) nbev.nbe_buffer->nb_flags);

		/* Deliver the event out of the mutex.
		   Suppress signalling on the TM channel by incrementing
		   the callback counter.
		 */
		tm->ntm_callback_counter++;
		m0_mutex_unlock(&tm->ntm_mutex);

		num_events++;
		/* NOTE(review): the actual delivery call
		   (m0_net_buffer_event_post(&nbev), presumably) is missing
		   from this extraction. */

		/* re-enter the mutex */
		m0_mutex_lock(&tm->ntm_mutex);
		tm->ntm_callback_counter--;

	}
	NLXDBGP(tp,2,"%p: delivered %d events\n", tp, num_events);

	/* if we ever left the mutex, wake up waiters on the TM channel */
	if (num_events > 0 && tm->ntm_callback_counter == 0)
	/* NOTE(review): the broadcast on tm->ntm_chan is missing from
	   this extraction. */
}
566 
568 {
570  return 0;
571 }
572 
/**
 * Reports whether a buffer event is currently available: a zero-timeout
 * core event wait returning 0 means an event is pending.
 * NOTE(review): two lines (preconditions, presumably) are missing from
 * this extraction.
 */
static bool nlx_xo_bev_pending(struct m0_net_transfer_mc *tm)
{
	struct nlx_xo_domain *dp;
	struct nlx_xo_transfer_mc *tp;

	tp = tm->ntm_xprt_private;
	dp = tm->ntm_dom->nd_xprt_private;
	return nlx_core_buf_event_wait(&dp->xd_core, &tp->xtm_core, 0) == 0;
}
584 
/**
 * Arms delayed event delivery: records the channel on which the worker
 * should signal when buffer events arrive.  Called with the TM mutex
 * held.
 * NOTE(review): the statement actually awakening the worker (a
 * cond-signal on xtm_ev_cond, per the comment below) is missing from
 * this extraction.
 */
static void nlx_xo_bev_notify(struct m0_net_transfer_mc *tm,
			      struct m0_chan *chan)
{
	struct nlx_xo_transfer_mc *tp;

	M0_PRE(m0_mutex_is_locked(&tm->ntm_mutex));
	tp = tm->ntm_xprt_private;

	/* set the notification channel and awaken nlx_tm_ev_worker() */
	tp->xtm_ev_chan = chan;

	return;
}
600 
602  *dom)
603 {
605 
606  return sizeof(struct nlx_core_buf_desc);
607 }
608 
610 {
611  M0_PRE(ndom != NULL);
612 
614  M0_SEG_SIZE);
615 }
616 
/**
 * Maximum number of RPC buffer segments: the domain's maximum buffer
 * size divided by a per-segment size.
 * NOTE(review): the divisor expression (a call to
 * nlx_xo_rpc_max_seg_size(ndom), presumably) is missing from this
 * extraction - the division is left dangling.
 */
static uint32_t nlx_xo_rpc_max_segs_nr(struct m0_net_domain *ndom)
{
	M0_PRE(ndom != NULL);

	return m0_net_domain_get_max_buffer_size(ndom) /
}
624 
626  m0_bcount_t rpc_size)
627 {
628  M0_PRE(ndom != NULL);
629 
630  return default_xo_rpc_max_msg_size(ndom, rpc_size);
631 }
632 
633 static uint32_t nlx_xo_rpc_max_recv_msgs(struct m0_net_domain *ndom,
634  m0_bcount_t rpc_size)
635 {
636  M0_PRE(ndom != NULL);
637 
638  return m0_net_domain_get_max_buffer_size(ndom) /
639  nlx_xo_rpc_max_msg_size(ndom, rpc_size);
640 }
641 
/**
 * The LNet transport operations vector wired into m0_net_lnet_xprt.
 * NOTE(review): the ".xo_dom_init = nlx_xo_dom_init," entry is missing
 * from this extraction (the xo_dom_init member is declared first in
 * m0_net_xprt_ops); restore from version control.
 */
static const struct m0_net_xprt_ops nlx_xo_xprt_ops = {
	.xo_dom_fini = nlx_xo_dom_fini,
	.xo_get_max_buffer_size = nlx_xo_get_max_buffer_size,
	.xo_get_max_buffer_segment_size = nlx_xo_get_max_buffer_segment_size,
	.xo_get_max_buffer_segments = nlx_xo_get_max_buffer_segments,
	.xo_end_point_create = nlx_xo_end_point_create,
	.xo_buf_register = nlx_xo_buf_register,
	.xo_buf_deregister = nlx_xo_buf_deregister,
	.xo_buf_add = nlx_xo_buf_add,
	.xo_buf_del = nlx_xo_buf_del,
	.xo_tm_init = nlx_xo_tm_init,
	.xo_tm_fini = nlx_xo_tm_fini,
	.xo_tm_start = nlx_xo_tm_start,
	.xo_tm_stop = nlx_xo_tm_stop,
	.xo_tm_confine = nlx_xo_tm_confine,
	.xo_bev_deliver_all = nlx_xo_bev_deliver_all,
	.xo_bev_deliver_sync = nlx_xo_bev_deliver_sync,
	.xo_bev_pending = nlx_xo_bev_pending,
	.xo_bev_notify = nlx_xo_bev_notify,
	.xo_get_max_buffer_desc_size = nlx_xo_get_max_buffer_desc_size,
	.xo_rpc_max_seg_size = nlx_xo_rpc_max_seg_size,
	.xo_rpc_max_segs_nr = nlx_xo_rpc_max_segs_nr,
	.xo_rpc_max_msg_size = nlx_xo_rpc_max_msg_size,
	.xo_rpc_max_recv_msgs = nlx_xo_rpc_max_recv_msgs,
};
668 
/*
 * The exported LNet transport instance; in user space with the socket
 * mock enabled, the mocked instance is defined instead (both share the
 * "lnet" name and the same ops vector).
 * NOTE(review): the opening line of the non-mocked definition
 * ("const struct m0_net_xprt m0_net_lnet_xprt = {") is missing from
 * this extraction.
 */
#if !defined(ENABLE_SOCK_MOCK_LNET) || defined(__KERNEL__)
	.nx_name = "lnet",
	.nx_ops = &nlx_xo_xprt_ops
};
M0_EXPORTED(m0_net_lnet_xprt);
#else
const struct m0_net_xprt m0_net_lnet_mocked_xprt = {
	.nx_name = "lnet",
	.nx_ops = &nlx_xo_xprt_ops
};
#endif
690 
695 /*
696  * Local variables:
697  * c-indentation-style: "K&R"
698  * c-basic-offset: 8
699  * tab-width: 8
700  * fill-column: 79
701  * scroll-step: 1
702  * End:
703  */
M0_INTERNAL m0_bcount_t m0_net_domain_get_max_buffer_segment_size(struct m0_net_domain *dom)
struct m0_net_transfer_mc * nb_tm
Definition: net.h:1357
#define M0_PRE(cond)
static bool nlx_dom_invariant(const struct m0_net_domain *dom)
Definition: lnet_xo.c:28
static m0_bcount_t nlx_xo_get_max_buffer_desc_size(const struct m0_net_domain *dom)
Definition: lnet_xo.c:601
M0_INTERNAL int m0_bitmap_init(struct m0_bitmap *map, size_t nr)
Definition: bitmap.c:86
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
static m0_bcount_t nlx_xo_rpc_max_seg_size(struct m0_net_domain *ndom)
Definition: lnet_xo.c:609
M0_INTERNAL int nlx_core_buf_msg_recv(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1373
static bool nlx_xo_buffer_bufvec_invariant(const struct m0_net_buffer *nb)
Definition: lnet_xo.c:185
M0_INTERNAL int nlx_core_buf_del(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1631
struct m0_net_end_point xe_ep
Definition: lnet_xo.h:57
static struct m0_semaphore q
Definition: rwlock.c:55
#define NULL
Definition: misc.h:38
m0_bindex_t nb_offset
Definition: net.h:1344
M0_INTERNAL void m0_bitmap_fini(struct m0_bitmap *map)
Definition: bitmap.c:97
static int32_t nlx_xo_get_max_buffer_segments(const struct m0_net_domain *dom)
Definition: lnet_xo.c:157
M0_INTERNAL void nlx_core_dom_fini(struct nlx_core_domain *cd)
Definition: klnet_core.c:1207
struct m0_bufvec nb_buffer
Definition: net.h:1322
#define ergo(a, b)
Definition: misc.h:293
M0_INTERNAL void m0_net__tm_cancel(struct m0_net_transfer_mc *tm)
Definition: tm.c:145
int m0_thread_join(struct m0_thread *q)
Definition: kthread.c:169
uint32_t nbd_len
Definition: net_otw_types.h:37
M0_INTERNAL m0_bcount_t nlx_core_get_max_buffer_segment_size(struct nlx_core_domain *lcdom)
Definition: klnet_core.c:1226
struct m0_bitmap xtm_processors
Definition: lnet_xo.h:90
const struct m0_net_xprt m0_net_lnet_xprt
Definition: lnet_xo.c:679
struct nlx_core_ep_addr cb_addr
static void nlx_xo_dom_fini(struct m0_net_domain *dom)
Definition: lnet_xo.c:130
uint64_t m0_time_t
Definition: time.h:37
uint64_t xe_magic
Definition: lnet_xo.h:54
char xe_addr[M0_NET_LNET_XEP_ADDR_LEN]
Definition: lnet_xo.h:66
uint8_t * nbd_data
Definition: net_otw_types.h:38
struct m0_net_qstats ntm_qstats[M0_NET_QT_NR]
Definition: net.h:880
struct m0_vec ov_vec
Definition: vec.h:147
enum m0_net_tm_state ntm_state
Definition: net.h:819
m0_bcount_t nb_length
Definition: net.h:1334
uint64_t nb_flags
Definition: net.h:1489
struct m0_mutex nd_mutex
Definition: net.h:381
Definition: vec.h:49
struct m0_net_domain * ntm_dom
Definition: net.h:853
unsigned _debug_
Definition: lnet_xo.h:79
uint64_t m0_bcount_t
Definition: types.h:77
#define M0_THREAD_INIT(thread, TYPE, init, func, arg, namefmt,...)
Definition: thread.h:139
m0_bcount_t nb_min_receive_size
Definition: net.h:1496
const char * nep_addr
Definition: net.h:503
#define container_of(ptr, type, member)
Definition: misc.h:33
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
struct m0_net_buffer * nbe_buffer
Definition: net.h:1194
#define _NLXIS(s)
static int nlx_xo__nbd_recover(struct m0_net_transfer_mc *tm, const struct m0_net_buf_desc *nbd, struct nlx_core_buf_desc *cbd)
Definition: lnet_xo.c:268
int(* _nlx_tm_timeout_buffers)(struct m0_net_transfer_mc *tm, m0_time_t now)
Definition: lnet_xo.c:80
M0_INTERNAL void m0_net_buffer_event_post(const struct m0_net_buffer_event *ev)
Definition: buf.c:314
static void nlx_xo_buf_deregister(struct m0_net_buffer *nb)
Definition: lnet_xo.c:230
void ** ov_buf
Definition: vec.h:149
M0_INTERNAL void m0_cond_init(struct m0_cond *cond, struct m0_mutex *mutex)
Definition: cond.c:40
static void nlx_tm_ev_worker(struct m0_net_transfer_mc *tm)
static bool nlx_xo_bev_pending(struct m0_net_transfer_mc *tm)
Definition: lnet_xo.c:573
M0_INTERNAL int nlx_core_buf_active_recv(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1467
M0_INTERNAL void nlx_core_dom_set_debug(struct nlx_core_domain *lcdom, unsigned dbg)
Definition: lnet_core.c:447
return M0_RC(rc)
Definition: sock.c:754
M0_INTERNAL void nlx_core_bevq_release(struct nlx_core_transfer_mc *lctm, size_t release)
Definition: lnet_core.c:204
#define M0_ASSERT_EX(cond)
uint64_t nlx_core_opaque_ptr_t
struct m0_chan ntm_chan
Definition: net.h:874
#define M0_ENTRY(...)
Definition: trace.h:170
int i
Definition: dir.c:1033
struct nlx_core_ep_addr ctm_addr
int32_t nbe_status
Definition: net.h:1218
return M0_ERR(-EOPNOTSUPP)
void * ntm_xprt_private
Definition: net.h:886
static struct nlx_core_ep_addr * nlx_ep_to_core(struct m0_net_end_point *ep)
Definition: lnet_pvt.h:46
struct m0_net_domain * xd_dom
Definition: lnet_xo.h:74
static int nlx_xo_core_bev_to_net_bev(struct m0_net_transfer_mc *tm, struct nlx_core_buffer_event *lcbev, struct m0_net_buffer_event *nbev)
Definition: lnet_tm.c:240
enum m0_net_queue_type nb_qtype
Definition: net.h:1363
void * nd_xprt_private
Definition: net.h:393
M0_INTERNAL int nlx_core_bevq_provision(struct nlx_core_domain *lcdom, struct nlx_core_transfer_mc *lctm, size_t need)
Definition: lnet_core.c:175
uint32_t nb_max_receive_msgs
Definition: net.h:1502
#define M0_ASSERT(cond)
static m0_time_t nlx_tm_get_buffer_timeout_tick(const struct m0_net_transfer_mc *tm)
M0_INTERNAL bool m0_mutex_is_locked(const struct m0_mutex *mutex)
Definition: mutex.c:95
static m0_bcount_t nlx_xo_get_max_buffer_size(const struct m0_net_domain *dom)
Definition: lnet_xo.c:140
M0_INTERNAL void m0_cond_fini(struct m0_cond *cond)
Definition: cond.c:46
static bool nlx_buffer_invariant(const struct m0_net_buffer *nb)
Definition: lnet_xo.c:45
m0_bcount_t cb_min_receive_size
static int nlx_xo_tm_confine(struct m0_net_transfer_mc *tm, const struct m0_bitmap *processors)
Definition: lnet_xo.c:463
struct m0_chan * xtm_ev_chan
Definition: lnet_xo.h:101
struct nlx_core_domain xd_core
Definition: lnet_xo.h:77
static int nlx_xo_tm_init(struct m0_net_transfer_mc *tm)
Definition: lnet_xo.c:393
#define NLX_ALLOC(ptr, len)
Definition: lnet_core.h:637
static struct m0_stob_domain * dom
Definition: storage.c:38
M0_INTERNAL void m0_cond_signal(struct m0_cond *cond)
Definition: cond.c:94
M0_INTERNAL m0_bcount_t nlx_core_get_max_buffer_size(struct nlx_core_domain *lcdom)
Definition: klnet_core.c:1220
uint32_t ntm_callback_counter
Definition: net.h:850
static void nlx_xo_bev_deliver_all(struct m0_net_transfer_mc *tm)
Definition: lnet_xo.c:480
M0_INTERNAL int32_t nlx_core_get_max_buffer_segments(struct nlx_core_domain *lcdom)
Definition: klnet_core.c:1237
struct m0_net_domain * nb_dom
Definition: net.h:1351
static bool nlx_ep_invariant(const struct m0_net_end_point *ep)
Definition: lnet_xo.c:35
#define NLX_ALLOC_ALIGNED_PTR(ptr)
Definition: lnet_core.h:630
M0_INTERNAL int nlx_core_buf_active_send(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1517
#define M0_POST(cond)
Definition: xcode.h:73
M0_INTERNAL void m0_net_desc_free(struct m0_net_buf_desc *desc)
Definition: net.c:87
M0_INTERNAL void nlx_core_buf_deregister(struct nlx_core_domain *cd, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1330
uint32_t v_nr
Definition: vec.h:51
M0_INTERNAL m0_bcount_t m0_net_domain_get_max_buffer_size(struct m0_net_domain *dom)
Definition: chan.h:229
struct m0_cond xtm_ev_cond
Definition: lnet_xo.h:98
M0_INTERNAL int nlx_core_buf_register(struct nlx_core_domain *cd, nlx_core_opaque_ptr_t buffer_id, const struct m0_bufvec *bvec, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1295
M0_INTERNAL bool nlx_core_buf_event_get(struct nlx_core_transfer_mc *lctm, struct nlx_core_buffer_event *lcbe)
Definition: lnet_core.c:215
static uint32_t nlx_xo_rpc_max_recv_msgs(struct m0_net_domain *ndom, m0_bcount_t rpc_size)
Definition: lnet_xo.c:633
m0_bcount_t * v_count
Definition: vec.h:53
static uint32_t nlx_xo_rpc_max_segs_nr(struct m0_net_domain *ndom)
Definition: lnet_xo.c:617
static uint64_t min64u(uint64_t a, uint64_t b)
Definition: arith.h:66
static struct nlx_xo_interceptable_subs nlx_xo_iv
Definition: lnet_xo.c:83
uint32_t cb_max_operations
int(* _nlx_ep_create)(struct m0_net_end_point **epp, struct m0_net_transfer_mc *tm, const struct nlx_core_ep_addr *cepa)
Definition: lnet_xo.c:75
static int nlx_ep_create(struct m0_net_end_point **epp, struct m0_net_transfer_mc *tm, const struct nlx_core_ep_addr *cepa)
Definition: lnet_ep.c:48
static int nlx_tm_timeout_buffers(struct m0_net_transfer_mc *tm, m0_time_t now)
static int nlx_xo_buf_add(struct m0_net_buffer *nb)
Definition: lnet_xo.c:280
M0_INTERNAL m0_bcount_t m0_vec_count(const struct m0_vec *vec)
Definition: vec.c:53
M0_INTERNAL void m0_bitmap_copy(struct m0_bitmap *dst, const struct m0_bitmap *src)
Definition: bitmap.c:158
static uint32_t timeout
Definition: console.c:52
int nlx_core_ep_addr_decode(struct nlx_core_domain *lcdom, const char *ep_addr, struct nlx_core_ep_addr *cepa)
Definition: lnet_core.c:386
static int nlx_xo_end_point_create(struct m0_net_end_point **epp, struct m0_net_transfer_mc *tm, const char *addr)
Definition: lnet_xo.c:165
M0_INTERNAL int nlx_core_buf_passive_recv(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1566
M0_INTERNAL void nlx_core_buf_desc_encode(struct nlx_core_transfer_mc *lctm, struct nlx_core_buffer *lcbuf, struct nlx_core_buf_desc *cbd)
Definition: lnet_core.c:292
struct m0_thread xtm_ev_thread
Definition: lnet_xo.h:93
static int nlx_xo_buf_register(struct m0_net_buffer *nb)
Definition: lnet_xo.c:203
M0_INTERNAL int nlx_core_dom_init(struct m0_net_domain *dom, struct nlx_core_domain *cd)
Definition: klnet_core.c:1167
uint64_t * b_words
Definition: bitmap.h:46
M0_INTERNAL int nlx_core_buf_passive_send(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1617
static m0_bcount_t nlx_xo_rpc_max_msg_size(struct m0_net_domain *ndom, m0_bcount_t rpc_size)
Definition: lnet_xo.c:625
static void nlx_xo_buf_del(struct m0_net_buffer *nb)
Definition: lnet_xo.c:379
static struct m0_chan chan[RDWR_REQUEST_MAX]
static int nlx_xo_bev_deliver_sync(struct m0_net_transfer_mc *tm)
Definition: lnet_xo.c:567
#define NLXDBGP(ptr, dbg, fmt,...)
Definition: lnet_main.c:879
static m0_bcount_t nlx_xo_get_max_buffer_segment_size(const struct m0_net_domain *dom)
Definition: lnet_xo.c:148
m0_bcount_t cb_length
#define NLX_FREE_ALIGNED_PTR(ptr)
Definition: lnet_core.h:633
#define _0C(exp)
Definition: assert.h:311
M0_INTERNAL int nlx_core_buf_event_wait(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, m0_time_t timeout)
Definition: klnet_core.c:1685
static int nlx_xo_tm_stop(struct m0_net_transfer_mc *tm, bool cancel)
Definition: lnet_xo.c:450
const char * nx_name
Definition: net.h:125
static const struct m0_net_xprt_ops nlx_xo_xprt_ops
Definition: lnet_xo.c:642
static bool nlx_tm_invariant(const struct m0_net_transfer_mc *tm)
Definition: lnet_xo.c:56
enum m0_net_queue_type cb_qtype
int(* xo_dom_init)(const struct m0_net_xprt *xprt, struct m0_net_domain *dom)
Definition: net.h:139
struct m0_net_buf_desc nb_desc
Definition: net.h:1412
static int nlx_xo__nbd_allocate(struct m0_net_transfer_mc *tm, const struct nlx_core_buf_desc *cbd, struct m0_net_buf_desc *nbd)
Definition: lnet_xo.c:250
static struct bulkio_params * bp
Definition: bulkio_ut.c:44
struct m0_net_transfer_mc * xtm_tm
Definition: lnet_xo.h:87
M0_INTERNAL int nlx_core_buf_msg_send(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1417
size_t b_nr
Definition: bitmap.h:44
struct m0_net_xprt * xprt
Definition: module.c:61
#define M0_PRE_EX(cond)
static int nlx_xo_tm_start(struct m0_net_transfer_mc *tm, const char *addr)
Definition: lnet_xo.c:431
M0_INTERNAL m0_bcount_t default_xo_rpc_max_msg_size(struct m0_net_domain *ndom, m0_bcount_t rpc_size)
Definition: net.c:255
void * nb_xprt_private
Definition: net.h:1461
int(* _nlx_core_buf_event_wait)(struct nlx_core_domain *lcdom, struct nlx_core_transfer_mc *lctm, m0_time_t timeout)
Definition: lnet_xo.c:72
M0_INTERNAL void m0_chan_broadcast(struct m0_chan *chan)
Definition: chan.c:172
static int nlx_xo_dom_init(const struct m0_net_xprt *xprt, struct m0_net_domain *dom)
Definition: lnet_xo.c:103
int32_t rc
Definition: trigger_fop.h:47
M0_INTERNAL int nlx_core_buf_desc_decode(struct nlx_core_transfer_mc *lctm, struct nlx_core_buffer *lcbuf, struct nlx_core_buf_desc *cbd)
Definition: lnet_core.c:330
#define M0_POST_EX(cond)
Definition: vec.h:145
static void nlx_xo_bev_notify(struct m0_net_transfer_mc *tm, struct m0_chan *chan)
Definition: lnet_xo.c:585
struct m0_net_end_point * nb_ep
Definition: net.h:1424
m0_time_t(* _nlx_tm_get_buffer_timeout_tick)(const struct m0_net_transfer_mc *tm)
Definition: lnet_xo.c:78
enum m0_thread_state t_state
Definition: thread.h:111
struct nlx_core_transfer_mc xtm_core
Definition: lnet_xo.h:104
#define M0_IMPOSSIBLE(fmt,...)
static void nlx_xo_tm_fini(struct m0_net_transfer_mc *tm)
Definition: lnet_xo.c:413