/* -*- C -*- */
/*
 * Copyright (c) 2012-2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */


#include "lib/arith.h" /* M0_CNT_INC */
#include "lib/assert.h"
#include "lib/errno.h"
#include "lib/misc.h"
#define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_NET
#include "lib/trace.h"
#include "lib/finject.h"
#include "motr/magic.h"
#include "net/net_internal.h"
#include "net/buffer_pool.h"

/**
   @addtogroup net
   @{
 */
M0_TL_DESCR_DEFINE(m0_net_tm, "tm list", M0_INTERNAL,
                   struct m0_net_buffer, nb_tm_linkage, nb_magic,
                   M0_NET_BUFFER_LINK_MAGIC, M0_NET_BUFFER_HEAD_MAGIC);
M0_TL_DEFINE(m0_net_tm, M0_INTERNAL, struct m0_net_buffer);
M0_EXPORTED(m0_net_tm_tlist_is_empty);

M0_INTERNAL bool m0_net__tm_state_is_valid(enum m0_net_tm_state ts)
{
        return ts >= M0_NET_TM_UNDEFINED && ts <= M0_NET_TM_FAILED;
}

M0_INTERNAL bool m0_net__tm_ev_type_is_valid(enum m0_net_tm_ev_type et)
{
        return et >= M0_NET_TEV_ERROR && et < M0_NET_TEV_NR;
}

M0_INTERNAL bool m0_net__tm_event_invariant(const struct m0_net_tm_event *ev)
{
        return
                _0C(m0_net__tm_ev_type_is_valid(ev->nte_type)) &&
                _0C(ev->nte_tm != NULL) &&
                _0C(m0_net__tm_invariant(ev->nte_tm)) &&
                _0C(ergo(ev->nte_type == M0_NET_TEV_STATE_CHANGE,
                         m0_net__tm_state_is_valid(ev->nte_next_state))) &&
                _0C(ergo(ev->nte_type == M0_NET_TEV_STATE_CHANGE &&
                         ev->nte_next_state == M0_NET_TM_STARTED,
                         m0_net__ep_invariant(ev->nte_ep, ev->nte_tm, true)));
}

M0_INTERNAL bool m0_net__tm_invariant(const struct m0_net_transfer_mc *tm)
{
        return
                _0C(tm != NULL) &&
                _0C(tm->ntm_callbacks != NULL) &&
                _0C(tm->ntm_dom != NULL) &&
                _0C(tm->ntm_callbacks->ntc_event_cb != NULL) &&
                _0C(tm->ntm_state >= M0_NET_TM_INITIALIZED &&
                    tm->ntm_state <= M0_NET_TM_FAILED) &&
                _0C(ergo(tm->ntm_state == M0_NET_TM_STARTED,
                         tm->ntm_ep != NULL)) &&
                _0C(ergo(!M0_IN(tm->ntm_state, (M0_NET_TM_STARTED,
                                                M0_NET_TM_STOPPING)),
                         m0_forall(i, ARRAY_SIZE(tm->ntm_q),
                                   m0_net_tm_tlist_is_empty(&tm->ntm_q[i]))));
}

M0_INTERNAL void m0_net_tm_event_post(const struct m0_net_tm_event *ev)
{
        struct m0_net_transfer_mc *tm;
        struct m0_net_buffer_pool *pool = NULL;

        M0_PRE(ev != NULL);
        tm = ev->nte_tm;
        M0_PRE(tm != NULL);
        M0_PRE(m0_mutex_is_not_locked(&tm->ntm_mutex));

        /* pre-callback, in mutex */
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_event_invariant(ev));

        if (ev->nte_type == M0_NET_TEV_STATE_CHANGE) {
                tm->ntm_state = ev->nte_next_state;
                if (tm->ntm_state == M0_NET_TM_STARTED) {
                        tm->ntm_ep = ev->nte_ep; /* ep now visible */
                        pool = tm->ntm_recv_pool;
                }
        }

        M0_CNT_INC(tm->ntm_callback_counter);
        m0_mutex_unlock(&tm->ntm_mutex);

        (*tm->ntm_callbacks->ntc_event_cb)(ev);

        M0_LOG(M0_DEBUG, "tm=%p state=%d", tm, tm->ntm_state);

        /*
         * post-callback, out of mutex: perform initial provisioning if
         * required
         */
        if (pool != NULL)
                m0_net__tm_provision_recv_q(tm);

        m0_net__tm_post_callback(tm);
}
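
/**
 * Usage sketch added for illustration (not part of the original source):
 * a transport's asynchronous start logic would typically post the STARTED
 * state change roughly like this.  "tm" and "ep" are assumed to come from
 * the transport's own start context.
 * @code
 * struct m0_net_tm_event ev = {
 *         .nte_type       = M0_NET_TEV_STATE_CHANGE,
 *         .nte_tm         = tm,
 *         .nte_next_state = M0_NET_TM_STARTED,
 *         .nte_ep         = ep, // becomes visible as tm->ntm_ep
 * };
 * m0_net_tm_event_post(&ev);   // invokes ntc_event_cb outside the TM mutex
 * @endcode
 */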

M0_INTERNAL void m0_net__tm_post_callback(struct m0_net_transfer_mc *tm)
{
        m0_mutex_lock(&tm->ntm_mutex);
        M0_CNT_DEC(tm->ntm_callback_counter);
        if (tm->ntm_callback_counter == 0)
                m0_chan_broadcast(&tm->ntm_chan);
        m0_mutex_unlock(&tm->ntm_mutex);
}

static void m0_net__tm_cleanup(struct m0_net_transfer_mc *tm)
{
        int i;
        tm->ntm_dom = NULL;
        m0_chan_fini_lock(&tm->ntm_chan);
        m0_mutex_fini(&tm->ntm_mutex);
        m0_nep_tlist_fini(&tm->ntm_end_points);
        for (i = 0; i < ARRAY_SIZE(tm->ntm_q); ++i) {
                m0_net_tm_tlist_fini(&tm->ntm_q[i]);
        }
        tm->ntm_xprt_private = NULL;
}

M0_INTERNAL void m0_net__tm_cancel(struct m0_net_transfer_mc *tm)
{
        struct m0_net_buffer *nb;
        int                   qt;

        M0_PRE(m0_mutex_is_locked(&tm->ntm_mutex));

        for (qt = 0; qt < ARRAY_SIZE(tm->ntm_q); ++qt) {
                m0_tl_for(m0_net_tm, &tm->ntm_q[qt], nb) {
                        tm->ntm_dom->nd_xprt->nx_ops->xo_buf_del(nb);
                        tm->ntm_qstats[qt].nqs_num_dels++;
                } m0_tl_endfor;
        }
}

M0_INTERNAL int m0_net_tm_init(struct m0_net_transfer_mc *tm,
                               struct m0_net_domain *dom)
{
        int rc;
        int i;

        m0_mutex_lock(&dom->nd_mutex);
        M0_PRE(tm != NULL);
        M0_PRE(tm->ntm_state == M0_NET_TM_UNDEFINED);
        M0_PRE(tm->ntm_callbacks != NULL &&
               tm->ntm_callbacks->ntc_event_cb != NULL);

        if (M0_FI_ENABLED("fake_error")) {
                m0_mutex_unlock(&dom->nd_mutex);
                return M0_RC(-EINVAL);
        }
        m0_mutex_init(&tm->ntm_mutex);
        tm->ntm_callback_counter = 0;
        tm->ntm_dom = dom;
        tm->ntm_ep = NULL;
        m0_nep_tlist_init(&tm->ntm_end_points);
        m0_chan_init(&tm->ntm_chan, &tm->ntm_mutex);
        for (i = 0; i < ARRAY_SIZE(tm->ntm_q); ++i) {
                m0_net_tm_tlist_init(&tm->ntm_q[i]);
        }
        M0_SET_ARR0(tm->ntm_qstats);
        tm->ntm_xprt_private = NULL;
        tm->ntm_bev_auto_deliver = true;
        tm->ntm_recv_pool = NULL;
        tm->ntm_recv_pool_callbacks = NULL;
        tm->ntm_recv_queue_min_length = M0_NET_TM_RECV_QUEUE_DEF_LEN;
        m0_atomic64_set(&tm->ntm_recv_queue_deficit, 0);
        rc = dom->nd_xprt->nx_ops->xo_tm_init(tm);
        if (rc >= 0) {
                m0_list_add_tail(&dom->nd_tms, &tm->ntm_dom_linkage);
                tm->ntm_state = M0_NET_TM_INITIALIZED;
        } else
                m0_net__tm_cleanup(tm);
        m0_mutex_unlock(&dom->nd_mutex);

        return M0_RC(rc);
}
M0_EXPORTED(m0_net_tm_init);
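
/**
 * Usage sketch added for illustration (not part of the original source):
 * minimal transfer machine initialization.  "dom" is assumed to be an
 * already initialized struct m0_net_domain; tm_event_cb is a
 * caller-supplied handler.
 * @code
 * static void tm_event_cb(const struct m0_net_tm_event *ev)
 * {
 *         // react to M0_NET_TEV_STATE_CHANGE / M0_NET_TEV_ERROR here
 * }
 *
 * static const struct m0_net_tm_callbacks tm_cbs = {
 *         .ntc_event_cb = tm_event_cb,
 * };
 *
 * struct m0_net_transfer_mc tm = {
 *         .ntm_callbacks = &tm_cbs,
 *         .ntm_state     = M0_NET_TM_UNDEFINED,
 * };
 * int rc = m0_net_tm_init(&tm, dom);
 * @endcode
 */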

M0_INTERNAL void m0_net_tm_fini(struct m0_net_transfer_mc *tm)
{
        struct m0_net_domain *dom = tm->ntm_dom;
        M0_ENTRY("tm=%p", tm);

        /*
         * Wait for ongoing event processing to drain without holding lock:
         * events modify state and end point refcounts. Also applies to ongoing
         * provisioning, which requires a check for state in addition to the
         * counter.
         */
        if (tm->ntm_callback_counter > 0) {
                struct m0_clink tmwait;
                M0_LOG(M0_NOTICE, "tm=%p state=%d cc=%d", tm, tm->ntm_state,
                       tm->ntm_callback_counter);
                m0_clink_init(&tmwait, NULL);
                m0_clink_add_lock(&tm->ntm_chan, &tmwait);
                while (tm->ntm_callback_counter > 0 ||
                       tm->ntm_state == M0_NET_TM_STOPPING)
                        m0_chan_wait(&tmwait);
                m0_clink_del_lock(&tmwait);
        }

        m0_mutex_lock(&dom->nd_mutex);
        M0_PRE(M0_IN(tm->ntm_state, (M0_NET_TM_STOPPED, M0_NET_TM_FAILED,
                                     M0_NET_TM_INITIALIZED)));
        M0_PRE(m0_forall(i, ARRAY_SIZE(tm->ntm_q),
                         m0_net_tm_tlist_is_empty(&tm->ntm_q[i])));
        M0_PRE((m0_nep_tlist_is_empty(&tm->ntm_end_points) &&
                tm->ntm_ep == NULL) ||
               (m0_nep_tlist_length(&tm->ntm_end_points) == 1 &&
                tm->ntm_ep != NULL &&
                m0_nep_tlist_contains(&tm->ntm_end_points, tm->ntm_ep) &&
                m0_atomic64_get(&tm->ntm_ep->nep_ref.ref_cnt) == 1));

        /* release method requires TM mutex to be locked */
        m0_mutex_lock(&tm->ntm_mutex);
        if (tm->ntm_ep != NULL) {
                m0_ref_put(&tm->ntm_ep->nep_ref);
                tm->ntm_ep = NULL;
        }
        m0_mutex_unlock(&tm->ntm_mutex);

        dom->nd_xprt->nx_ops->xo_tm_fini(tm);

        M0_ASSERT(m0_nep_tlist_is_empty(&tm->ntm_end_points));

        tm->ntm_state = M0_NET_TM_UNDEFINED;
        m0_list_del(&tm->ntm_dom_linkage);
        m0_net__tm_cleanup(tm);
        m0_list_link_fini(&tm->ntm_dom_linkage);

        m0_mutex_unlock(&dom->nd_mutex);
        M0_LEAVE("tm=%p", tm);
}
M0_EXPORTED(m0_net_tm_fini);

M0_INTERNAL int m0_net_tm_start(struct m0_net_transfer_mc *tm, const char *addr)
{
        int rc;

        M0_ENTRY("tm=%p addr=%s", (void *)tm, addr);

        M0_ASSERT(addr != NULL);
        M0_PRE(tm != NULL);
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        M0_PRE(tm->ntm_state == M0_NET_TM_INITIALIZED);

        if (M0_FI_ENABLED("fake_error")) {
                tm->ntm_state = M0_NET_TM_FAILED;
                m0_mutex_unlock(&tm->ntm_mutex);
                return M0_RC(0);
        }

        tm->ntm_state = M0_NET_TM_STARTING;
        rc = tm->ntm_dom->nd_xprt->nx_ops->xo_tm_start(tm, addr);
        if (rc < 0) {
                /* xprt did not start, no retry supported */
                tm->ntm_state = M0_NET_TM_FAILED;
                M0_ASSERT(tm->ntm_ep == NULL);
        }
        M0_POST(m0_net__tm_invariant(tm));
        m0_mutex_unlock(&tm->ntm_mutex);
        M0_ASSERT(rc <= 0);
        return M0_RC(rc);
}
M0_EXPORTED(m0_net_tm_start);
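
/**
 * Usage sketch added for illustration (not part of the original source):
 * starting is asynchronous, so callers typically wait on tm.ntm_chan until
 * the state-change event moves the TM out of the transient states.  The
 * address string is transport specific; the one shown is a placeholder.
 * @code
 * struct m0_clink clink;
 *
 * m0_clink_init(&clink, NULL);
 * m0_clink_add_lock(&tm.ntm_chan, &clink);
 * rc = m0_net_tm_start(&tm, "local-endpoint-address");
 * while (rc == 0 &&
 *        !M0_IN(tm.ntm_state, (M0_NET_TM_STARTED, M0_NET_TM_FAILED)))
 *         m0_chan_wait(&clink);
 * m0_clink_del_lock(&clink);
 * @endcode
 */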

M0_INTERNAL int m0_net_tm_stop(struct m0_net_transfer_mc *tm, bool abort)
{
        int                  rc;
        enum m0_net_tm_state oldstate;

        M0_ENTRY("tm=%p abort=%d", (void *)tm, !!abort);

        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        M0_PRE(M0_IN(tm->ntm_state, (M0_NET_TM_INITIALIZED, M0_NET_TM_STARTING,
                                     M0_NET_TM_STARTED)));

        oldstate = tm->ntm_state;
        tm->ntm_state = M0_NET_TM_STOPPING;
        rc = tm->ntm_dom->nd_xprt->nx_ops->xo_tm_stop(tm, abort);
        if (rc < 0) {
                tm->ntm_state = oldstate;
        } else
                m0_atomic64_set(&tm->ntm_recv_queue_deficit, 0);

        M0_POST(m0_net__tm_invariant(tm));
        m0_mutex_unlock(&tm->ntm_mutex);
        M0_ASSERT(rc <= 0);
        return M0_RC(rc);
}
M0_EXPORTED(m0_net_tm_stop);
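
/**
 * Usage sketch added for illustration (not part of the original source):
 * stopping mirrors starting; wait for the STOPPED (or FAILED) state-change
 * event before calling m0_net_tm_fini().  "clink" continues the sketch
 * from m0_net_tm_start() above.
 * @code
 * m0_clink_add_lock(&tm.ntm_chan, &clink);
 * rc = m0_net_tm_stop(&tm, false); // false: drain queues instead of aborting
 * while (rc == 0 &&
 *        !M0_IN(tm.ntm_state, (M0_NET_TM_STOPPED, M0_NET_TM_FAILED)))
 *         m0_chan_wait(&clink);
 * m0_clink_del_lock(&clink);
 * m0_net_tm_fini(&tm);
 * @endcode
 */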

M0_INTERNAL int m0_net__tm_stats_get(struct m0_net_transfer_mc *tm,
                                     enum m0_net_queue_type qtype,
                                     struct m0_net_qstats *qs, bool reset)
{
        M0_PRE(qtype == M0_NET_QT_NR || m0_net__qtype_is_valid(qtype));
        M0_ASSERT(reset || qs != NULL);
        M0_PRE(m0_mutex_is_locked(&tm->ntm_mutex));
        M0_PRE(m0_net__tm_invariant(tm));
        if (qtype == M0_NET_QT_NR) {
                if (qs != NULL)
                        memcpy(qs, tm->ntm_qstats, sizeof tm->ntm_qstats);
                if (reset)
                        M0_SET_ARR0(tm->ntm_qstats);
        } else {
                if (qs != NULL)
                        *qs = tm->ntm_qstats[qtype];
                if (reset)
                        M0_SET0(&tm->ntm_qstats[qtype]);
        }

        return 0;
}

M0_INTERNAL int m0_net_tm_stats_get(struct m0_net_transfer_mc *tm,
                                    enum m0_net_queue_type qtype,
                                    struct m0_net_qstats *qs, bool reset)
{
        int rc;

        m0_mutex_lock(&tm->ntm_mutex);
        rc = m0_net__tm_stats_get(tm, qtype, qs, reset);
        m0_mutex_unlock(&tm->ntm_mutex);
        return M0_RC(rc);
}
M0_EXPORTED(m0_net_tm_stats_get);
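
/**
 * Usage sketch added for illustration (not part of the original source):
 * sampling and resetting the statistics of one queue; pass M0_NET_QT_NR
 * to copy out the entire array instead.  Assumes <stdio.h> for printf.
 * @code
 * struct m0_net_qstats qs;
 *
 * rc = m0_net_tm_stats_get(&tm, M0_NET_QT_MSG_RECV, &qs, true);
 * if (rc == 0)
 *         printf("recv dels: %llu\n", (unsigned long long)qs.nqs_num_dels);
 * @endcode
 */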

M0_INTERNAL int m0_net_tm_confine(struct m0_net_transfer_mc *tm,
                                  const struct m0_bitmap *processors)
{
        int rc;
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        M0_PRE(tm->ntm_state == M0_NET_TM_INITIALIZED);
        M0_PRE(processors != NULL);
        if (tm->ntm_dom->nd_xprt->nx_ops->xo_tm_confine != NULL)
                rc = tm->ntm_dom->nd_xprt->nx_ops->xo_tm_confine(tm,
                                                                 processors);
        else
                rc = -ENOSYS;
        M0_POST(m0_net__tm_invariant(tm));
        m0_mutex_unlock(&tm->ntm_mutex);
        return M0_RC(rc);
}
M0_EXPORTED(m0_net_tm_confine);

M0_INTERNAL int
m0_net_buffer_event_deliver_synchronously(struct m0_net_transfer_mc *tm)
{
        int rc;
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        M0_PRE(tm->ntm_state == M0_NET_TM_INITIALIZED);
        M0_PRE(tm->ntm_bev_auto_deliver);
        if (tm->ntm_dom->nd_xprt->nx_ops->xo_bev_deliver_sync != NULL) {
                rc = tm->ntm_dom->nd_xprt->nx_ops->xo_bev_deliver_sync(tm);
                if (rc == 0)
                        tm->ntm_bev_auto_deliver = false;
        } else
                rc = -ENOSYS;
        M0_POST(ergo(rc == 0, !tm->ntm_bev_auto_deliver));
        m0_mutex_unlock(&tm->ntm_mutex);
        return M0_RC(rc);
}
M0_EXPORTED(m0_net_buffer_event_deliver_synchronously);

M0_INTERNAL void m0_net_buffer_event_deliver_all(struct m0_net_transfer_mc *tm)
{
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        M0_PRE(!tm->ntm_bev_auto_deliver);
        tm->ntm_dom->nd_xprt->nx_ops->xo_bev_deliver_all(tm);
        m0_mutex_unlock(&tm->ntm_mutex);
}
M0_EXPORTED(m0_net_buffer_event_deliver_all);

M0_INTERNAL bool m0_net_buffer_event_pending(struct m0_net_transfer_mc *tm)
{
        bool result;
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        M0_PRE(!tm->ntm_bev_auto_deliver);
        result = tm->ntm_dom->nd_xprt->nx_ops->xo_bev_pending(tm);
        m0_mutex_unlock(&tm->ntm_mutex);
        return result;
}
M0_EXPORTED(m0_net_buffer_event_pending);

M0_INTERNAL void m0_net_buffer_event_notify(struct m0_net_transfer_mc *tm,
                                            struct m0_chan *chan)
{
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        M0_PRE(!tm->ntm_bev_auto_deliver);
        M0_PRE(tm->ntm_dom->nd_xprt->nx_ops->xo_bev_notify != NULL);
        tm->ntm_dom->nd_xprt->nx_ops->xo_bev_notify(tm, chan);
        m0_mutex_unlock(&tm->ntm_mutex);
}
M0_EXPORTED(m0_net_buffer_event_notify);
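
/**
 * Usage sketch added for illustration (not part of the original source):
 * an application that opts out of automatic delivery (before starting the
 * TM) drives buffer event delivery itself.  "wait_chan", "wait_clink" and
 * the loop condition are hypothetical application state.
 * @code
 * rc = m0_net_buffer_event_deliver_synchronously(&tm);
 * // rc == -ENOSYS: transport supports automatic delivery only
 * while (rc == 0 && keep_running) {
 *         if (!m0_net_buffer_event_pending(&tm)) {
 *                 m0_net_buffer_event_notify(&tm, &wait_chan);
 *                 m0_chan_wait(&wait_clink); // clink registered on wait_chan
 *         }
 *         m0_net_buffer_event_deliver_all(&tm);
 * }
 * @endcode
 */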

M0_INTERNAL void m0_net_tm_colour_set(struct m0_net_transfer_mc *tm,
                                      uint32_t colour)
{
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        tm->ntm_pool_colour = colour;
        m0_mutex_unlock(&tm->ntm_mutex);
}
M0_EXPORTED(m0_net_tm_colour_set);

M0_INTERNAL uint32_t m0_net_tm_colour_get(struct m0_net_transfer_mc *tm)
{
        uint32_t colour;
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        colour = tm->ntm_pool_colour;
        m0_mutex_unlock(&tm->ntm_mutex);
        return colour;
}
M0_EXPORTED(m0_net_tm_colour_get);

M0_INTERNAL int m0_net_tm_pool_attach(struct m0_net_transfer_mc *tm,
                                      struct m0_net_buffer_pool *bufpool,
                                      const struct m0_net_buffer_callbacks
                                      *callbacks, m0_bcount_t min_recv_size,
                                      uint32_t max_recv_msgs,
                                      uint32_t min_recv_queue_len)
{
        int rc;
        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        M0_PRE(tm->ntm_state == M0_NET_TM_INITIALIZED);
        M0_PRE(bufpool != NULL);
        M0_PRE(callbacks != NULL &&
               callbacks->nbc_cb[M0_NET_QT_MSG_RECV] != NULL);
        M0_PRE(min_recv_size > 0);
        M0_PRE(max_recv_msgs > 0);
        if (bufpool->nbp_ndom == tm->ntm_dom) {
                tm->ntm_recv_pool = bufpool;
                tm->ntm_recv_pool_callbacks = callbacks;
                tm->ntm_recv_queue_min_recv_size = min_recv_size;
                tm->ntm_recv_queue_max_recv_msgs = max_recv_msgs;
                if (min_recv_queue_len > M0_NET_TM_RECV_QUEUE_DEF_LEN)
                        tm->ntm_recv_queue_min_length = min_recv_queue_len;
                rc = 0;
        } else
                rc = -EINVAL;
        m0_mutex_unlock(&tm->ntm_mutex);
        return M0_RC(rc);
}
M0_EXPORTED(m0_net_tm_pool_attach);
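
/**
 * Usage sketch added for illustration (not part of the original source):
 * attaching a receive buffer pool before the TM is started.  "recv_pool"
 * is assumed to be a m0_net_buffer_pool created in the same domain and
 * "recv_cbs" to supply at least the M0_NET_QT_MSG_RECV callback; the
 * numeric values are arbitrary examples.
 * @code
 * rc = m0_net_tm_pool_attach(&tm, &recv_pool, &recv_cbs,
 *                            1 << 16, // min_recv_size per message
 *                            4,       // max_recv_msgs per buffer
 *                            2);      // min_recv_queue_len
 * @endcode
 */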

M0_INTERNAL void m0_net_tm_pool_length_set(struct m0_net_transfer_mc *tm,
                                           uint32_t len)
{
        struct m0_net_buffer_pool *pool = NULL;

        m0_mutex_lock(&tm->ntm_mutex);
        M0_PRE(m0_net__tm_invariant(tm));
        M0_PRE(tm->ntm_state == M0_NET_TM_INITIALIZED ||
               tm->ntm_state == M0_NET_TM_STARTING ||
               tm->ntm_state == M0_NET_TM_STARTED);
        tm->ntm_recv_queue_min_length = len;
        if (tm->ntm_recv_pool != NULL && tm->ntm_state == M0_NET_TM_STARTED) {
                pool = tm->ntm_recv_pool;
                M0_CNT_INC(tm->ntm_callback_counter);
        }
        m0_mutex_unlock(&tm->ntm_mutex);
        if (pool != NULL) {
                m0_net__tm_provision_recv_q(tm);
                m0_mutex_lock(&tm->ntm_mutex);
                M0_CNT_DEC(tm->ntm_callback_counter);
                m0_chan_broadcast(&tm->ntm_chan);
                m0_mutex_unlock(&tm->ntm_mutex);
        }
        return;
}
M0_EXPORTED(m0_net_tm_pool_length_set);

#undef M0_TRACE_SUBSYSTEM

/** @} */ /* end of net group */

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 79
 * scroll-step: 1
 * End:
 */