Motr M0 — tm_provision_ut.c (Doxygen source listing; the per-line numbers and the trailing cross-reference index are documentation-extraction artifacts, not part of the original source file).
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2012-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
23 /*
24  * This file is depends on bulk_if.c and is included in it, so that bulk
25  * interface dummy transport is reused to test auto provisioning of receive
26  * message queue.
27  */
28 
29 #include "net/buffer_pool.h"
30 
/* Max messages received per buffer; the UT later raises this to 2 for TM2. */
31 static int max_recv_msgs = 1;
/*
 * NOTE(review): this is a Doxygen text extract; original lines 32 and
 * 34-36 (further enum constants, presumably pool sizing such as
 * POOL_COLOURS/POOL_THRESHOLD/POOL_BUF_NR used below) were dropped --
 * consult the real tm_provision_ut.c before editing.
 */
33 enum {
 /* Minimum receive buffer size: 1 << 12 = 4 KiB. */
37  MIN_RECV_SIZE = 1 << 12,
38 };
39 
/*
 * TM event callback used by the provisioning UT.
 * When the TM-started event is delivered, verifies that the receive
 * message queue is still empty, i.e. auto-provisioning has not yet run
 * at event-callback time.
 * NOTE(review): original line 52 was dropped by the doc extraction
 * (possibly an ut_tm_prov_event_cb_calls increment, per the symbol
 * index) -- verify against the real source.
 */
41 void ut_tm_prov_event_cb(const struct m0_net_tm_event *ev)
42 {
43  struct m0_net_transfer_mc *tm;
44  tm = ev->nte_tm;
45  /*
46  * Check that provisioning has not happened when this cb is called and
47  * TM is in M0_NET_TM_STARTED state.
48  */
49  if (tm->ntm_state == M0_NET_TM_STARTED)
50  M0_UT_ASSERT(m0_net_tm_tlist_length(
51  &tm->ntm_q[M0_NET_QT_MSG_RECV]) == 0);
53 }
54 
55 /* UT transfer machine */
/*
 * NOTE(review): original lines 56-57 were dropped by the doc extraction;
 * per the symbol index they declared
 * `static struct m0_net_tm_callbacks ut_tm_prov_cb = {` wiring
 * .ntc_event_cb to ut_tm_prov_event_cb -- this `};` closes that
 * initializer. Verify against the real source.
 */
58 };
59 
/*
 * Receive-queue buffer callback for the provisioning UT.
 * After a buffer event, checks that if the TM is started and the pool
 * still has free buffers, the receive-queue deficit is zero (i.e.
 * re-provisioning already ran before this callback), and returns the
 * buffer to the pool when it is no longer queued.
 * NOTE(review): original lines 70, 73 and 76-77 were dropped by the doc
 * extraction (the M0_UT_ASSERT(m0_atomic64_get( head at 73 and,
 * presumably, the m0_net_buffer_pool_put() return path at 76-77) --
 * verify against the real source.
 */
60 static void ut_prov_msg_recv_cb(const struct m0_net_buffer_event *ev)
61 {
62  struct m0_net_transfer_mc *tm;
63  struct m0_net_buffer *nb;
64 
65  M0_UT_ASSERT(ev != NULL && ev->nbe_buffer != NULL);
66  nb = ev->nbe_buffer;
67  tm = nb->nb_tm;
68  M0_UT_ASSERT(tm->ntm_recv_pool != NULL && nb->nb_pool != NULL);
69 
71  if (nb->nb_tm->ntm_state == M0_NET_TM_STARTED)
72  if (nb->nb_pool->nbp_free > 0)
74  &tm->ntm_recv_queue_deficit) == 0);
75  if (!(nb->nb_flags & M0_NET_BUF_QUEUED))
78 }
79 
/*
 * Buffer callbacks registered via m0_net_tm_pool_attach().
 * NOTE(review): original lines 82-87 (the per-queue nbc_cb entries,
 * presumably [M0_NET_QT_MSG_RECV] = ut_prov_msg_recv_cb plus the other
 * queue callbacks) were dropped by the doc extraction -- verify against
 * the real source.
 */
80 static const struct m0_net_buffer_callbacks ut_buf_prov_cb = {
81  .nbc_cb = {
88  },
89 };
90 
/*
 * Below-threshold pool callback for b_ops; intentionally a no-op --
 * the UT does not react to the pool dropping below its threshold.
 */
91 static void ut_pool_low(struct m0_net_buffer_pool *bp)
92 {
93  /* Buffer pool is below threshold. */
94 }
95 
/* Set by ut_pool_not_empty so the UT can assert the callback fired. */
96 static bool pool_not_empty_called = false;
/*
 * NOTE(review): original lines 97 and 99 were dropped by the doc
 * extraction; per the symbol index line 97 was
 * `static void ut_pool_not_empty(struct m0_net_buffer_pool *bp)` and
 * line 99 likely invoked m0_net_domain_buffer_pool_not_empty(bp) to do
 * domain-wide re-provisioning -- verify against the real source.
 */
98 {
100  pool_not_empty_called = true;
101 }
102 
/*
 * Pool operations installed on pool_prov.
 * NOTE(review): original line 104 was dropped by the doc extraction
 * (presumably `.nbpo_not_empty = ut_pool_not_empty,`) -- verify against
 * the real source.
 */
103 static const struct m0_net_buffer_pool_ops b_ops = {
105  .nbpo_below_threshold = ut_pool_low,
106 };
107 
/* Set by ut_tm_prov_stop so the UT can assert the stop path ran. */
108 static bool ut_tm_prov_stop_called = false;
/*
 * UT transport stop op: drains the TM's receive message queue by
 * deleting each queued buffer (dropping/retaking ntm_mutex around the
 * delete and waiting on ntm_chan for the completion event), then
 * delegates to the base ut_tm_stop().
 * Must be entered with tm->ntm_mutex held (asserted below).
 * NOTE(review): original lines 125 and 130 were dropped by the doc
 * extraction; line 130 presumably assigned `rc` (checked at 131) and
 * line 125 likely asserted ut_buf_del_called -- verify against the
 * real source.
 */
109 static int ut_tm_prov_stop(struct m0_net_transfer_mc *tm, bool cancel)
110 {
111  int rc;
112  struct m0_clink tmwait;
113  struct m0_net_buffer *nb;
114  struct m0_tl *ql;
115 
116  m0_clink_init(&tmwait, NULL);
117  ql = &tm->ntm_q[M0_NET_QT_MSG_RECV];
118 
119  M0_UT_ASSERT(m0_mutex_is_locked(&tm->ntm_mutex));
120  m0_tl_for(m0_net_tm, ql, nb) {
121  ut_buf_del_called = false;
122  m0_clink_add(&tm->ntm_chan, &tmwait);
123  m0_mutex_unlock(&tm->ntm_mutex);
124  m0_net_buffer_del(nb, tm);
126  /* wait on channel for post (and consume UT thread) */
127  m0_chan_wait(&tmwait);
128  m0_mutex_lock(&tm->ntm_mutex);
129  m0_clink_del(&tmwait);
131  M0_UT_ASSERT(rc == 0);
132  } m0_tl_endfor;
133 
134  ut_tm_prov_stop_called = true;
135  rc = ut_tm_stop(tm, false);
136  m0_clink_fini(&tmwait);
137  return rc;
138 }
139 
140 /*
141  * It Checks that when a buffer is de-queued from receive message queue,
142  * re-provision of the queue happens before the callback is called. In
143  * call back buffers are returned to the pool if they are not queued.
144  * Checked in ut_prov_msg_recv_cb after m0_net_buffer_del is called.
145  * It adds a deleted buffer with TM's colour into the pool in corresponding
146  * colour list.
147  */
/*
 * Deletes the head buffer of tm's receive message queue and verifies
 * that re-provisioning kept the pool's free count unchanged and that
 * the deleted buffer landed on the pool's coloured list for this TM.
 * Returns the deleted buffer.
 * NOTE(review): original lines 149 (parameter list, per the symbol
 * index `(struct m0_net_transfer_mc *tm)`), 164, 168 and 173 were
 * dropped by the doc extraction; 168 presumably assigned `rc` (checked
 * at 169) -- verify against the real source.
 */
148 static struct m0_net_buffer *
150 {
151  struct m0_net_buffer *nb;
152  struct m0_clink tmwait;
153  int rc;
154  uint32_t prov_free;
155 
156  ut_buf_del_called = false;
157  prov_free = pool_prov->nbp_free;
158  M0_UT_ASSERT(m0_net_tm_tlist_length(
159  &pool_prov->nbp_colours[tm->ntm_pool_colour]) == 0);
160  nb = m0_net_tm_tlist_head(&tm->ntm_q[M0_NET_QT_MSG_RECV]);
161  m0_clink_init(&tmwait, NULL);
162  m0_clink_add_lock(&tm->ntm_chan, &tmwait);
163  m0_net_buffer_del(nb, tm);
165  /* wait on channel for post (and consume UT thread) */
166  m0_chan_wait(&tmwait);
167  m0_clink_del_lock(&tmwait);
169  M0_UT_ASSERT(rc == 0);
170  /* A buffer with colour of TM is put back into the pool. */
171  M0_UT_ASSERT(pool_prov->nbp_free == prov_free);
172  M0_UT_ASSERT(m0_net_tm_tlist_length(&tm->ntm_q[M0_NET_QT_MSG_RECV]) ==
174  M0_UT_ASSERT(m0_net_tm_tlist_length(
175  &pool_prov->nbp_colours[tm->ntm_pool_colour]) == 1);
176  m0_clink_fini(&tmwait);
177  return nb;
178 }
179 
/*
 * Checks that the coloured buffer nb was re-provisioned to the tail of
 * tm's receive queue and that the pool's coloured list for this TM is
 * empty again.
 * NOTE(review): original line 180 was dropped by the doc extraction;
 * per the symbol index it was
 * `static void provision_buffer_validate_colour(struct m0_net_buffer *nb,`.
 * Line 183 was also dropped -- verify against the real source.
 */
181  struct m0_net_transfer_mc *tm)
182 {
184  M0_UT_ASSERT(nb == m0_net_tm_tlist_tail(
185  &tm->ntm_q[M0_NET_QT_MSG_RECV]));
186  M0_UT_ASSERT(m0_net_tm_tlist_length(
187  &pool_prov->nbp_colours[tm->ntm_pool_colour]) == 0);
188 }
189 
190 /* TM stop and fini */
/*
 * Stops the given TM, waits on its channel for the state change,
 * joins the UT worker thread, and finalizes the TM.
 * NOTE(review): original line 202 was dropped by the doc extraction --
 * verify against the real source.
 */
191 static void ut_tm_stop_fini(struct m0_net_transfer_mc *tm)
192 {
193  struct m0_clink tmwait;
194  int rc;
195  m0_clink_init(&tmwait, NULL);
196  m0_clink_add_lock(&tm->ntm_chan, &tmwait);
197  rc = m0_net_tm_stop(tm, false);
198  M0_UT_ASSERT(rc == 0);
199  m0_chan_wait(&tmwait);
200  m0_clink_del_lock(&tmwait);
201  m0_thread_join(&ut_tm_thread); /* cleanup thread */
203  m0_net_tm_fini(tm);
204  m0_clink_fini(&tmwait);
205 }
206 
/*
 * NOTE(review): headless initializers -- the doc extraction dropped
 * original lines 207-208 and 212-213, which per the symbol index were
 * `static struct m0_net_transfer_mc ut_prov_tm1 = {` and
 * `static struct m0_net_transfer_mc ut_prov_tm2 = {` (each presumably
 * also setting .ntm_callbacks = &ut_tm_prov_cb). Line 217 (likely the
 * `static struct m0_net_buffer_pool *pool_prov;` declaration per the
 * symbol index) was dropped as well -- verify against the real source.
 */
209  .ntm_state = M0_NET_TM_UNDEFINED
210 };
211 
214  .ntm_state = M0_NET_TM_UNDEFINED
215 };
216 
218 
/*
 * Main UT for automatic provisioning of the receive message queue:
 * initializes a domain and buffer pool, attaches two coloured TMs,
 * and exercises initial provisioning at TM start, synchronous
 * re-provisioning on queue-length change, deficit handling when the
 * pool is exhausted, domain-wide re-provisioning on pool replenish,
 * colour-correct buffer redistribution, and re-provisioning of TM2
 * from buffers released by stopping TM1.
 * NOTE(review): this is a Doxygen text extract -- many original lines
 * (e.g. 225, 234-235, 238-242, 247-248, 252-253, 256, 260, 264-270,
 * 275-277, 284-293, 298, 303-304, 310-314, 323-328, 335-345, 349-357,
 * 367-374, 379, 384-385, 391-398, 401-402, 410-426, 441-443, 462-464,
 * 472-473, 487-488, 504, 507, 511-518, 523-524, 528-529, 533-534) were
 * dropped, including most m0_net_* call sites whose results are
 * asserted below. Verify against the real tm_provision_ut.c.
 */
219 static void test_net_tm_prov(void)
220 {
221  int rc;
222  struct m0_net_transfer_mc *tm1 = &ut_prov_tm1;
223  struct m0_net_transfer_mc *tm2 = &ut_prov_tm2;
224  struct m0_net_domain *dom = &ut_prov_dom;
226  m0_bcount_t buf_seg_size;
227  uint32_t buf_segs;
228  struct m0_clink tmwait;
229  uint32_t tm_colours = 0;
230  struct m0_net_buffer *nb_tm1;
231  struct m0_net_buffer *nb_tm2;
232  uint32_t shift = 12;
233 
235 
236  /* initialize the domain */
237  ut_dom_init_called = false;
239 
241  M0_UT_ASSERT(rc == 0);
243  M0_UT_ASSERT(dom->nd_xprt == &ut_xprt);
244  M0_UT_ASSERT(dom->nd_xprt_private == &ut_xprt_pvt);
245  M0_ASSERT(m0_mutex_is_not_locked(&dom->nd_mutex));
246 
249  pool_prov->nbp_ops = &b_ops;
250 
251  /* get max buffer size */
254 
255  /* get max buffer segment size */
257  M0_UT_ASSERT(buf_seg_size == UT_MAX_BUF_SEGMENT_SIZE);
258 
259  /* get max buffer segments */
261  M0_UT_ASSERT(buf_segs == UT_MAX_BUF_SEGMENTS);
262 
263  /* allocate buffers for testing */
265  buf_seg_size, POOL_COLOURS, shift, false);
267  M0_UT_ASSERT(rc == 0);
271 
272  /* TM init with callbacks */
273  rc = m0_net_tm_init(tm1, dom);
274  M0_UT_ASSERT(rc == 0);
278 
279  /* API Tests */
280  m0_net_tm_colour_set(tm1, ++tm_colours);
281  M0_UT_ASSERT(tm_colours < POOL_COLOURS);
282  M0_UT_ASSERT(m0_net_tm_colour_get(tm1) == tm_colours);
283 
287  M0_UT_ASSERT(rc == 0);
294 
295  /* TM start */
296  m0_clink_init(&tmwait, NULL);
297  m0_clink_add_lock(&tm1->ntm_chan, &tmwait);
299  M0_UT_ASSERT(m0_net_tm_tlist_length(&tm1->ntm_q[M0_NET_QT_MSG_RECV]) ==
300  0);
301  rc = m0_net_tm_start(tm1, "addr1");
302  M0_UT_ASSERT(rc == 0);
305  tm1->ntm_state == M0_NET_TM_STARTED);
306 
307  /* wait on channel until TM state changed to started */
308  m0_chan_wait(&tmwait);
309  m0_clink_del_lock(&tmwait);
313  M0_UT_ASSERT(tm1->ntm_ep != NULL);
315  /*
316  * When TM starts initial provisioning happens before the channel is
317  * notified of the state change.
318  * Check for initial provisioning.
319  */
320  M0_UT_ASSERT(m0_net_tm_tlist_length(&tm1->ntm_q[M0_NET_QT_MSG_RECV]) !=
321  0);
322  M0_UT_ASSERT(m0_net_tm_tlist_length(&tm1->ntm_q[M0_NET_QT_MSG_RECV]) ==
326  /* clean up; real xprt would handle this itself */
329 
330  /*
331  * Check for provisioning when minimum buffers in the receive queue
332  * of tm is changed. Also re-provisioning happens synchronously with
333  * this call.
334  */
340 
341  /* Check for deficit when required buffers are more than that of pool.*/
342  m0_net_tm_pool_length_set(tm1, 10);
346 
347  /* Check for provisioning when pool is replenished. */
348  pool_not_empty_called = false;
352  M0_UT_ASSERT(rc == 2);
358 
359  /* Initialize another TM with different colour. */
360  rc = m0_net_tm_init(tm2, dom);
361  M0_UT_ASSERT(rc == 0);
362 
363  m0_net_tm_colour_set(tm2, ++tm_colours);
364  M0_UT_ASSERT(tm_colours < POOL_COLOURS);
365  M0_UT_ASSERT(m0_net_tm_colour_get(tm2) == tm_colours);
366  max_recv_msgs = 2;
370  M0_UT_ASSERT(rc == 0);
375 
376  /* TM2 start */
377  m0_clink_init(&tmwait, NULL);
378  m0_clink_add_lock(&tm2->ntm_chan, &tmwait);
380  M0_UT_ASSERT(m0_net_tm_tlist_length(&tm2->ntm_q[M0_NET_QT_MSG_RECV]) ==
381  0);
382  rc = m0_net_tm_start(tm2, "addr2");
383  M0_UT_ASSERT(rc == 0);
386  tm2->ntm_state == M0_NET_TM_STARTED);
387 
388  /* wait on channel until TM state changed to started */
389  m0_chan_wait(&tmwait);
390  m0_clink_del_lock(&tmwait);
394  /* No buffers to initially provision TM2. */
395  M0_UT_ASSERT(m0_net_tm_tlist_length(&tm2->ntm_q[M0_NET_QT_MSG_RECV]) ==
396  0);
399 
400  /* clean up; real xprt would handle this itself */
403 
404  /*
405  * Check for domain wide provisioning when pool is replenished.
406  * As pool is empty make deficit in TM1 to 3, so that both the TM's
407  * are provisioned.
408  */
409  m0_net_tm_pool_length_set(tm1, 13);
413 
414  pool_not_empty_called = false;
418  M0_UT_ASSERT(rc == 6);
422  M0_UT_ASSERT(m0_net_tm_tlist_length(&tm2->ntm_q[M0_NET_QT_MSG_RECV]) ==
427 
428  /*
429  * To test use case "buffer colour correctness during auto
430  * provisioning".
431  * - Buffer in TM1 having colour1 will be returned to the empty
432  * pool when TM1 buffer is deleted.
433  * - Also delete a buffer in TM2 so that pool contains buffers of both
434  * colours.
435  * - make sure buffer pool will have more than one buffer, otherwise
436  * TM2 may get TM1 coloured buffer.
437  * - Check that corresponding coloured buffers are provisioned to TM1
438  * and TM2.
439  */
440  /* Add some uncoloured buffers. */
444  M0_UT_ASSERT(rc == 2);
445 
446  M0_UT_ASSERT(m0_net_tm_tlist_length(
447  &pool_prov->nbp_colours[tm1->ntm_pool_colour]) == 0);
448  M0_UT_ASSERT(m0_net_tm_tlist_length(
449  &pool_prov->nbp_colours[tm2->ntm_pool_colour]) == 0);
450  /*
451  * Adds a coloured buffer into the pool and checks for its presence in
452  * corresponding coloured list of the pool.
453  */
454  nb_tm1 = pool_colour_buffer_add(tm1);
455  nb_tm2 = pool_colour_buffer_add(tm2);
456  M0_UT_ASSERT(m0_net_tm_tlist_length(
457  &pool_prov->nbp_colours[tm2->ntm_pool_colour]) == 1);
458  M0_UT_ASSERT(m0_net_tm_tlist_length(
459  &pool_prov->nbp_colours[tm1->ntm_pool_colour]) == 1);
460 
461  /* Add some uncoloured buffers. */
465  M0_UT_ASSERT(rc == 2);
466 
467  /*
468  * Provisions coloured buffer from the pool and checks its correctness
469  * in receive queue of corresponding TM regardless of the order of
470  * provisioning.
471  */
474 
475  M0_UT_ASSERT(m0_net_tm_tlist_length(
476  &pool_prov->nbp_colours[tm1->ntm_pool_colour]) == 0);
477  M0_UT_ASSERT(m0_net_tm_tlist_length(
478  &pool_prov->nbp_colours[tm2->ntm_pool_colour]) == 0);
479 
480  nb_tm1 = pool_colour_buffer_add(tm1);
481  nb_tm2 = pool_colour_buffer_add(tm2);
482  M0_UT_ASSERT(m0_net_tm_tlist_length(
483  &pool_prov->nbp_colours[tm1->ntm_pool_colour]) == 1);
/* NOTE(review): the next assert repeats tm1; the original likely checked tm2. */
484  M0_UT_ASSERT(m0_net_tm_tlist_length(
485  &pool_prov->nbp_colours[tm1->ntm_pool_colour]) == 1);
486 
489 
490  /*
491  * When TM stop is called it returns buffers in TM receive queue to
492  * the pool in ut_prov_msg_recv_cb. As pool is empty, adding buffers
493  * to it will trigger m0_net_buffer_pool_not_empty cb which does the
494  * domain wide re-provisioning based on deficit value.
495  *
496  * To test use case "return a buffer to the pool and trigger
497  * re-provisioning", do the following.
498  * - Create some deficit in TM2.
499  * - Stop the TM1
500  * As a result of this buffers used in TM1 will be returned to
501  * the empty pool and will be used to provision TM2.
502  */
503  pool_not_empty_called = false;
505  /* Create deficit of 10 buffers in TM2. */
506  m0_net_tm_pool_length_set(tm2, 15);
508 
509  /* TM1 stop and fini */
510  ut_tm_stop_fini(tm1);
512  /* Check whether all buffers are returned to the pool. */
517  /* TM2 is provisioned with buffers of TM1 returned to the pool. */
519 
520  /* TM2 stop and fini */
521  ut_tm_stop_fini(tm2);
522  /* Check whether all buffers are returned to the pool. */
525 
526  m0_clink_fini(&tmwait);
527  /* Finalize the buffer pool. */
530 
531  /* fini the domain */
532  ut_dom_fini_called = false;
535  ut_dom_init_called = false;
536 }
537 
/*
 * UT suite registration for the receive-queue auto-provisioning tests.
 * NOTE(review): original line 538 was dropped by the doc extraction;
 * per the symbol index it was `struct m0_ut_suite m0_net_tm_prov_ut = {`.
 */
539  .ts_name = "net-prov-ut",
540  .ts_init = NULL,
541  .ts_fini = NULL,
542  .ts_tests = {
543  { "net_prov", test_net_tm_prov},
544  { NULL, NULL }
545  }
546 };
547 M0_EXPORTED(m0_net_tm_prov_ut);
M0_INTERNAL m0_bcount_t m0_net_domain_get_max_buffer_segment_size(struct m0_net_domain *dom)
static int ut_tm_prov_event_cb_calls
M0_INTERNAL void m0_net_buffer_pool_fini(struct m0_net_buffer_pool *pool)
Definition: buffer_pool.c:154
m0_bcount_t ntm_recv_queue_min_recv_size
Definition: net.h:927
M0_INTERNAL void m0_chan_wait(struct m0_clink *link)
Definition: chan.c:336
struct m0_net_transfer_mc * nb_tm
Definition: net.h:1357
uint32_t ntm_recv_queue_min_length
Definition: net.h:908
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
void m0_net_domain_fini(struct m0_net_domain *dom)
Definition: domain.c:71
M0_INTERNAL int m0_net_tm_start(struct m0_net_transfer_mc *tm, const char *addr)
Definition: tm.c:261
struct m0_net_buffer_pool * nb_pool
Definition: net.h:1508
static int ut_tm_prov_stop(struct m0_net_transfer_mc *tm, bool cancel)
#define NULL
Definition: misc.h:38
M0_INTERNAL void m0_clink_init(struct m0_clink *link, m0_chan_cb_t cb)
Definition: chan.c:201
M0_INTERNAL void m0_clink_del(struct m0_clink *link)
Definition: chan.c:267
M0_INTERNAL void m0_clink_del_lock(struct m0_clink *link)
Definition: chan.c:293
int m0_thread_join(struct m0_thread *q)
Definition: kthread.c:169
static void ut_passive_bulk_recv_cb(const struct m0_net_buffer_event *ev)
Definition: bulk_if.c:554
static struct m0_net_buffer * pool_colour_buffer_add(struct m0_net_transfer_mc *tm)
void ut_tm_prov_event_cb(const struct m0_net_tm_event *ev)
static bool ut_end_point_create_called
Definition: bulk_if.c:62
M0_INTERNAL bool m0_mutex_is_not_locked(const struct m0_mutex *mutex)
Definition: mutex.c:101
M0_INTERNAL void m0_net_buffer_pool_unlock(struct m0_net_buffer_pool *pool)
Definition: buffer_pool.c:203
static void ut_active_bulk_recv_cb(const struct m0_net_buffer_event *ev)
Definition: bulk_if.c:564
const struct m0_net_buffer_pool_ops * nbp_ops
Definition: buffer_pool.h:263
enum m0_net_tm_state ntm_state
Definition: net.h:819
static int max_recv_msgs
struct m0_thread ut_del_thread
Definition: bulk_if.c:272
struct m0_tl * nbp_colours
Definition: buffer_pool.h:273
uint64_t nb_flags
Definition: net.h:1489
static void ut_active_bulk_send_cb(const struct m0_net_buffer_event *ev)
Definition: bulk_if.c:569
uint64_t m0_bcount_t
Definition: types.h:77
M0_INTERNAL bool m0_net_buffer_del(struct m0_net_buffer *buf, struct m0_net_transfer_mc *tm)
Definition: buf.c:261
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
static bool ut_tm_prov_stop_called
Definition: ut.h:77
struct m0_net_buffer * nbe_buffer
Definition: net.h:1194
M0_INTERNAL int m0_net_tm_pool_attach(struct m0_net_transfer_mc *tm, struct m0_net_buffer_pool *bufpool, const struct m0_net_buffer_callbacks *callbacks, m0_bcount_t min_recv_size, uint32_t max_recv_msgs, uint32_t min_recv_queue_len)
Definition: tm.c:459
M0_INTERNAL void m0_net_domain_buffer_pool_not_empty(struct m0_net_buffer_pool *pool)
Definition: tm_provision.c:484
static struct m0_net_buffer_pool * pool_prov
static struct m0_net_tm_callbacks ut_tm_prov_cb
#define m0_tl_endfor
Definition: tlist.h:700
static struct @424 ut_xprt_pvt
const struct m0_net_buffer_callbacks * ntm_recv_pool_callbacks
Definition: net.h:902
struct m0_chan ntm_chan
Definition: net.h:874
static void test_net_tm_prov(void)
M0_INTERNAL int m0_net_tm_init(struct m0_net_transfer_mc *tm, struct m0_net_domain *dom)
Definition: tm.c:160
static void ut_msg_send_cb(const struct m0_net_buffer_event *ev)
Definition: bulk_if.c:549
uint32_t ntm_recv_queue_max_recv_msgs
Definition: net.h:933
#define M0_ASSERT(cond)
M0_INTERNAL void m0_net_tm_fini(struct m0_net_transfer_mc *tm)
Definition: tm.c:204
static struct m0_net_xprt_ops ut_xprt_ops
Definition: bulk_if.c:425
M0_INTERNAL bool m0_mutex_is_locked(const struct m0_mutex *mutex)
Definition: mutex.c:95
static struct m0_net_domain ut_prov_dom
void(* ntc_event_cb)(const struct m0_net_tm_event *ev)
Definition: net.h:752
M0_INTERNAL void m0_net_tm_pool_length_set(struct m0_net_transfer_mc *tm, uint32_t len)
Definition: tm.c:490
static bool pool_not_empty_called
struct m0_atomic64 ref_cnt
Definition: refs.h:38
static bool ut_dom_init_called
Definition: bulk_if.c:57
static int ut_tm_stop(struct m0_net_transfer_mc *tm, bool cancel)
Definition: bulk_if.c:371
Definition: tlist.h:251
void m0_thread_fini(struct m0_thread *q)
Definition: thread.c:92
static struct m0_stob_domain * dom
Definition: storage.c:38
void(* nbpo_not_empty)(struct m0_net_buffer_pool *)
Definition: buffer_pool.h:150
struct m0_atomic64 ntm_recv_queue_deficit
Definition: net.h:914
static bool ut_dom_fini_called
Definition: bulk_if.c:58
M0_INTERNAL void m0_net_buffer_pool_lock(struct m0_net_buffer_pool *pool)
Definition: buffer_pool.c:186
static bool ut_tm_init_called
Definition: bulk_if.c:70
struct m0_list_link ntm_dom_linkage
Definition: net.h:883
static const struct m0_net_buffer_callbacks ut_buf_prov_cb
M0_INTERNAL bool m0_list_contains(const struct m0_list *list, const struct m0_list_link *link)
Definition: list.c:87
static bool ut_buf_del_called
Definition: bulk_if.c:68
static struct m0_thread ut_tm_thread
Definition: bulk_if.c:312
M0_INTERNAL m0_bcount_t m0_net_domain_get_max_buffer_size(struct m0_net_domain *dom)
m0_net_buffer_cb_proc_t nbc_cb[M0_NET_QT_NR]
Definition: net.h:1272
static struct m0_net_transfer_mc ut_prov_tm2
struct m0_tl ntm_q[M0_NET_QT_NR]
Definition: net.h:877
M0_INTERNAL void m0_net_tm_colour_set(struct m0_net_transfer_mc *tm, uint32_t colour)
Definition: tm.c:436
M0_INTERNAL int m0_net_tm_stop(struct m0_net_transfer_mc *tm, bool abort)
Definition: tm.c:293
struct m0_ref nep_ref
Definition: net.h:491
static struct m0_net_transfer_mc ut_prov_tm1
static int64_t m0_atomic64_get(const struct m0_atomic64 *a)
void m0_clink_add_lock(struct m0_chan *chan, struct m0_clink *link)
Definition: chan.c:255
const char * ts_name
Definition: ut.h:99
static bool ut_tm_start_called
Definition: bulk_if.c:72
int m0_net_domain_init(struct m0_net_domain *dom, const struct m0_net_xprt *xprt)
Definition: domain.c:36
#define M0_CNT_INC(cnt)
Definition: arith.h:226
struct m0_net_end_point * ntm_ep
Definition: net.h:868
M0_INTERNAL uint32_t m0_net_tm_colour_get(struct m0_net_transfer_mc *tm)
Definition: tm.c:448
#define M0_ALLOC_PTR(ptr)
Definition: memory.h:86
M0_INTERNAL void m0_clink_add(struct m0_chan *chan, struct m0_clink *link)
Definition: chan.c:228
static const struct m0_net_buffer_pool_ops b_ops
M0_INTERNAL int m0_net_buffer_pool_provision(struct m0_net_buffer_pool *pool, uint32_t buf_nr)
Definition: buffer_pool.c:125
M0_INTERNAL void m0_clink_fini(struct m0_clink *link)
Definition: chan.c:208
static void ut_prov_msg_recv_cb(const struct m0_net_buffer_event *ev)
M0_INTERNAL int32_t m0_net_domain_get_max_buffer_segments(struct m0_net_domain *dom)
static void ut_tm_stop_fini(struct m0_net_transfer_mc *tm)
M0_INTERNAL void m0_net_buffer_pool_put(struct m0_net_buffer_pool *pool, struct m0_net_buffer *buf, uint32_t colour)
Definition: buffer_pool.c:243
static void ut_passive_bulk_send_cb(const struct m0_net_buffer_event *ev)
Definition: bulk_if.c:559
static uint32_t buf_size
Definition: ad.c:75
static struct bulkio_params * bp
Definition: bulkio_ut.c:44
uint32_t ntm_pool_colour
Definition: net.h:921
static struct m0_net_xprt ut_xprt
Definition: bulk_if.c:448
int(* xo_tm_stop)(struct m0_net_transfer_mc *tm, bool cancel)
Definition: net.h:206
static void provision_buffer_validate_colour(struct m0_net_buffer *nb, struct m0_net_transfer_mc *tm)
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
void m0_free(void *data)
Definition: memory.c:146
struct m0_ut_suite m0_net_tm_prov_ut
M0_INTERNAL int m0_net_buffer_pool_init(struct m0_net_buffer_pool *pool, struct m0_net_domain *ndom, uint32_t threshold, uint32_t seg_nr, m0_bcount_t seg_size, uint32_t colours, unsigned shift, bool dont_dump)
Definition: buffer_pool.c:82
int32_t rc
Definition: trigger_fop.h:47
const struct m0_net_tm_callbacks * ntm_callbacks
Definition: net.h:816
#define M0_UT_ASSERT(a)
Definition: ut.h:46
static void ut_pool_low(struct m0_net_buffer_pool *bp)
static void ut_pool_not_empty(struct m0_net_buffer_pool *bp)
struct m0_net_transfer_mc * nte_tm
Definition: net.h:696
struct m0_net_buffer_pool * ntm_recv_pool
Definition: net.h:896