bulkio_ut.c
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2013-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
23 #include <sys/stat.h>
24 #include <sys/types.h>
25 
26 #include "lib/processor.h"
27 #include "lib/locality.h"
28 #include "lib/finject.h"
29 #include "ut/ut.h"
30 #include "bulkio_common.h"
31 #include "net/lnet/lnet.h"
32 #include "rpc/rpclib.h"
33 #include "ioservice/io_fops.c" /* To access static APIs. */
34 #include "ioservice/io_foms.c" /* To access static APIs. */
35 #include "motr/setup.h"
36 #include "motr/setup_internal.h" /* m0_motr_conf_setup */
37 #include "pool/pool.h"
38 #include "fop/fom_generic.c"
39 
40 #define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_IOSERVICE
41 
42 static bool bulkio_stob_created = false;
43 
44 static struct bulkio_params *bp;
45 
46 static struct m0_buf payload_buf;
47 static bool fol_check_enabled = false;
48 extern void bulkioapi_test(void);
49 static int io_fop_server_write_fom_create(struct m0_fop *fop,
50  struct m0_fom **m,
51  struct m0_reqh *reqh);
52 static int ut_io_fom_cob_rw_create(struct m0_fop *fop, struct m0_fom **m,
53  struct m0_reqh *reqh);
54 static int io_fop_server_read_fom_create(struct m0_fop *fop, struct m0_fom **m,
55  struct m0_reqh *reqh);
56 static int io_fop_stob_create_fom_create(struct m0_fop *fop, struct m0_fom **m,
57  struct m0_reqh *reqh);
58 static int check_write_fom_tick(struct m0_fom *fom);
59 static int check_read_fom_tick(struct m0_fom *fom);
60 
61 static const struct m0_fop_type_ops bulkio_stob_create_ops = {
63  .fto_io_coalesce = io_fop_coalesce,
64  .fto_io_desc_get = io_fop_desc_get,
65 };
66 
67 static const struct m0_fop_type_ops bulkio_server_write_fop_ut_ops = {
69  .fto_io_coalesce = io_fop_coalesce,
70  .fto_io_desc_get = io_fop_desc_get,
71 };
72 
73 static const struct m0_fop_type_ops bulkio_server_read_fop_ut_ops = {
75  .fto_io_coalesce = io_fop_coalesce,
76  .fto_io_desc_get = io_fop_desc_get,
77 };
78 
79 static const struct m0_fom_type_ops bulkio_server_write_fomt_ops = {
80  .fto_create = io_fop_server_write_fom_create,
81 };
82 
83 static const struct m0_fom_type_ops bulkio_server_read_fomt_ops = {
84  .fto_create = io_fop_server_read_fom_create,
85 };
86 
87 static const struct m0_fom_type_ops bulkio_stob_create_fomt_ops = {
88  .fto_create = io_fop_stob_create_fom_create,
89 };
90 
91 static const struct m0_fom_type_ops ut_io_fom_cob_rw_type_ops = {
92  .fto_create = ut_io_fom_cob_rw_create,
93 };
94 
95 static void bulkio_stob_fom_fini(struct m0_fom *fom)
96 {
97  struct m0_io_fom_cob_rw *fom_obj;
98 
99  fom_obj = container_of(fom, struct m0_io_fom_cob_rw, fcrw_gen);
100  m0_stob_put(fom_obj->fcrw_stob);
101  m0_fom_fini(fom);
102  m0_free(fom);
103 }
104 
105 struct m0_net_buffer_pool * ut_get_buffer_pool(struct m0_fom *fom)
106 {
107  struct m0_reqh_io_service *serv_obj;
108  struct m0_rios_buffer_pool *bpdesc = NULL;
109  struct m0_net_domain *fop_ndom = NULL;
110  struct m0_fop *fop = NULL;
111 
112  fop = fom->fo_fop;
113  serv_obj = container_of(fom->fo_service,
114  struct m0_reqh_io_service, rios_gen);
115 
116  /* Get network buffer pool for network domain */
117  fop_ndom = m0_fop_domain_get(fop);
118 
119  bpdesc = m0_tl_find(bufferpools, bpdesc, &serv_obj->rios_buffer_pools,
120  bpdesc->rios_ndom == fop_ndom);
121 
122  return bpdesc == NULL ? NULL : &bpdesc->rios_bp;
123 }
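/*
 * ut_get_buffer_pool() looks up the ioservice buffer pool that serves the
 * network domain of the given fop by scanning rios_buffer_pools of the
 * m0_reqh_io_service.  A minimal usage sketch, assuming the elided branch of
 * the tick tests below fetches the pool the same way when fcrw_bp is not yet
 * set:
 *
 *     if (fom_obj->fcrw_bp == NULL)
 *             buf_pool = ut_get_buffer_pool(fom);
 *     else
 *             buf_pool = fom_obj->fcrw_bp;
 */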
124 
125 
126 /*
127  * - This is a positive test case to test m0_io_fom_cob_rw_tick(fom).
128  * - This function tests the next phase after every defined phase of the write FOM.
129  * - Validation of the next phase is done as per the state transitions in the detailed design.
130  * @see DLD-bulk-server-lspec-state
131  */
132 static int bulkio_server_write_fom_tick(struct m0_fom *fom)
133 {
134  int rc;
135  int phase0;
136 
137  phase0 = m0_fom_phase(fom);
138  M0_LOG(M0_DEBUG, "phase=%d", phase0);
140  if (fol_check_enabled) {
141  if (phase0 == M0_FOPH_FOL_REC_ADD) {
142  int rc;
143 
144  if (payload_buf.b_addr != NULL)
147  &fom->fo_tx.tx_betx.t_payload);
148  M0_UT_ASSERT(rc == 0);
149  }
150  }
151  if (m0_fom_rc(fom) != 0) {
153  return rc;
154  }
155  switch (phase0) {
160  break;
163  break;
166  break;
169  break;
172  break;
176  M0_FOPH_SUCCESS)));
177  break;
178  }
179  return rc;
180 }
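/*
 * The switch above (largely elided in this listing) asserts, for each defined
 * write phase, that the next phase chosen by the real m0_io_fom_cob_rw_tick()
 * matches the DLD state-transition table.  When fol_check_enabled is set, the
 * tick also appears to copy the BE transaction payload
 * (fom->fo_tx.tx_betx.t_payload) into payload_buf at M0_FOPH_FOL_REC_ADD, so
 * that the FOL-record verification tests further below can decode it after
 * the reply arrives.
 */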
181 
182 /*
183  * - This is positive test case to test m0_io_fom_cob_rw_tick(fom).
184  * - This function test next phase after every defined phase for Read FOM.
185  * - Validation of next phase is done as per state transition in detail design.
186  * @see DLD-bulk-server-lspec-state
187  */
188 static int bulkio_server_read_fom_tick(struct m0_fom *fom)
189 {
190  int rc;
191  int phase0;
192 
193  phase0 = m0_fom_phase(fom);
195  if (m0_fom_rc(fom) != 0) {
197  return rc;
198  }
199  switch (phase0) {
204  break;
207  break;
210  break;
213  break;
216  break;
220  M0_FOPH_SUCCESS)));
221  break;
222  }
223  return rc;
224 }
225 
226 /*
227  * This function intercepts the actual I/O FOM state function
228  * for state transition testing.
229  *
230  * This UT FOM works with real fops sent by the bulk client.
231  * - The client first sends a write fop.
232  * - Fops on the server side are intercepted by this dummy state function,
233  *   which checks all possible state transitions.
234  * - It simulates a failure environment for a particular state and restores
235  *   it again after each test.
236  * - After the reply fop is received by the client, the client sends a read
237  *   fop to read the data written by the previous write fop.
238  * - It then checks the remaining state transitions.
239  * - After the reply fop is received by the client, the received data is
240  *   compared on the client side with the original data that was sent.
241  */
242 static int ut_io_fom_cob_rw_state(struct m0_fom *fom)
243 {
244  return m0_is_read_fop(fom->fo_fop) ?
245         check_read_fom_tick(fom) : check_write_fom_tick(fom);
246 }
247 
257 };
258 
259 static int nb_nr = 0;
260 static struct m0_net_buffer *nb_list[64];
262 static int next_write_test = TEST00;
263 static int next_read_test = TEST00;
264 
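/*
 * Test-progress state shared between successive ticks of the intercepted
 * FOMs: nb_list[]/nb_nr track network buffers drained from the pool by
 * empty_buffers_pool(), while next_write_test/next_read_test select which
 * TESTxx case group check_write_fom_tick()/check_read_fom_tick() runs on the
 * next tick.  The TEST00..TEST12 values come from the enum above, which is
 * partially elided in this listing.
 */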
265 static void empty_buffers_pool(uint32_t colour)
266 {
267  nb_nr--;
269  do {
271  } while (nb_list[nb_nr] != NULL);
273 }
274 
275 static void release_one_buffer(uint32_t colour)
276 {
280 }
281 
282 static void fill_buffers_pool(uint32_t colour)
283 {
285  while (nb_nr > 0)
288 }
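/*
 * empty_buffers_pool(), release_one_buffer() and fill_buffers_pool()
 * simulate buffer-pool pressure for the TEST01/TEST02 cases: the pool is
 * drained into nb_list[] so that M0_FOPH_IO_FOM_BUFFER_ACQUIRE has to wait,
 * a single buffer is put back to let the FOM make progress, and the
 * remaining buffers are returned once a case group finishes.
 */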
289 
290 static void builkio_ut_stob_get(struct m0_io_fom_cob_rw *fom_obj)
291 {
292  struct m0_storage_devs *devs = m0_cs_storage_devs_get();
293  struct m0_storage_dev *dev;
294  struct m0_stob_domain *dom;
295 
296  M0_UT_ASSERT(devs != NULL);
297  M0_UT_ASSERT(fom_obj->fcrw_stob != NULL);
298  m0_stob_get(fom_obj->fcrw_stob);
299  dom = m0_stob_dom_get(fom_obj->fcrw_stob);
300  M0_UT_ASSERT(dom != NULL);
301  m0_storage_devs_lock(devs);
302  dev = m0_storage_devs_find_by_dom(devs, dom);
303  M0_UT_ASSERT(dev != NULL);
304  M0_LOG(M0_DEBUG, "get: dev=%p, ref=%" PRIi64
305  "state=%d type=%d, %"PRIu64,
306  dev,
307  m0_ref_read(&dev->isd_ref),
308  dev->isd_ha_state,
309  dev->isd_srv_type,
310  dev->isd_cid);
311  m0_storage_dev_get(dev);
313 }
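/*
 * builkio_ut_stob_get() takes an extra reference on the stob and on its
 * storage device.  The failure cases below call it just before injecting an
 * error (e.g. "keep-net-buffers" on zero_copy_initiate or io_finish),
 * presumably to balance the references dropped by the failure path of the
 * real FOM code, so the stob stays usable for the next case.
 */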
314 
315 static void fom_phase_set(struct m0_fom *fom, int phase)
316 {
317  if (m0_fom_phase(fom) == M0_FOPH_FAILURE) {
318  const struct fom_phase_desc *fpd_phase;
319 
320  while (m0_fom_phase(fom) != M0_FOPH_FINISH) {
321  fpd_phase = &fpd_table[m0_fom_phase(fom)];
322  m0_fom_phase_set(fom, fpd_phase->fpd_nextphase);
323  }
324 
325  m0_sm_fini(&fom->fo_sm_phase);
326  M0_SET0(&fom->fo_sm_phase);
327  m0_sm_init(&fom->fo_sm_phase, &fom->fo_type->ft_conf,
328  M0_FOM_PHASE_INIT, &fom->fo_loc->fl_group);
329 
331  fpd_phase = &fpd_table[m0_fom_phase(fom)];
332  m0_fom_phase_set(fom, fpd_phase->fpd_nextphase);
333  }
334  }
335 
336  while (phase != m0_fom_phase(fom)) {
338 
339  st = m0_is_read_fop(fom->fo_fop) ?
342 
343  if (M0_IN(phase, (st.fcrw_st_next_phase_again,
344  st.fcrw_st_next_phase_wait))) {
345  m0_fom_phase_set(fom, phase);
346  break;
347  }
348 
349  m0_fom_phase_set(fom, st.fcrw_st_next_phase_again != 0 ?
350  st.fcrw_st_next_phase_again :
351  st.fcrw_st_next_phase_wait);
352  }
353 }
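/*
 * fom_phase_set() drives the FOM to the phase required as input by the next
 * test case.  If the previous case left the FOM in M0_FOPH_FAILURE, the
 * phase state machine is walked to M0_FOPH_FINISH, re-initialised, and
 * advanced along the generic phases; after that the FOM is stepped through
 * the read/write state-transition tables until the requested phase is
 * reached.
 */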
354 
355 /*
356  * - This function tests the next phase after every defined phase of the write FOM.
357  * - Validation of the next phase is done as per the state transitions in the detailed design.
358  * @see DLD-bulk-server-lspec-state
359  * - This test covers all positive as well as negative cases.
360  * Note: for each test case it does the following:
361  * - simulates the environment,
362  * - runs the state function for the respective I/O FOM,
363  * - checks the output state & return code,
364  * - restores the FOM to its clean state using the saved original data.
365  */
366 static int check_write_fom_tick(struct m0_fom *fom)
367 {
368  int rc;
369  uint32_t colour;
370  int acquired_net_bufs;
371  int saved_ndesc;
372  struct m0_fop_cob_rw *rwfop;
373  struct m0_fop *fop;
374  struct m0_io_fom_cob_rw *fom_obj;
375  struct m0_fid saved_fid;
376  struct m0_fid invalid_fid;
377  struct m0_stob_io_desc *saved_stobio_desc;
378  struct m0_stob *saved_stob;
379  int saved_count;
380  struct m0_net_domain *netdom;
381  struct m0_net_transfer_mc *tm;
382 
383  fom_obj = container_of(fom, struct m0_io_fom_cob_rw, fcrw_gen);
384  fop = fom->fo_fop;
385  rwfop = io_rw_get(fop);
386 
387  tm = m0_fop_tm_get(fop);
388  colour = m0_net_tm_colour_get(tm);
389 
390  if (m0_fom_phase(fom) < M0_FOPH_NR) {
391  /*
392  * No need to test generic phases.
393  */
395  if (next_write_test <= TEST00)
397  } else if (next_write_test == TEST00) {
400  } else if (next_write_test == TEST01) {
401  /* Acquire all buffer pool buffers to test some of the cases. */
402  if (fom_obj->fcrw_bp == NULL)
404  else
405  buf_pool = fom_obj->fcrw_bp;
407 
408  empty_buffers_pool(colour);
409 
410  /*
411  * Case 01: No network buffer is available with the buffer pool.
412  * Input phase : M0_FOPH_IO_FOM_BUFFER_ACQUIRE
413  * Expected Output phase: M0_FOPH_IO_FOM_BUFFER_WAIT
414  */
416  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
417  rc == M0_FSO_WAIT &&
419 
420  /* Cleanup & make clean FOM for next test. */
421  rc = M0_FSO_WAIT;
422  fom->fo_sm_phase.sm_rc = 0;
423 
424  release_one_buffer(colour);
426  } else if (next_write_test == TEST02) {
427  /*
428  * Case 02: No network buffer is available with the buffer pool.
429  * Even after getting buffer pool not-empty event,
430  * buffers are not available in pool (which could be
431  * used by other FOMs in the server).
432  * Input phase : M0_FOPH_IO_FOM_BUFFER_WAIT
433  * Expected Output phase: M0_FOPH_IO_FOM_BUFFER_WAIT
434  */
435 
436  empty_buffers_pool(colour);
437 
439  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
440  rc == M0_FSO_WAIT &&
442 
443  /* Cleanup & restore FOM for next test. */
444  rc = M0_FSO_WAIT;
445 
446  release_one_buffer(colour);
448  } else if (next_write_test == TEST03) {
449  int cdi = fom_obj->fcrw_curr_desc_index;
450 
451  /*
452  * Case 03 : Network buffer is available with the buffer pool.
453  * Input phase : M0_FOPH_IO_FOM_BUFFER_ACQUIRE
454  * Expected Output phase: M0_FOPH_IO_ZERO_COPY_INIT
455  */
457 
459  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
460  rc == M0_FSO_AGAIN &&
462 
463  /*
464  * Cleanup & restore FOM for next test.
465  * Since the previous case successfully acquired a network buffer
466  * and the buffer pool now has no network buffers left, this
467  * buffer needs to be returned to the buffer pool.
468  */
469  acquired_net_bufs =
470  netbufs_tlist_length(&fom_obj->fcrw_netbuf_list);
472  while (acquired_net_bufs > 0) {
473  struct m0_net_buffer *nb;
474 
475  nb = netbufs_tlist_tail(&fom_obj->fcrw_netbuf_list);
476  m0_net_buffer_pool_put(fom_obj->fcrw_bp, nb, colour);
477  netbufs_tlink_del_fini(nb);
478  acquired_net_bufs--;
479  }
481  fom_obj->fcrw_batch_size = 0;
482 
483  /*
484  * Case 04 : Network buffer is available with the buffer pool.
485  * Input phase : M0_FOPH_IO_FOM_BUFFER_WAIT
486  * Expected Output phase: M0_FOPH_IO_ZERO_COPY_INIT
487  */
489 
491  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
492  rc == M0_FSO_AGAIN &&
494  /*
495  * No need to clean up here, since the FOM will be transitioned to
496  * the expected phase.
497  */
498 
499  /*
500  * Case 05 : Zero-copy failure
501  * Input phase : M0_FOPH_IO_ZERO_COPY_INIT
502  * Expected Output phase: M0_FOPH_FAILURE
503  */
504 
505  /*
506  * Modify net buffer used count in fop (value greater than
507  * net domain max), so that zero-copy initialisation fails.
508  */
509  saved_count = rwfop->crw_desc.id_descs[cdi].bdd_used;
510  netdom = m0_fop_domain_get(fop);
511  rwfop->crw_desc.id_descs[cdi].bdd_used =
512  m0_net_domain_get_max_buffer_size(netdom) + 4096;
513 
515  builkio_ut_stob_get(fom_obj);
516 
517  m0_fi_enable_once("zero_copy_initiate", "keep-net-buffers");
519  M0_UT_ASSERT(m0_fom_rc(fom) != 0 &&
520  rc == M0_FSO_AGAIN &&
522 
523  /* Cleanup & restore FOM for next test. */
524  rwfop->crw_desc.id_descs[cdi].bdd_used = saved_count;
525 
526  /*
527  * Case 06 : Zero-copy success
528  * Input phase : M0_FOPH_IO_ZERO_COPY_INIT
529  * Expected Output phase: M0_FOPH_IO_ZERO_COPY_WAIT
530  */
531  /*
532  * To bypass the request handler, we need to change the FOM callback
533  * function which wakes the FOM up from wait.
534  */
536 
538  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
539  rc == M0_FSO_WAIT &&
542  } else if (next_write_test == TEST07) {
543  /*
544  * Case 07 : Zero-copy failure
545  * Input phase : M0_FOPH_IO_ZERO_COPY_WAIT
546  * Expected Output phase: M0_FOPH_FAILURE
547  */
549  fom_obj->fcrw_bulk.rb_rc = -1;
550  builkio_ut_stob_get(fom_obj);
551 
552  m0_fi_enable_once("zero_copy_finish", "keep-net-buffers");
554  M0_UT_ASSERT(m0_fom_rc(fom) != 0 &&
555  rc == M0_FSO_AGAIN &&
557 
558  /* Cleanup & make clean FOM for next test. */
559  fom_obj->fcrw_bulk.rb_rc = 0;
560 
561  /*
562  * Case 08 : Zero-copy success from wait state.
563  * Input phase : M0_FOPH_IO_ZERO_COPY_WAIT
564  * Expected Output phase: M0_FOPH_TXN_INIT
565  */
568  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
569  rc == M0_FSO_AGAIN &&
572  } else if (next_write_test == TEST10) {
573  /*
574  * Case 09 : STOB I/O launch failure
575  * Input phase : M0_FOPH_IO_STOB_INIT
576  * Expected Output phase: M0_FOPH_FAILURE
577  */
578 
579  /* Save the original fid and pass an invalid fid
580  * to make the I/O launch fail. */
581  saved_fid = rwfop->crw_fid;
582  saved_stob = fom_obj->fcrw_stob;
583  m0_fid_set(&invalid_fid, 111, 222);
584  m0_fid_tassume(&invalid_fid, &m0_cob_fid_type);
585  fom_obj->fcrw_stob = NULL;
586  rwfop->crw_fid = invalid_fid;
588 
589  m0_fi_enable_once("io_launch", "keep-net-buffers");
591  M0_UT_ASSERT(m0_fom_rc(fom) != 0 && rc == M0_FSO_AGAIN &&
593 
594  /* Cleanup & make clean FOM for next test. */
595  rwfop->crw_fid = saved_fid;
596  fom_obj->fcrw_stob = saved_stob;
597 
598  /*
599  * Case 10 : STOB I/O launch success
600  * Input phase : M0_FOPH_IO_STOB_INIT
601  * Expected Output phase: M0_FOPH_IO_STOB_WAIT
602  */
603  /*
604  * To bypass the request handler, we need to change the FOM callback
605  * function which wakes the FOM up from wait.
606  */
608 
610  M0_UT_ASSERT(m0_fom_rc(fom) == 0 && rc == M0_FSO_WAIT &&
612 
614  } else if (next_write_test == TEST11) {
615  /*
616  * Case 11 : STOB I/O failure from wait state.
617  * Input phase : M0_FOPH_IO_STOB_WAIT
618  * Expected Output phase: M0_FOPH_FAILURE
619  */
620 
621  /*
622  * To test this case we need to invalidate the stobio
623  * descriptor, since io_finish() removes the stobio descriptor
624  * from the list.
625  * There is only one stobio descriptor.
626  * Before returning the error this phase will do the following:
627  * - free and remove the stobio descriptors from the list,
628  * - put the stob object,
629  * - leave the FOM block.
630  */
631  saved_stobio_desc = stobio_tlist_pop(&fom_obj->fcrw_stio_list);
632  M0_UT_ASSERT(saved_stobio_desc != NULL);
633 
634  builkio_ut_stob_get(fom_obj);
636  m0_fi_enable_once("io_finish", "fake_error");
637  m0_fi_enable_once("io_finish", "keep-net-buffers");
639  M0_UT_ASSERT(m0_fom_rc(fom) != 0 && rc == M0_FSO_AGAIN &&
641 
642  /*
643  * Cleanup & make clean FOM for next test.
644  * Restore original fom.
645  */
646  stobio_tlist_add(&fom_obj->fcrw_stio_list, saved_stobio_desc);
647 
648  /*
649  * Case 12 : STOB I/O success
650  * Input phase : M0_FOPH_IO_STOB_WAIT
651  * Expected Output phase: M0_FOPH_IO_BUFFER_RELEASE
652  */
654 
656  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
657  rc == M0_FSO_AGAIN &&
659 
660  /*
661  * Case 13 : Processing of remaining buffer descriptors.
662  * Input phase : M0_FOPH_IO_BUFFER_RELEASE
663  * Expected Output phase: M0_FOPH_IO_FOM_BUFFER_ACQUIRE
664  */
666 
667  saved_ndesc = fom_obj->fcrw_ndesc;
668  fom_obj->fcrw_ndesc = 2;
669  rwfop->crw_desc.id_nr = 2;
671  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
672  rc == M0_FSO_AGAIN &&
673  m0_fom_phase(fom) ==
675 
676  /* Cleanup & make clean FOM for next test. */
677  fom_obj->fcrw_ndesc = saved_ndesc;
678  rwfop->crw_desc.id_nr = saved_ndesc;
679 
680  /*
681  * Case 14 : All buffer descriptors are processed.
682  * Input phase : M0_FOPH_IO_BUFFER_RELEASE
683  * Expected Output phase: M0_FOPH_SUCCESS
684  */
686 
688  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
689  rc == M0_FSO_AGAIN &&
691 
692  fill_buffers_pool(colour);
694  } else if (next_write_test == TEST12) {
695  /* @todo XXX Add tests for M0_FOPH_IO_SYNC, M0_IO_FLAG_SYNC. */
698  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
699  rc == M0_FSO_AGAIN &&
701  } else {
702  M0_UT_ASSERT(0); /* this should not happen */
703  rc = M0_FSO_WAIT; /* to avoid compiler warning */
704  }
705 
706  return rc;
707 }
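/*
 * The TEST00..TEST12 groups above are executed across successive ticks of
 * the same write FOM: each group simulates one or more failure/success
 * scenarios for its input phase, asserts the resulting phase and return
 * code, and restores the FOM.  The elided lines presumably advance
 * next_write_test and swap in a UT wakeup callback so that the FOM is woken
 * from its wait states for the next group.
 */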
708 
709 /*
710  * - This function tests the next phase after every defined phase of the read FOM.
711  * - Validation of the next phase is done as per the state transitions in the detailed design.
712  * @see DLD-bulk-server-lspec-state
713  * - This test covers positive as well as negative cases.
714  * Note: for each test case it does the following:
715  * - simulates the environment,
716  * - runs the state function for the respective I/O FOM,
717  * - checks the output state & return code,
718  * - restores the FOM to its clean state using the saved original data.
719  */
720 static int check_read_fom_tick(struct m0_fom *fom)
721 {
722  int rc;
723  uint32_t colour;
724  int acquired_net_bufs;
725  int saved_count;
726  int saved_ndesc;
727  struct m0_fop_cob_rw *rwfop;
728  struct m0_net_domain *netdom;
729  struct m0_fop *fop;
730  struct m0_io_fom_cob_rw *fom_obj;
731  struct m0_fid saved_fid;
732  struct m0_fid invalid_fid;
733  struct m0_stob_io_desc *saved_stobio_desc;
734  struct m0_stob *saved_stob;
735  struct m0_net_transfer_mc *tm;
736 
737  fom_obj = container_of(fom, struct m0_io_fom_cob_rw, fcrw_gen);
738  fop = fom->fo_fop;
739  rwfop = io_rw_get(fop);
740 
741  tm = m0_fop_tm_get(fop);
742  colour = m0_net_tm_colour_get(tm);
743 
744  if (m0_fom_phase(fom) < M0_FOPH_NR) {
745  /*
746  * No need to test generic phases.
747  */
750  } else if (next_read_test == TEST00) {
753  } else if (next_read_test == TEST01) {
754  /* Acquire all buffer pool buffers to test some of the cases. */
755  if (fom_obj->fcrw_bp == NULL)
757  else
758  buf_pool = fom_obj->fcrw_bp;
760 
761  /* Acquire all buffers from buffer pool to make it empty */
762  empty_buffers_pool(colour);
763 
764  /*
765  * Case 01 : No network buffer is available with buffer pool.
766  * Input phase : M0_FOPH_IO_FOM_BUFFER_ACQUIRE
767  * Expected Output phase: M0_FOPH_IO_FOM_BUFFER_WAIT
768  */
770 
772  M0_UT_ASSERT(m0_fom_rc(fom) == 0 && rc == M0_FSO_WAIT &&
774 
775  /* Cleanup & make clean FOM for next test. */
776  rc = M0_FSO_WAIT;
777 
778  release_one_buffer(colour);
780  } else if (next_read_test == TEST02) {
781  /*
782  * Case 02 : No network buffer is available with buffer pool.
783  * Even after getting buffer pool not-empty event,
784  * buffers are not available in pool (which could be
785  * used by other FOMs in the server).
786  * Input phase : M0_FOPH_IO_FOM_BUFFER_WAIT
787  * Expected Output phase: M0_FOPH_IO_FOM_BUFFER_WAIT
788  */
789 
790  empty_buffers_pool(colour);
791 
793  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
794  rc == M0_FSO_WAIT &&
796 
797  /* Cleanup & make clean FOM for next test. */
798  rc = M0_FSO_WAIT;
799 
800  release_one_buffer(colour);
802  } else if (next_read_test == TEST03) {
803  /*
804  * Case 03 : Network buffer is available with the buffer pool.
805  * Input phase : M0_FOPH_IO_FOM_BUFFER_ACQUIRE
806  * Expected Output phase: M0_FOPH_IO_STOB_INIT
807  */
809 
811  M0_UT_ASSERT(m0_fom_rc(fom) == 0 && rc == M0_FSO_AGAIN &&
813 
814  /*
815  * Cleanup & make clean FOM for next test.
816  * Since the previous case successfully acquired a network buffer
817  * and the buffer pool now has no network buffers left, this buffer
818  * needs to be returned to the buffer pool.
819  */
820  acquired_net_bufs =
821  netbufs_tlist_length(&fom_obj->fcrw_netbuf_list);
823  while (acquired_net_bufs > 0) {
824  struct m0_net_buffer *nb;
825 
826  nb = netbufs_tlist_tail(&fom_obj->fcrw_netbuf_list);
827  m0_net_buffer_pool_put(fom_obj->fcrw_bp, nb, colour);
828  netbufs_tlink_del_fini(nb);
829  acquired_net_bufs--;
830  }
832  fom_obj->fcrw_batch_size = 0;
833 
834  /*
835  * Case 04 : Network buffer available with buffer pool.
836  * Input phase : M0_FOPH_IO_FOM_BUFFER_WAIT
837  * Expected Output phase: M0_FOPH_IO_STOB_INIT
838  */
840 
842  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
843  rc == M0_FSO_AGAIN &&
845 
846  /* No need to clean up here, since the FOM will be transitioned
847  * to the expected phase.
848  */
849 
850  /*
851  * Case 05 : STOB I/O launch failure
852  * Input phase : M0_FOPH_IO_STOB_INIT
853  * Expected Output phase: M0_FOPH_FAILURE
854  */
855 
856  /* Save original fid and pass invalid fid to make I/O launch
857  * fail. */
858  saved_fid = rwfop->crw_fid;
859  saved_stob = fom_obj->fcrw_stob;
860  m0_fid_set(&invalid_fid, 111, 222);
861  m0_fid_tassume(&invalid_fid, &m0_cob_fid_type);
862  fom_obj->fcrw_stob = NULL;
863  rwfop->crw_fid = invalid_fid;
864 
866 
867  m0_fi_enable_once("io_launch", "keep-net-buffers");
869  M0_UT_ASSERT(m0_fom_rc(fom) != 0 &&
870  rc == M0_FSO_AGAIN &&
872 
873  /* Cleanup & make clean FOM for next test. */
874  rwfop->crw_fid = saved_fid;
875  fom_obj->fcrw_stob = saved_stob;
876 
877  /*
878  * Case 06 : STOB I/O launch success
879  * Input phase : M0_FOPH_IO_STOB_INIT
880  * Expected Output phase: M0_FOPH_IO_STOB_WAIT
881  */
882  /*
883  * To bypass the request handler, we need to change the FOM callback
884  * function which wakes the FOM up from wait.
885  */
887 
889  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
890  rc == M0_FSO_WAIT &&
893  } else if (next_read_test == TEST07) {
894  int cdi = fom_obj->fcrw_curr_desc_index;
895 
896  /*
897  * Case 07 : STOB I/O failure
898  * Input phase : M0_FOPH_IO_STOB_WAIT
899  * Expected Output phase: M0_FOPH_FAILURE
900  */
901 
902  /*
903  * To test this case we need to invalidate the stobio
904  * descriptor, since io_finish() removes the stobio descriptor
905  * from the list.
906  * There is only one stobio descriptor.
907  * Before returning the error this phase will do the following:
908  * - free and remove the stobio descriptors from the list,
909  * - put the stob object,
910  * - leave the FOM block.
911  */
912  saved_stobio_desc = stobio_tlist_pop(&fom_obj->fcrw_stio_list);
913  M0_UT_ASSERT(saved_stobio_desc != NULL);
914 
916  builkio_ut_stob_get(fom_obj);
917 
918  m0_fi_enable_once("io_finish", "fake_error");
919  m0_fi_enable_once("io_finish", "keep-net-buffers");
921  M0_UT_ASSERT(m0_fom_rc(fom) != 0 &&
922  rc == M0_FSO_AGAIN &&
924 
925  /*
926  * Cleanup & make clean FOM for next test.
927  * Restore original fom.
928  */
929  stobio_tlist_add(&fom_obj->fcrw_stio_list, saved_stobio_desc);
930 
931  /*
932  * Case 08 : STOB I/O success
933  * Input phase : M0_FOPH_IO_STOB_WAIT
934  * Expected Output phase: M0_FOPH_IO_ZERO_COPY_INIT
935  */
937 
939  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
940  rc == M0_FSO_AGAIN &&
942  /*
943  * Case 09 : Zero-copy failure
944  * Input phase : M0_FOPH_IO_ZERO_COPY_INIT
945  * Expected Output phase: M0_FOPH_FAILURE
946  */
947 
948  /*
949  * Modify net buffer used count in fop (value greater than
950  * net domain max), so that zero-copy initialisation fails.
951  */
952  saved_count = rwfop->crw_desc.id_descs[cdi].bdd_used;
953  netdom = m0_fop_domain_get(fop);
954  rwfop->crw_desc.id_descs[cdi].bdd_used =
955  m0_net_domain_get_max_buffer_size(netdom) + 4096;
956 
958  builkio_ut_stob_get(fom_obj);
959 
960  m0_fi_enable_once("zero_copy_initiate", "keep-net-buffers");
962  M0_UT_ASSERT(m0_fom_rc(fom) != 0 &&
963  rc == M0_FSO_AGAIN &&
965 
966  /* Cleanup & make clean FOM for next test. */
967  rwfop->crw_desc.id_descs[cdi].bdd_used = saved_count;
969 
970  /*
971  * Case 10 : Zero-copy success
972  * Input phase : M0_FOPH_IO_ZERO_COPY_INIT
973  * Expected Output phase: M0_FOPH_IO_ZERO_COPY_WAIT
974  */
975  /*
976  * To bypass the request handler, we need to change the FOM callback
977  * function which wakes the FOM up from wait.
978  */
980 
982  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
983  rc == M0_FSO_WAIT &&
986  } else if (next_read_test == TEST11) {
987  /*
988  * Case 11 : Zero-copy failure
989  * Input phase : M0_FOPH_IO_ZERO_COPY_WAIT
990  * Expected Output phase: M0_FOPH_FAILURE
991  */
993  fom_obj->fcrw_bulk.rb_rc = -1;
994  builkio_ut_stob_get(fom_obj);
995 
996  m0_fi_enable_once("zero_copy_finish", "keep-net-buffers");
998  M0_UT_ASSERT(m0_fom_rc(fom) != 0 &&
999  rc == M0_FSO_AGAIN &&
1001 
1002  /* Cleanup & make clean FOM for next test. */
1003  fom_obj->fcrw_bulk.rb_rc = 0;
1004 
1005  /*
1006  * Case 12 : Zero-copy success
1007  * Input phase : M0_FOPH_IO_ZERO_COPY_WAIT
1008  * Expected Output phase: M0_FOPH_IO_BUFFER_RELEASE
1009  */
1011 
1013  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
1014  rc == M0_FSO_AGAIN &&
1016 
1017 
1018  /*
1019  * Case 13 : Processing of remaining buffer descriptors.
1020  * Input phase : M0_FOPH_IO_BUFFER_RELEASE
1021  * Expected Output phase: M0_FOPH_IO_FOM_BUFFER_ACQUIRE
1022  */
1024 
1025  saved_ndesc = fom_obj->fcrw_ndesc;
1026  fom_obj->fcrw_ndesc = 2;
1027  rwfop->crw_desc.id_nr = 2;
1029  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
1030  rc == M0_FSO_AGAIN &&
1031  m0_fom_phase(fom) ==
1033 
1034  /* Cleanup & make clean FOM for next test. */
1035  fom_obj->fcrw_ndesc = saved_ndesc;
1036  rwfop->crw_desc.id_nr = saved_ndesc;
1037 
1038  /*
1039  * Case 14 : All buffer descriptors are processed.
1040  * Input phase : M0_FOPH_IO_BUFFER_RELEASE
1041  * Expected Output phase: M0_FOPH_SUCCESS
1042  */
1044 
1046  M0_UT_ASSERT(m0_fom_rc(fom) == 0 &&
1047  rc == M0_FSO_AGAIN &&
1049 
1050  fill_buffers_pool(colour);
1051  } else {
1052  M0_UT_ASSERT(0); /* this should not happen */
1053  rc = M0_FSO_WAIT; /* to avoid compiler warning */
1054  }
1055 
1056  return rc;
1057 }
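/*
 * The read-side cases mirror the write-side ones, with the order of the I/O
 * stages reversed: a read FOM acquires network buffers, launches and waits
 * for stob I/O first, and only then performs zero-copy towards the client,
 * so the failure injections are applied to M0_FOPH_IO_STOB_INIT and
 * M0_FOPH_IO_STOB_WAIT before the zero-copy phases.
 */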
1058 
1059 /* It is used to create the stob specified in the fid of each fop. */
1060 static int bulkio_stob_create_fom_tick(struct m0_fom *fom)
1061 {
1062  struct m0_fop_cob_rw *rwfop;
1063  struct m0_stob_domain *fom_stdom;
1064  struct m0_stob_id stob_id;
1065  int rc;
1066  struct m0_fop_cob_writev_rep *wrep;
1067  struct m0_io_fom_cob_rw *fom_obj;
1068  struct m0_fom_cob_op cc;
1069  struct m0_reqh_io_service *ios;
1070  struct m0_cob_attr attr = { {0, } };
1071  struct m0_cob_oikey oikey;
1072  struct m0_cob *cob;
1073 
1075  fom_obj = container_of(fom, struct m0_io_fom_cob_rw, fcrw_gen);
1076  ios = container_of(fom->fo_service, struct m0_reqh_io_service,
1077  rios_gen);
1078 
1079  rwfop = io_rw_get(fom->fo_fop);
1080  m0_fid_convert_cob2stob(&rwfop->crw_fid, &stob_id);
1081  fom_stdom = m0_stob_domain_find_by_stob_id(&stob_id);
1082  M0_UT_ASSERT(fom_stdom != NULL);
1083 
1084  if (m0_fom_phase(fom) < M0_FOPH_NR) {
1085  if (m0_fom_phase(fom) == M0_FOPH_TXN_OPEN) {
1089  }
1090  return m0_fom_tick_generic(fom);
1091  }
1092 
1093  cc.fco_stob_id = stob_id;
1094  cc.fco_gfid = rwfop->crw_gfid;
1095  cc.fco_cfid = rwfop->crw_fid;
1096  cc.fco_cob_idx = (uint32_t) rwfop->crw_gfid.f_key;
1097  cc.fco_cob_type = M0_COB_IO;
1098 
1100  if (!bulkio_stob_created)
1101  M0_UT_ASSERT(rc == 0);
1102  else {
1103  M0_UT_ASSERT(rc == -EEXIST);
1104  }
1105 
1106  rc = m0_stob_find(&stob_id, &fom_obj->fcrw_stob);
1107  M0_UT_ASSERT(rc == 0);
1108  rc = m0_stob_locate(fom_obj->fcrw_stob);
1109  M0_UT_ASSERT(rc == 0);
1110  rc = m0_stob_create(fom_obj->fcrw_stob, &fom->fo_tx, NULL);
1111  if (!bulkio_stob_created)
1112  M0_UT_ASSERT(rc == 0);
1113  else {
1114  M0_UT_ASSERT(rc == -EEXIST);
1115  }
1116 
1117  m0_cob_oikey_make(&oikey, &cc.fco_cfid, 0);
1118  rc = m0_cob_locate(ios->rios_cdom, &oikey, 0, &cob);
1119  M0_UT_ASSERT(rc == 0);
1120 
1121  wrep = m0_fop_data(fom->fo_rep_fop);
1122  wrep->c_rep.rwr_rc = 0;
1123  wrep->c_rep.rwr_count = rwfop->crw_ivec.ci_nr;
1125  return M0_FSO_AGAIN;
1126 }
1127 
1128 static const struct m0_fom_ops bulkio_stob_create_fom_ops = {
1129  .fo_fini = bulkio_stob_fom_fini,
1130  .fo_tick = bulkio_stob_create_fom_tick,
1131  .fo_home_locality = m0_io_fom_cob_rw_locality_get
1132 };
1133 
1134 static const struct m0_fom_ops bulkio_server_write_fom_ops = {
1136  .fo_tick = bulkio_server_write_fom_tick,
1137  .fo_home_locality = m0_io_fom_cob_rw_locality_get
1138 };
1139 
1140 static const struct m0_fom_ops ut_io_fom_cob_rw_ops = {
1142  .fo_tick = ut_io_fom_cob_rw_state,
1143  .fo_home_locality = m0_io_fom_cob_rw_locality_get
1144 };
1145 
1146 static const struct m0_fom_ops bulkio_server_read_fom_ops = {
1148  .fo_tick = bulkio_server_read_fom_tick,
1149  .fo_home_locality = m0_io_fom_cob_rw_locality_get
1150 };
1151 
1152 static int io_fop_stob_create_fom_create(struct m0_fop *fop,
1153  struct m0_fom **m,
1154  struct m0_reqh *reqh)
1155 {
1156  int rc;
1157  struct m0_fom *fom;
1158 
1160  M0_UT_ASSERT(rc == 0);
1161  fom->fo_ops = &bulkio_stob_create_fom_ops;
1162  *m = fom;
1163 
1164  return 0;
1165 }
1166 
1167 static int io_fop_server_write_fom_create(struct m0_fop *fop,
1168  struct m0_fom **m,
1169  struct m0_reqh *reqh)
1170 {
1171  int rc;
1172  struct m0_fom *fom;
1173 
1175  M0_UT_ASSERT(rc == 0);
1176  fom->fo_ops = &bulkio_server_write_fom_ops;
1177  *m = fom;
1178  M0_UT_ASSERT(fom->fo_fop != 0);
1179  return rc;
1180 }
1181 
1182 /*
1183  * This creates a FOM for the UT.
1184  */
1185 static int ut_io_fom_cob_rw_create(struct m0_fop *fop, struct m0_fom **m,
1186  struct m0_reqh *reqh)
1187 {
1188  int rc;
1189  struct m0_fom *fom;
1190 
1191  /*
1192  * Case: this tests the I/O FOM create API.
1193  * It uses a real I/O FOP.
1194  */
1196  M0_UT_ASSERT(rc == 0 &&
1197  fom != NULL &&
1198  fom->fo_rep_fop != NULL &&
1199  fom->fo_fop != NULL &&
1200  fom->fo_type != NULL &&
1201  fom->fo_ops != NULL);
1202 
1203  fom->fo_ops = &ut_io_fom_cob_rw_ops;
1204  *m = fom;
1205  M0_UT_ASSERT(fom->fo_fop != 0);
1206  return rc;
1207 }
1208 
1209 static int io_fop_server_read_fom_create(struct m0_fop *fop,
1210  struct m0_fom **m,
1211  struct m0_reqh *reqh)
1212 {
1213  int rc;
1214  struct m0_fom *fom;
1215 
1217  M0_UT_ASSERT(rc == 0);
1218  fom->fo_ops = &bulkio_server_read_fom_ops;
1219  *m = fom;
1220  M0_UT_ASSERT(fom->fo_fop != 0);
1221  return rc;
1222 }
1223 
1224 static void bulkio_stob_create(void)
1225 {
1226  struct m0_fop_cob_rw *rw;
1227  enum M0_RPC_OPCODES op;
1228  struct thrd_arg targ[IO_FIDS_NR];
1229  int i;
1230  int rc;
1231 
1235  for (i = 0; i < IO_FIDS_NR; ++i) {
1239  NULL);
1240  M0_UT_ASSERT(rc == 0);
1241  /*
1242  * We replace the original ->ft_ops and ->ft_fom_type for
1243  * regular io_fops. This is reset later.
1244  */
1249  rw = io_rw_get(&bp->bp_wfops[i]->if_fop);
1253  M0_DI_CRC32_4K);
1254  rw->crw_fid = bp->bp_fids[i];
1255  targ[i].ta_index = i;
1256  targ[i].ta_op = op;
1257  targ[i].ta_bp = bp;
1258  io_fops_rpc_submit(&targ[i]);
1259  }
1261  bulkio_stob_created = true;
1262 }
1263 
1264 static void io_fops_submit(uint32_t index, enum M0_RPC_OPCODES op)
1265 {
1266  struct thrd_arg targ = {};
1267 
1268  targ.ta_index = index;
1269  targ.ta_op = op;
1270  targ.ta_bp = bp;
1271  io_fops_rpc_submit(&targ);
1272 }
1273 
1274 static void io_single_fop_submit(enum M0_RPC_OPCODES op)
1275 {
1276  struct m0_fop *fop;
1277 
1278  io_fops_create(bp, op, 1, 1, bp->bp_seg_nr);
1279  if (op == M0_IOSERVICE_WRITEV_OPCODE) {
1280  fop = &bp->bp_wfops[0]->if_fop;
1282  } else {
1283  fop = &bp->bp_rfops[0]->if_fop;
1285  }
1286  /*
1287  * Here we replace the original ->ft_ops and ->ft_fom_type as they were
1288  * changed during bulkio_stob_create test.
1289  */
1292  io_fops_submit(0, op);
1293 }
1294 
1295 static void bulkio_server_single_read_write(void)
1296 {
1297  int j;
1298  struct m0_bufvec *buf;
1299 
1300  buf = &bp->bp_iobuf[0]->nb_buffer;
1301  for (j = 0; j < bp->bp_seg_nr; ++j) {
1302  memset(buf->ov_buf[j], 'b', IO_SEG_SIZE);
1303  }
1304  fol_check_enabled = true;
1306 
1307  buf = &bp->bp_iobuf[0]->nb_buffer;
1308  for (j = 0; j < bp->bp_seg_nr; ++j) {
1309  memset(buf->ov_buf[j], 'a', IO_SEG_SIZE);
1310  }
1312  fol_check_enabled = false;
1313 }
1314 
1315 #define WRITE_FOP_DATA(fop) M0_XCODE_OBJ(m0_fop_cob_writev_xc, fop)
1316 
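/*
 * WRITE_FOP_DATA() wraps a write fop payload into an m0_xcode_obj so that
 * two payloads can be compared structurally with m0_xcode_cmp(), as done in
 * bulkio_server_write_fol_rec_verify() below when matching the fop stored in
 * the decoded FOL record fragment against the fop that was actually sent.
 */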
1317 static void bulkio_server_write_fol_rec_verify(void)
1318 {
1319  struct m0_reqh *reqh;
1320  struct m0_fol_rec dec_rec;
1321  int result;
1322  struct m0_fol_frag *dec_frag;
1323  struct m0_fop *fop;
1324  struct m0_fop_cob_writev *wfop;
1325 
1326  fop = &bp->bp_wfops[0]->if_fop;
1327  wfop = (struct m0_fop_cob_writev *)m0_fop_data(fop);
1328 
1330  M0_LOG(M0_DEBUG, "payload_buf=" BUF_F, BUF_P(&payload_buf));
1331  m0_fol_rec_init(&dec_rec, &reqh->rh_fol);
1332  result = m0_fol_rec_decode(&dec_rec, &payload_buf);
1333  M0_UT_ASSERT(result == 0);
1334 
1335  /* FOL record frags are 2 for AD stob type and 1 for LINUX stob type. */
1336  M0_UT_ASSERT(dec_rec.fr_header.rh_frags_nr == 1 ||
1337  dec_rec.fr_header.rh_frags_nr == 2);
1338  m0_tl_for(m0_rec_frag, &dec_rec.fr_frags, dec_frag) {
1339  struct m0_fop_fol_frag *fp_frag = dec_frag->rp_data;
1340 
1341  if (dec_frag->rp_ops->rpo_type->rpt_index ==
1344  struct m0_fop_cob_writev_rep *wfop_rep;
1345 
1347  &WRITE_FOP_DATA(fp_frag->ffrp_fop),
1348  &WRITE_FOP_DATA(wfop)) == 0);
1349  wfop_rep = fp_frag->ffrp_rep;
1350  M0_UT_ASSERT(wfop_rep->c_rep.rwr_rc == 0);
1351  M0_UT_ASSERT(wfop_rep->c_rep.rwr_count > 0);
1352  }
1353  } m0_tl_endfor;
1354 
1355  m0_fol_rec_fini(&dec_rec);
1357 }
1358 
1359 static void bulkio_server_write_fol_rec_undo_verify(void)
1360 {
1361  int j;
1362  struct m0_bufvec *buf;
1363  struct m0_reqh *reqh;
1364  struct m0_fol_rec dec_rec;
1365  struct m0_dtx dtx;
1366  struct m0_sm_group *grp = m0_locality0_get()->lo_grp;
1367  int result;
1368  struct m0_fol_frag *dec_frag;
1369  struct m0_buf save_buf = M0_BUF_INIT0;
1370  /* m0_get()->i_reqh_uses_ad_stob */
1371  bool stob_ad = m0_cs_storage_devs_get() != NULL;
1372 
1373  if (!stob_ad)
1374  return;
1375 
1376  buf = &bp->bp_iobuf[0]->nb_buffer;
1377  /* Write data "b" in the file. */
1378  for (j = 0; j < bp->bp_seg_nr; ++j) {
1379  memset(buf->ov_buf[j], 'b', IO_SEG_SIZE);
1380  }
1383  result = m0_buf_copy(&save_buf, &payload_buf);
1384  M0_UT_ASSERT(result == 0);
1387 
1388  /* Write data "a" in the file. */
1389  for (j = 0; j < bp->bp_seg_nr; ++j) {
1390  memset(buf->ov_buf[j], 'a', IO_SEG_SIZE);
1391  }
1394 
1395  /* Undo the last write, so that file contains data "b". */
1397  M0_UT_ASSERT(reqh != NULL);
1399 
1400  m0_fol_rec_init(&dec_rec, &reqh->rh_fol);
1401  result = m0_fol_rec_decode(&dec_rec, &save_buf);
1402  M0_UT_ASSERT(result == 0);
1403  m0_buf_free(&save_buf);
1404 
1405  M0_UT_ASSERT(dec_rec.fr_header.rh_frags_nr == 1 ||
1406  dec_rec.fr_header.rh_frags_nr == 2);
1407  m0_tl_for(m0_rec_frag, &dec_rec.fr_frags, dec_frag) {
1408  if (dec_frag->rp_ops->rpo_type->rpt_index ==
1410  struct m0_fop_fol_frag *fp_frag;
1411  struct m0_fop_type *ftype;
1412 
1413  fp_frag = dec_frag->rp_data;
1414  M0_UT_ASSERT(fp_frag->ffrp_fop_code ==
1416 
1417  ftype = m0_fop_type_find(fp_frag->ffrp_fop_code);
1418  M0_UT_ASSERT(ftype != NULL);
1419  M0_UT_ASSERT(ftype->ft_ops->fto_undo != NULL &&
1420  ftype->ft_ops->fto_redo != NULL);
1421  result = ftype->ft_ops->fto_undo(fp_frag,
1422  &reqh->rh_fol);
1423  } else {
1425  M0_SET0(&dtx);
1427  dec_frag->rp_ops->rpo_undo_credit(dec_frag,
1428  &dtx.tx_betx_cred);
1429  m0_dtx_open_sync(&dtx);
1430  result = dec_frag->rp_ops->rpo_undo(dec_frag,
1431  &dtx.tx_betx);
1432  m0_dtx_done_sync(&dtx);
1433  m0_dtx_fini(&dtx);
1435  }
1436  M0_UT_ASSERT(result == 0);
1437  } m0_tl_endfor;
1438  m0_fol_rec_fini(&dec_rec);
1439 
1440  /* Read that data from file and compare it with data "b". */
1444 }
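/*
 * bulkio_server_write_fol_rec_undo_verify() exercises the FOL undo path (AD
 * stob only): it writes 'b', saves a copy of the FOL record payload recorded
 * in payload_buf, writes 'a', decodes the saved record and applies
 * fto_undo()/rpo_undo() to each fragment inside a fresh BE transaction, and
 * finally re-reads the file expecting to see 'b' again.
 */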
1445 
1446 /*
1447  * Sends regular write and read io fops, but replaces the original FOM
1448  * types in each io fop type with UT-specific FOM types.
1449  */
1450 static void bulkio_server_read_write_state_test(void)
1451 {
1452  int j;
1453  enum M0_RPC_OPCODES op;
1454  struct m0_bufvec *buf;
1455  struct m0_reqh *reqh;
1456 
1457  buf = &bp->bp_iobuf[0]->nb_buffer;
1458  for (j = 0; j < bp->bp_seg_nr; ++j) {
1459  memset(buf->ov_buf[j], 'b', IO_SEG_SIZE);
1460  }
1462  io_fops_create(bp, op, 1, 1, bp->bp_seg_nr);
1465  bp->bp_wfops[0]->if_fop.f_type->ft_ops =
1467  io_fops_submit(0, op);
1471 
1472  buf = &bp->bp_iobuf[0]->nb_buffer;
1473  for (j = 0; j < bp->bp_seg_nr; ++j) {
1474  memset(buf->ov_buf[j], 'a', IO_SEG_SIZE);
1475  }
1477  io_fops_create(bp, op, 1, 1, bp->bp_seg_nr);
1480  bp->bp_rfops[0]->if_fop.f_type->ft_ops =
1482  io_fops_submit(0, op);
1485 }
1486 
1487 /*
1488  * Sends regular write and read fops, but replaces the original FOM types
1489  * in each io fop type with UT-specific FOM types to check the state
1490  * transitions of the I/O FOM.
1491  */
1492 static void bulkio_server_rw_state_transition_test(void)
1493 {
1494  int j;
1495  enum M0_RPC_OPCODES op;
1496  struct m0_bufvec *buf;
1497  struct m0_reqh *reqh;
1498 
1499  buf = &bp->bp_iobuf[0]->nb_buffer;
1500  for (j = 0; j < bp->bp_seg_nr; ++j) {
1501  memset(buf->ov_buf[j], 'b', IO_SEG_SIZE);
1502  }
1504  io_fops_create(bp, op, 1, 1, bp->bp_seg_nr);
1507  bp->bp_wfops[0]->if_fop.f_type->ft_ops =
1509  io_fops_submit(0, op);
1513 
1514  buf = &bp->bp_iobuf[0]->nb_buffer;
1515  for (j = 0; j < bp->bp_seg_nr; ++j) {
1516  memset(buf->ov_buf[j], 'a', IO_SEG_SIZE);
1517  }
1519  io_fops_create(bp, op, 1, 1, bp->bp_seg_nr);
1522  bp->bp_rfops[0]->if_fop.f_type->ft_ops =
1524  io_fops_submit(0, op);
1527 }
1528 
1535 static void bulkio_server_fsync_multiple_read_write(void)
1536 {
1537  int i;
1538  int j;
1539  enum M0_RPC_OPCODES op;
1540  struct thrd_arg targ[IO_FOPS_NR];
1541  struct m0_bufvec *buf;
1542  struct m0_be_tx_remid remid;
1543  int rc;
1544  struct m0_reqh *reqh;
1545 
1546  for (i = 0; i < IO_FOPS_NR; ++i) {
1547  buf = &bp->bp_iobuf[i]->nb_buffer;
1548  for (j = 0; j < bp->bp_seg_nr; ++j) {
1549  memset(buf->ov_buf[j], 'b', IO_SEG_SIZE);
1550  }
1551  }
1553  --op) {
1554  /*
1555  * IO fops are deallocated by an rpc item type op on receiving
1556  * the reply fop. See io_item_free().
1557  */
1559  for (i = 0; i < IO_FOPS_NR; ++i) {
1560  targ[i].ta_index = i;
1561  targ[i].ta_op = op;
1562  targ[i].ta_bp = bp;
1563  io_fops_rpc_submit(&targ[i]);
1564  remid = targ[i].ta_bp->bp_remid;
1565  }
1567  }
1568 
1569  /*
1570  * Send the fsync fop request.
1571  * targ[0] suffices, since it contains the session.
1572  */
1573  rc = io_fsync_send_fop(&remid, &targ[0]);
1574  M0_UT_ASSERT(rc == 0);
1577 }
1578 
1579 static void bulkio_server_multiple_read_write(void)
1580 {
1581  int rc;
1582  int i;
1583  int j;
1584  enum M0_RPC_OPCODES op;
1585  struct thrd_arg targ[IO_FOPS_NR];
1586  struct m0_bufvec *buf;
1587  struct m0_reqh *reqh;
1588 
1589  for (i = 0; i < IO_FOPS_NR; ++i) {
1590  buf = &bp->bp_iobuf[i]->nb_buffer;
1591  for (j = 0; j < bp->bp_seg_nr; ++j) {
1592  memset(buf->ov_buf[j], 'b', IO_SEG_SIZE);
1593  }
1594  }
1596  --op) {
1597  /*
1598  * IO fops are deallocated by an rpc item type op on receiving
1599  * the reply fop. See io_item_free().
1600  */
1602  for (i = 0; i < IO_FOPS_NR; ++i) {
1603  targ[i].ta_index = i;
1604  targ[i].ta_op = op;
1605  targ[i].ta_bp = bp;
1606  M0_SET0(bp->bp_threads[i]);
1608  struct thrd_arg *,
1610  &targ[i], "io_thrd");
1611  M0_UT_ASSERT(rc == 0);
1612  }
1613  /* Waits till all threads finish their job. */
1614  for (i = 0; i < IO_FOPS_NR; ++i) {
1616  buf = &bp->bp_iobuf[i]->nb_buffer;
1617  for (j = 0; j < bp->bp_seg_nr; ++j) {
1618  memset(buf->ov_buf[j], 'a', IO_SEG_SIZE);
1619  }
1620  }
1622  }
1625 }
1626 
1627 static void add_buffer_bulk(struct m0_rpc_bulk *rbulk,
1628  enum M0_RPC_OPCODES op,
1629  int index)
1630 {
1631  struct m0_rpc_bulk_buf *rbuf;
1632  int rc;
1633  int i;
1634 
1635  /*
1636  * Adds a m0_rpc_bulk_buf structure to list of such structures
1637  * in m0_rpc_bulk.
1638  */
1639  rc = m0_rpc_bulk_buf_add(rbulk, bp->bp_seg_nr, 0, &bp->bp_cnetdom,
1640  NULL, &rbuf);
1641  M0_UT_ASSERT(rc == 0);
1642  M0_UT_ASSERT(rbuf != NULL);
1643 
1644  /* Adds io buffers to m0_rpc_bulk_buf structure. */
1645  for (i = 0; i < bp->bp_seg_nr; ++i) {
1649  bp->bp_offsets[0], &bp->bp_cnetdom);
1650  M0_UT_ASSERT(rc == 0);
1651  bp->bp_offsets[0] +=
1653  }
1654  bp->bp_offsets[0] += IO_SEG_SIZE;
1655 
1659 }
1660 
1661 static void fop_create_populate(int index, enum M0_RPC_OPCODES op, int buf_nr)
1662 {
1663  struct m0_io_fop **io_fops;
1664  struct m0_rpc_bulk *rbulk;
1665  struct m0_io_fop *iofop;
1666  struct m0_fop_cob_rw *rw;
1667  int i;
1668  int rc;
1669 
1670  if (op == M0_IOSERVICE_WRITEV_OPCODE) {
1673  } else {
1676  }
1677 
1678  io_fops = (op == M0_IOSERVICE_WRITEV_OPCODE) ? bp->bp_wfops :
1679  bp->bp_rfops;
1680  M0_ALLOC_PTR(io_fops[index]);
1681 
1683  rc = m0_io_fop_init(io_fops[index], &bp->bp_fids[0],
1685  else
1686  rc = m0_io_fop_init(io_fops[index], &bp->bp_fids[0],
1688  M0_UT_ASSERT(rc == 0);
1689  iofop = io_fops[index];
1690  rbulk = &iofop->if_rbulk;
1691  rw = io_rw_get(&io_fops[index]->if_fop);
1692 
1693  rw->crw_fid = bp->bp_fids[0];
1695  rw->crw_pver = CONF_PVER_FID;
1697 
1698 
1699  for (i = 0; i < buf_nr; ++i)
1700  add_buffer_bulk(rbulk, op, i);
1701 
1702  /*
1703  * Allocates memory for array of net buf descriptors and array of
1704  * index vectors from io fop.
1705  */
1706  rc = m0_io_fop_prepare(&iofop->if_fop);
1707  M0_UT_ASSERT(rc == 0);
1708 
1709  /*
1710  * Stores the net buf descriptor(s) in the io fop wire format after
1711  * adding the corresponding net buffers to the transfer machine.
1712  */
1715  M0_UT_ASSERT(rc == 0);
1716 
1717  for (i = 0; i < IO_FIDS_NR; ++i)
1719 }
1720 
1721 static void bulkio_server_read_write_multiple_nb(void)
1722 {
1723  int i;
1724  int j;
1725  int buf_nr;
1726  enum M0_RPC_OPCODES op;
1727  struct m0_bufvec *buf;
1728  struct m0_reqh *reqh;
1729 
1730  buf_nr = IO_FOPS_NR / 4;
1731  for (i = 0; i < buf_nr; ++i) {
1732  buf = &bp->bp_iobuf[i]->nb_buffer;
1733  for (j = 0; j < bp->bp_seg_nr; ++j) {
1734  memset(buf->ov_buf[j], 'b', IO_SEG_SIZE);
1735  }
1736  }
1738  fop_create_populate(0, op, buf_nr);
1740  io_fops_submit(0, op);
1744 
1745  for (i = 0; i < buf_nr; ++i) {
1746  buf = &bp->bp_iobuf[i]->nb_buffer;
1747  for (j = 0; j < bp->bp_seg_nr; ++j) {
1748  memset(buf->ov_buf[j], 'a', IO_SEG_SIZE);
1749  }
1750  }
1752  fop_create_populate(0, op, buf_nr);
1754  io_fops_submit(0, op);
1757 }
1758 
1759 static void bulkio_init(void)
1760 {
1761  int rc;
1762  const char *caddr = "0@lo:12345:34:*";
1763  const char *saddr = "0@lo:12345:34:1";
1764 
1765  nb_nr = 0;
1766  M0_SET0(&nb_list);
1767  buf_pool = NULL;
1770  /*
1771  * The current set of tests works with standalone io_fops, but
1772  * io_fop_di_prepare() relies on the fact that an io_fop is embedded
1773  * into the io_req_fop structure. Therefore, we have to skip DI prepare
1774  * for the tests in order to avoid crashes.
1775  */
1776  m0_fi_enable("io_fop_di_prepare", "skip_di_for_ut");
1777  m0_fi_enable("m0_file_init", "skip_di_for_ut");
1778  m0_fi_enable("stob_ad_domain_create", "write_undo");
1779 
1780  M0_ALLOC_PTR(bp);
1781  M0_ASSERT(bp != NULL);
1783 
1784  rc = bulkio_server_start(bp, saddr);
1785  M0_UT_ASSERT(rc == 0);
1786  M0_UT_ASSERT(bp->bp_sctx != NULL);
1787  rc = bulkio_client_start(bp, caddr, saddr);
1788  M0_UT_ASSERT(rc == 0);
1789  M0_UT_ASSERT(bp->bp_cctx != NULL);
1790 
1792 }
1793 
1794 static void bulkio_fini(void)
1795 {
1796  struct m0_reqh *reqh;
1797  int i;
1798 
1799  for (i = 0; i < IO_FIDS_NR; ++i)
1800  m0_file_fini(&bp->bp_file[i]);
1806  m0_free(bp);
1807 
1808  m0_fi_disable("io_fop_di_prepare", "skip_di_for_ut");
1809  m0_fi_disable("m0_file_init", "skip_di_for_ut");
1810  m0_fi_disable("stob_ad_domain_create", "write_undo");
1811 }
1812 
1813 /*
1814  * Only used for user-space UT.
1815  */
1816 struct m0_ut_suite bulkio_server_ut = {
1817  .ts_name = "bulk-server-ut",
1818  .ts_tests = {
1819  /*
1820  * Intentionally kept as first test case. It initializes
1821  * all necessary data for sending IO fops. Keeping
1822  * bulkio_init() as .ts_init requires changing all
1823  * M0_UT_ASSERTS to M0_ASSERTS.
1824  */
1825  { "bulkio_init", bulkio_init},
1826  { "bulkio_server_single_read_write",
1828  { "bulkio_server_write_fol_rec_verify",
1830  { "bulkio_server_write_fol_rec_undo_verify",
1832  { "bulkio_server_read_write_state_test",
1834  { "bulkio_server_vectored_read_write",
1836  /*
1837  * Keep this test close to bulkio_server_multiple_read_write.
1838  * Otherwise something breaks :)
1839  */
1840  { "bulkio_server_fsync_multiple_read_write",
1842  { "bulkio_server_rw_multiple_nb_server",
1844  { "bulkio_server_rw_state_transition_test",
1853 #if 0
1854 
1855  { "bulkio_server_read_write_fv_mismatch",
1856  bulkio_server_read_write_fv_mismatch},
1857 #endif
1858  { "bulkio_fini", bulkio_fini},
1859  { NULL, NULL }
1860  }
1861 };
1862 M0_EXPORTED(bulkio_server_ut);
1863 #undef M0_TRACE_SUBSYSTEM
uint32_t fcrw_ndesc
Definition: io_foms.h:178
static struct ctx cc
M0_INTERNAL struct m0_stob_domain * m0_stob_dom_get(struct m0_stob *stob)
Definition: stob.c:338
struct m0_be_domain * bs_domain
Definition: seg.h:82
uint64_t rwr_count
Definition: io_fops.h:324
M0_INTERNAL int m0_cc_cob_setup(struct m0_fom_cob_op *cc, struct m0_cob_domain *cdom, const struct m0_cob_attr *attr, struct m0_be_tx *ctx)
Definition: cob_foms.c:1033
#define M0_ALLOC_ARR(arr, nr)
Definition: memory.h:84
Definition: dtm.h:554
struct m0_tl fcrw_stio_list
Definition: io_foms.h:213
Definition: cob.h:581
struct m0_fol_rec_header fr_header
Definition: fol.h:192
static int bulkio_server_read_fom_tick(struct m0_fom *fom)
Definition: bulkio_ut.c:188
static int io_fop_stob_create_fom_create(struct m0_fop *fop, struct m0_fom **m, struct m0_reqh *reqh)
Definition: bulkio_ut.c:1152
struct m0_reqh * m0_cs_reqh_get(struct m0_motr *cctx)
Definition: setup.c:1762
int(* rpo_undo)(struct m0_fol_frag *frag, struct m0_be_tx *tx)
Definition: fol.h:284
#define NULL
Definition: misc.h:38
struct m0_thread ** bp_threads
Definition: bulkio_common.h:88
M0_RPC_OPCODES
Definition: rpc_opcodes.h:41
M0_INTERNAL void m0_fol_rec_fini(struct m0_fol_rec *rec)
Definition: fol.c:104
static void fom_phase_set(struct m0_fom *fom, int phase)
Definition: bulkio_ut.c:315
static struct m0_addb2_mach * m
Definition: consumer.c:38
M0_INTERNAL int m0_stob_locate(struct m0_stob *stob)
Definition: stob.c:128
static const struct m0_fom_ops bulkio_stob_create_fom_ops
Definition: bulkio_ut.c:1128
struct m0_tl rios_buffer_pools
Definition: io_service.h:98
M0_INTERNAL int m0_rpc_bulk_store(struct m0_rpc_bulk *rbulk, const struct m0_rpc_conn *conn, struct m0_net_buf_desc_data *to_desc, const struct m0_net_buffer_callbacks *bulk_cb)
Definition: bulk.c:520
uint32_t crw_index
Definition: io_fops.h:388
struct m0_bufvec nb_buffer
Definition: net.h:1322
int m0_thread_join(struct m0_thread *q)
Definition: kthread.c:169
static void bulkio_server_write_fol_rec_undo_verify(void)
Definition: bulkio_ut.c:1359
const struct m0_fol_frag_ops * rp_ops
Definition: fol.h:244
void * b_addr
Definition: buf.h:39
struct m0_tl fcrw_netbuf_list
Definition: io_foms.h:223
static struct m0_sm_group * grp
Definition: bytecount.c:38
M0_INTERNAL int m0_rpc_bulk_buf_databuf_add(struct m0_rpc_bulk_buf *rbuf, void *buf, m0_bcount_t count, m0_bindex_t index, struct m0_net_domain *netdom)
Definition: bulk.c:331
#define M0_LOG(level,...)
Definition: trace.h:167
static struct m0_be_tx_credit * m0_fom_tx_credit(struct m0_fom *fom)
Definition: fom.h:542
M0_INTERNAL void m0_storage_dev_get(struct m0_storage_dev *dev)
Definition: storage_dev.c:466
M0_INTERNAL struct m0_net_buffer * m0_net_buffer_pool_get(struct m0_net_buffer_pool *pool, uint32_t colour)
Definition: buffer_pool.c:215
M0_INTERNAL void m0_net_buffer_pool_unlock(struct m0_net_buffer_pool *pool)
Definition: buffer_pool.c:203
static void builkio_ut_stob_get(struct m0_io_fom_cob_rw *fom_obj)
Definition: bulkio_ut.c:290
static void bulkio_fini(void)
Definition: bulkio_ut.c:1794
M0_INTERNAL void m0_dtx_init(struct m0_dtx *tx, struct m0_be_domain *be_domain, struct m0_sm_group *sm_group)
Definition: dtm.c:67
struct m0_rpc_server_ctx * bp_sctx
struct m0_vec ov_vec
Definition: vec.h:147
int(* fto_create)(struct m0_fop *fop, struct m0_fom **out, struct m0_reqh *reqh)
Definition: fom.h:650
struct m0_rpc_bulk if_rbulk
Definition: io_fops.h:177
struct m0_fid bp_fids[IO_FIDS_NR]
Definition: bulkio_common.h:70
int fcrw_curr_desc_index
Definition: io_foms.h:180
const struct m0_fid_type m0_cob_fid_type
Definition: cob.c:117
struct m0_net_domain * rios_ndom
Definition: io_service.h:81
static void io_fops_submit(uint32_t index, enum M0_RPC_OPCODES op)
Definition: bulkio_ut.c:1264
enum m0_ha_obj_state isd_ha_state
Definition: storage_dev.h:92
struct m0_tl fr_frags
Definition: fol.h:201
struct m0_net_buf_desc_data * id_descs
Definition: io_fops.h:313
void * m0_fop_data(const struct m0_fop *fop)
Definition: fop.c:220
static int bulkio_server_write_fom_tick(struct m0_fom *fom)
Definition: bulkio_ut.c:132
static void bulkio_init(void)
Definition: bulkio_ut.c:1759
uint32_t rpt_index
Definition: fol.h:262
#define M0_THREAD_INIT(thread, TYPE, init, func, arg, namefmt,...)
Definition: thread.h:139
static void fill_buffers_pool(uint32_t colour)
Definition: bulkio_ut.c:282
struct m0_sm_conf io_conf
Definition: io_foms.c:822
void(* rpo_undo_credit)(const struct m0_fol_frag *frag, struct m0_be_tx_credit *accum)
Definition: fol.h:286
int bulkio_server_start(struct bulkio_params *bp, const char *saddr)
Definition: bulkio_common.c:82
struct m0_rpc_bulk fcrw_bulk
Definition: io_foms.h:217
static int void * buf
Definition: dir.c:1019
struct m0_net_buffer_pool * fcrw_bp
Definition: io_foms.h:205
void * rp_data
Definition: fol.h:249
static const struct m0_fop_type_ops bulkio_stob_create_ops
Definition: bulkio_ut.c:61
#define container_of(ptr, type, member)
Definition: misc.h:33
static const struct m0_fom_type_ops ut_io_fom_cob_rw_type_ops
Definition: bulkio_ut.c:91
#define M0_SET0(obj)
Definition: misc.h:64
uint32_t ci_nr
Definition: vec.h:635
Definition: ut.h:77
static const struct m0_fom_type_ops bulkio_server_read_fomt_ops
Definition: bulkio_ut.c:83
struct m0_fid crw_pver
Definition: io_fops.h:391
static void io_fop_desc_get(struct m0_fop *fop, struct m0_net_buf_desc_data **desc)
Definition: io_fops.c:1799
const struct m0_sm_conf m0_generic_conf
Definition: fom_generic.c:838
void ** ov_buf
Definition: vec.h:149
m0_fom_phase
Definition: fom.h:372
struct m0_fol_frag_type m0_fop_fol_frag_type
static struct m0_be_tx * m0_fom_tx(struct m0_fom *fom)
Definition: fom.h:537
M0_INTERNAL int m0_dtx_done_sync(struct m0_dtx *tx)
Definition: dtm.c:122
struct m0_be_tx_remid bp_remid
Definition: sock.c:887
const struct m0_fol_frag_type * rpo_type
Definition: fol.h:283
static void bulkio_server_write_fol_rec_verify(void)
Definition: bulkio_ut.c:1317
struct m0_fom_type ft_fom_type
Definition: fop.h:232
int bulkio_client_start(struct bulkio_params *bp, const char *caddr, const char *saddr)
const struct m0_fom_type * fo_type
Definition: dump.c:107
#define m0_tl_endfor
Definition: tlist.h:700
#define BUF_F
Definition: buf.h:75
uint64_t bp_offsets[IO_FIDS_NR]
Definition: bulkio_common.h:73
struct m0_ref isd_ref
Definition: storage_dev.h:101
static void bulkio_stob_create(void)
Definition: bulkio_ut.c:1224
op
Definition: libdemo.c:64
M0_INTERNAL void m0_cob_tx_credit(struct m0_cob_domain *dom, enum m0_cob_op optype, struct m0_be_tx_credit *accum)
Definition: cob.c:2281
enum M0_RPC_OPCODES ta_op
static int ut_io_fom_cob_rw_state(struct m0_fom *fom)
Definition: bulkio_ut.c:242
Definition: buf.h:37
static const struct m0_fom_ops bulkio_server_write_fom_ops
Definition: bulkio_ut.c:1134
M0_INTERNAL void m0_sm_group_unlock(struct m0_sm_group *grp)
Definition: sm.c:96
static void io_fop_replied(struct m0_fop *fop, struct m0_fop *bkpfop)
Definition: io_fops.c:1773
void io_fops_destroy(struct bulkio_params *bp)
M0_INTERNAL void m0_fol_rec_init(struct m0_fol_rec *rec, struct m0_fol *fol)
Definition: fol.c:98
static void bulkio_stob_fom_fini(struct m0_fom *fom)
Definition: bulkio_ut.c:95
int i
Definition: dir.c:1033
static void m0_io_fom_cob_rw_fini(struct m0_fom *fom)
Definition: io_foms.c:2396
struct m0_fop_type * f_type
Definition: fop.h:81
#define PRIu64
Definition: types.h:58
struct m0_fid crw_fid
Definition: io_fops.h:385
struct m0_net_buffer_pool rios_bp
Definition: io_service.h:79
M0_INTERNAL void m0_fid_set(struct m0_fid *fid, uint64_t container, uint64_t key)
Definition: fid.c:116
static int check_write_fom_tick(struct m0_fom *fom)
Definition: bulkio_ut.c:366
struct m0_fop if_fop
Definition: io_fops.h:174
static bool bulkio_stob_created
Definition: bulkio_ut.c:42
static void io_single_fop_submit(enum M0_RPC_OPCODES op)
Definition: bulkio_ut.c:1274
struct m0_cob_domain * rios_cdom
Definition: io_service.h:100
static void attr(struct m0_addb2__context *ctx, const uint64_t *v, char *buf)
Definition: dump.c:949
enum m0_net_queue_type nb_qtype
Definition: net.h:1363
M0_INTERNAL void m0_storage_devs_lock(struct m0_storage_devs *devs)
Definition: storage_dev.c:71
M0_INTERNAL void m0_fi_disable(const char *fp_func, const char *fp_tag)
Definition: finject.c:485
struct m0_net_buffer * bb_nbuf
Definition: bulk.h:177
Definition: stob.h:163
int m0_fom_tick_generic(struct m0_fom *fom)
Definition: fom_generic.c:848
struct m0_rpc_client_ctx * bp_cctx
static void m0_fi_enable(const char *func, const char *tag)
Definition: finject.h:276
void m0_fom_fini(struct m0_fom *fom)
Definition: fom.c:1324
fom_state_transition_tests
Definition: bulkio_ut.c:248
int32_t rb_rc
Definition: bulk.h:266
static struct m0_cob * cob
Definition: bytecount.c:40
struct m0_io_descs crw_desc
Definition: io_fops.h:400
#define M0_ASSERT(cond)
struct m0_rm_domain bp_rdom
static int ut_io_fom_cob_rw_create(struct m0_fop *fop, struct m0_fom **m, struct m0_reqh *reqh)
Definition: bulkio_ut.c:1185
M0_INTERNAL int m0_xcode_cmp(const struct m0_xcode_obj *o0, const struct m0_xcode_obj *o1)
Definition: xcode.c:572
static const struct m0_fop_type_ops bulkio_server_read_fop_ut_ops
Definition: bulkio_ut.c:73
static void bulkio_server_multiple_read_write(void)
Definition: bulkio_ut.c:1579
void io_fops_rpc_submit(struct thrd_arg *t)
M0_INTERNAL void m0_cob_oikey_make(struct m0_cob_oikey *oikey, const struct m0_fid *fid, int linkno)
Definition: cob.c:141
M0_INTERNAL void m0_fid_convert_cob2stob(const struct m0_fid *cob_fid, struct m0_stob_id *stob_id)
Definition: fid_convert.c:141
struct m0_rpc_conn rcx_connection
Definition: rpclib.h:146
static int next_read_test
Definition: bulkio_ut.c:263
static int nb_nr
Definition: bulkio_ut.c:259
static void bulkio_server_single_read_write(void)
Definition: bulkio_ut.c:1295
static const struct m0_fom_type_ops bulkio_server_write_fomt_ops
Definition: bulkio_ut.c:79
static int io_fop_coalesce(struct m0_fop *res_fop, uint64_t size)
Definition: io_fops.c:1614
static struct m0_stob_domain * dom
Definition: storage.c:38
#define M0_BUF_INIT0
Definition: buf.h:71
M0_INTERNAL void m0_net_buffer_pool_lock(struct m0_net_buffer_pool *pool)
Definition: buffer_pool.c:186
struct m0_be_tx_credit tx_betx_cred
Definition: dtm.h:560
void bulkio_params_init(struct bulkio_params *bp)
static const struct m0_fom_type_ops bulkio_stob_create_fomt_ops
Definition: bulkio_ut.c:87
static struct m0_io_fom_cob_rw_state_transition io_fom_read_st[]
Definition: io_foms.c:641
struct bulkio_params * ta_bp
M0_INTERNAL uint32_t m0_fid_cob_device_id(const struct m0_fid *cob_fid)
Definition: fid_convert.c:81
static int check_read_fom_tick(struct m0_fom *fom)
Definition: bulkio_ut.c:720
M0_INTERNAL void m0_dtx_fini(struct m0_dtx *tx)
Definition: dtm.c:134
Definition: reqh.h:94
struct m0_sm_group * lo_grp
Definition: locality.h:67
enum m0_conf_service_type isd_srv_type
Definition: storage_dev.h:94
Definition: dump.c:103
M0_INTERNAL m0_bcount_t m0_net_domain_get_max_buffer_size(struct m0_net_domain *dom)
static int bulkio_stob_create_fom_tick(struct m0_fom *fom)
Definition: bulkio_ut.c:1060
void bulkio_client_stop(struct m0_rpc_client_ctx *cctx)
M0_INTERNAL void m0_buf_free(struct m0_buf *buf)
Definition: buf.c:55
static size_t m0_io_fom_cob_rw_locality_get(const struct m0_fom *fom)
Definition: io_foms.c:2478
m0_bcount_t * v_count
Definition: vec.h:53
M0_INTERNAL int m0_buf_copy(struct m0_buf *dest, const struct m0_buf *src)
Definition: buf.c:104
static const struct m0_fop_type_ops bulkio_server_write_fop_ut_ops
Definition: bulkio_ut.c:67
static int next_write_test
Definition: bulkio_ut.c:262
M0_INTERNAL struct m0_storage_devs * m0_cs_storage_devs_get(void)
Definition: setup.c:1783
M0_INTERNAL struct m0_fop_type * m0_fop_type_find(uint32_t opcode)
Definition: fop.c:388
M0_INTERNAL struct m0_storage_dev * m0_storage_devs_find_by_dom(struct m0_storage_devs *devs, struct m0_stob_domain *dom)
Definition: storage_dev.c:161
struct m0_fop_cob_rw_reply c_rep
Definition: io_fops.h:363
M0_INTERNAL struct m0_stob_domain * m0_stob_domain_find_by_stob_id(const struct m0_stob_id *stob_id)
Definition: domain.c:294
M0_INTERNAL void m0_reqh_idle_wait(struct m0_reqh *reqh)
Definition: reqh.c:606
void cob_attr_default_fill(struct m0_cob_attr *attr)
static int m0_io_fom_cob_rw_tick(struct m0_fom *fom)
Definition: io_foms.c:2233
const struct m0_fom_type_ops * ft_ops
Definition: fom.h:614
static void bulkio_server_read_write_state_test(void)
Definition: bulkio_ut.c:1450
struct m0_ut_suite bulkio_server_ut
Definition: bulkio_ut.c:1816
static void empty_buffers_pool(uint32_t colour)
Definition: bulkio_ut.c:265
M0_INTERNAL int m0_stob_create(struct m0_stob *stob, struct m0_dtx *dtx, const char *str_cfg)
Definition: stob.c:154
static const struct m0_io_fom_cob_rw_state_transition io_fom_write_st[]
Definition: io_foms.c:679
struct m0_fop_type m0_fop_cob_readv_fopt
Definition: io_fops.c:71
const char * ts_name
Definition: ut.h:99
static bool fol_check_enabled
Definition: bulkio_ut.c:47
static void fop_create_populate(int index, enum M0_RPC_OPCODES op, int buf_nr)
Definition: bulkio_ut.c:1661
M0_INTERNAL int64_t m0_ref_read(const struct m0_ref *ref)
Definition: refs.c:44
struct m0_net_buffer ** bp_iobuf
Definition: bulkio_common.h:85
struct m0_reqh reqh
Definition: rm_foms.c:48
void bulkioapi_test(void)
static const struct m0_fid CONF_PVER_FID
Definition: bulkio_common.h:64
int(* fto_redo)(struct m0_fop_fol_frag *ffrag, struct m0_fol *fol)
Definition: fop.h:274
M0_INTERNAL struct m0_locality * m0_locality0_get(void)
Definition: locality.c:169
struct m0_io_fop ** bp_rfops
Definition: bulkio_common.h:76
uint64_t f_key
Definition: fid.h:40
struct m0_be_tx tx_betx
Definition: dtm.h:559
M0_INTERNAL void m0_sm_init(struct m0_sm *mach, const struct m0_sm_conf *conf, uint32_t state, struct m0_sm_group *grp)
Definition: sm.c:313
M0_INTERNAL uint32_t m0_net_tm_colour_get(struct m0_net_transfer_mc *tm)
Definition: tm.c:448
static struct m0_net_buffer * nb_list[64]
Definition: bulkio_ut.c:260
#define M0_ALLOC_PTR(ptr)
Definition: memory.h:86
#define PRIi64
Definition: types.h:59
uint32_t rh_frags_nr
Definition: fol.h:158
struct m0_net_domain * m0_fop_domain_get(const struct m0_fop *fop)
Definition: fop.c:486
static void bulkio_server_read_write_multiple_nb(void)
Definition: bulkio_ut.c:1721
uint32_t id_nr
Definition: io_fops.h:312
const struct m0_fop_type_ops * ft_ops
Definition: fop.h:228
M0_INTERNAL int m0_fom_rc(const struct m0_fom *fom)
Definition: fom.c:1727
struct m0_fom fcrw_gen
Definition: io_foms.h:174
struct m0_fid crw_gfid
Definition: io_fops.h:382
#define BUF_P(p)
Definition: buf.h:76
struct m0_sm_conf ft_conf
Definition: fom.h:615
void bulkio_params_fini(struct bulkio_params *bp)
static void bulkio_server_rw_state_transition_test(void)
Definition: bulkio_ut.c:1492
void bulkio_server_stop(struct m0_rpc_server_ctx *sctx)
static struct m0_buf payload_buf
Definition: bulkio_ut.c:46
static struct m0_fop * fop
Definition: item.c:57
struct m0_net_domain bp_cnetdom
Definition: bulkio_common.h:99
uint32_t fcrw_batch_size
Definition: io_foms.h:186
struct m0_fol rh_fol
Definition: reqh.h:121
struct m0_be_seg * rh_beseg
Definition: reqh.h:112
M0_INTERNAL void m0_sm_group_lock(struct m0_sm_group *grp)
Definition: sm.c:83
struct m0_net_buffer_pool * ut_get_buffer_pool(struct m0_fom *fom)
Definition: bulkio_ut.c:105
#define WRITE_FOP_DATA(fop)
Definition: bulkio_ut.c:1315
static void m0_fi_enable_once(const char *func, const char *tag)
Definition: finject.h:301
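m0_fi_enable_once() arms a fault-injection point for exactly one hit, which is how the tests here steer the FOMs into specific branches. A minimal illustration; the function/tag pair is hypothetical and must match an M0_FI_ENABLED() check compiled into the target function:

#include "lib/finject.h"

static void fi_sketch(void)
{
        /* Hypothetical fault point: the next M0_FI_ENABLED("fake_error")
         * check inside target_function() fires once, then auto-disarms. */
        m0_fi_enable_once("target_function", "fake_error");
}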
M0_INTERNAL void m0_storage_devs_unlock(struct m0_storage_devs *devs)
Definition: storage_dev.c:77
M0_INTERNAL int m0_cob_locate(struct m0_cob_domain *dom, struct m0_cob_oikey *oikey, uint64_t flags, struct m0_cob **out)
Definition: cob.c:1407
M0_INTERNAL int m0_stob_find(const struct m0_stob_id *id, struct m0_stob **out)
Definition: stob.c:92
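m0_stob_find() is commonly preceded by m0_fid_convert_cob2stob() (listed above) and balanced by m0_stob_put(). A rough sketch of that sequence, with the header paths taken as assumptions:

#include "fid/fid.h"               /* m0_fid */
#include "stob/stob.h"             /* m0_stob_id, m0_stob_find, m0_stob_put */
#include "ioservice/fid_convert.h" /* m0_fid_convert_cob2stob (assumed path) */

static int stob_lookup_sketch(const struct m0_fid *cob_fid,
                              struct m0_stob     **out)
{
        struct m0_stob_id stob_id;

        /* Map the COB fid onto the id of its backing storage object. */
        m0_fid_convert_cob2stob(cob_fid, &stob_id);
        /* On success *out carries a reference released by m0_stob_put(). */
        return m0_stob_find(&stob_id, out);
}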
M0_INTERNAL int m0_dtx_open_sync(struct m0_dtx *tx)
Definition: dtm.c:101
static const struct fom_phase_desc fpd_table[]
Definition: fom_generic.c:566
int io_fsync_send_fop(struct m0_be_tx_remid *remid, struct thrd_arg *t)
void io_fops_create(struct bulkio_params *bp, enum M0_RPC_OPCODES op, int fids_nr, int fops_nr, int segs_nr)
M0_INTERNAL void m0_file_fini(struct m0_file *file)
Definition: file.c:498
void(* fo_fini)(struct m0_fom *fom)
Definition: fom.h:657
M0_INTERNAL void m0_net_buffer_pool_put(struct m0_net_buffer_pool *pool, struct m0_net_buffer *buf, uint32_t colour)
Definition: buffer_pool.c:243
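Buffers go back to a pool under the pool lock, with the colour usually taken from the transfer machine via m0_net_tm_colour_get() (listed above). A sketch, assuming m0_net_buffer_pool_unlock() as the counterpart of the lock call listed here:

#include "net/buffer_pool.h"   /* m0_net_buffer_pool and its lock/put helpers */

static void buffer_put_sketch(struct m0_net_buffer_pool *pool,
                              struct m0_net_buffer      *nb,
                              uint32_t                   colour)
{
        m0_net_buffer_pool_lock(pool);
        /* Hand nb back to the pool's coloured list for this colour. */
        m0_net_buffer_pool_put(pool, nb, colour);
        m0_net_buffer_pool_unlock(pool);
}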
static const struct m0_fom_ops bulkio_server_read_fom_ops
Definition: bulkio_ut.c:1146
static struct m0_net_buffer_pool * buf_pool
Definition: bulkio_ut.c:261
void(* fto_fop_replied)(struct m0_fop *fop, struct m0_fop *bfop)
Definition: fop.h:267
uint64_t isd_cid
Definition: storage_dev.h:81
M0_INTERNAL int m0_io_fop_init(struct m0_io_fop *iofop, const struct m0_fid *gfid, struct m0_fop_type *ftype, void(*fop_release)(struct m0_ref *))
Definition: io_fops.c:865
static struct bulkio_params * bp
Definition: bulkio_ut.c:44
M0_INTERNAL bool m0_is_read_fop(const struct m0_fop *fop)
Definition: io_fops.c:916
M0_INTERNAL void m0_stob_create_credit(struct m0_stob_domain *dom, struct m0_be_tx_credit *accum)
Definition: stob.c:148
const struct m0_fop_type_ops io_fop_rwv_ops
Definition: io_fops.c:229
void m0_fom_phase_set(struct m0_fom *fom, int phase)
Definition: fom.c:1688
const struct m0_net_buffer_callbacks m0_rpc__buf_bulk_cb
Definition: bulk.c:238
static int io_fop_server_write_fom_create(struct m0_fop *fop, struct m0_fom **m, struct m0_reqh *reqh)
Definition: bulkio_ut.c:1167
const struct m0_fom_type_ops io_fom_type_ops
Definition: io_foms.c:633
M0_INTERNAL struct m0_fop_cob_rw * io_rw_get(struct m0_fop *fop)
Definition: io_fops.c:1037
static const struct m0_fom_ops ut_io_fom_cob_rw_ops
Definition: bulkio_ut.c:1140
M0_INTERNAL int m0_io_fop_prepare(struct m0_fop *fop)
Definition: io_fops.c:1513
#define m0_tl_find(name, var, head,...)
Definition: tlist.h:757
M0_INTERNAL int m0_rpc_bulk_buf_add(struct m0_rpc_bulk *rbulk, uint32_t segs_nr, m0_bcount_t length, struct m0_net_domain *netdom, struct m0_net_buffer *nb, struct m0_rpc_bulk_buf **out)
Definition: bulk.c:291
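m0_rpc_bulk_buf_add() attaches a network buffer to an RPC bulk object before the bulk descriptors are exchanged. A sketch that follows the signature above; reading length as the total byte count carried by the buffer is an assumption:

#include "rpc/bulk.h"   /* m0_rpc_bulk, m0_rpc_bulk_buf (assumed path) */
#include "net/net.h"    /* m0_net_domain, m0_net_buffer */

static int bulk_buf_add_sketch(struct m0_rpc_bulk   *rbulk,
                               struct m0_net_domain *netdom,
                               struct m0_net_buffer *nb,
                               uint32_t              segs_nr,
                               m0_bcount_t           length)
{
        struct m0_rpc_bulk_buf *rbuf;

        /* rbuf describes the buffer just added and is owned by rbulk. */
        return m0_rpc_bulk_buf_add(rbulk, segs_nr, length, netdom, nb, &rbuf);
}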
struct m0_fop_type m0_fop_cob_writev_fopt
Definition: io_fops.c:72
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
void m0_free(void *data)
Definition: memory.c:146
static void bulkio_server_fsync_multiple_read_write(void)
Definition: bulkio_ut.c:1535
M0_INTERNAL void m0_stob_get(struct m0_stob *stob)
Definition: stob.c:275
static int io_fop_server_read_fom_create(struct m0_fop *fop, struct m0_fom **m, struct m0_reqh *reqh)
Definition: bulkio_ut.c:1209
struct m0_io_indexvec crw_ivec
Definition: io_fops.h:411
int32_t rc
Definition: trigger_fop.h:47
struct m0_io_fop ** bp_wfops
Definition: bulkio_common.h:79
static int m0_io_fom_cob_rw_create(struct m0_fop *fop, struct m0_fom **out, struct m0_reqh *reqh)
Definition: io_foms.c:1144
#define M0_UT_ASSERT(a)
Definition: ut.h:46
struct m0_stob * fcrw_stob
Definition: io_foms.h:207
struct m0_motr rsx_motr_ctx
Definition: rpclib.h:84
M0_INTERNAL void m0_stob_put(struct m0_stob *stob)
Definition: stob.c:291
int(* fto_undo)(struct m0_fop_fol_frag *ffrag, struct m0_fol *fol)
Definition: fop.h:273
static void add_buffer_bulk(struct m0_rpc_bulk *rbulk, enum M0_RPC_OPCODES op, int index)
Definition: bulkio_ut.c:1627
struct m0_net_transfer_mc * m0_fop_tm_get(const struct m0_fop *fop)
Definition: fop.c:479
static void release_one_buffer(uint32_t colour)
Definition: bulkio_ut.c:275
M0_INTERNAL void m0_file_init(struct m0_file *file, const struct m0_fid *fid, struct m0_rm_domain *dom, enum m0_di_types di_type)
Definition: file.c:477
struct m0_file bp_file[IO_FIDS_NR]
M0_INTERNAL void m0_fid_tassume(struct m0_fid *fid, const struct m0_fid_type *ft)
Definition: fid.c:146
uint32_t bp_seg_nr
M0_INTERNAL void m0_sm_fini(struct m0_sm *mach)
Definition: sm.c:331
uint32_t ffrp_fop_code
Definition: fop.h:354
M0_INTERNAL int m0_fol_rec_decode(struct m0_fol_rec *rec, struct m0_buf *at)
Definition: fol.c:331