Motr  M0
io_req_fop.c
Go to the documentation of this file.
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 #include "motr/client.h"
23 #include "motr/client_internal.h"
24 #include "motr/addb.h"
25 #include "motr/pg.h"
26 #include "motr/io.h"
27 #include "motr/sync.h"
28 
29 #include "lib/memory.h" /* m0_alloc, m0_free */
30 #include "lib/errno.h" /* ENOMEM */
31 #include "lib/atomic.h" /* m0_atomic_{inc,dec,get} */
32 #include "lib/cksum_utils.h"
33 #include "rpc/rpc_machine_internal.h" /* m0_rpc_machine_lock */
34 #include "fop/fom_generic.h" /* m0_rpc_item_generic_reply_rc */
35 #include "cob/cob.h" /* M0_COB_IO M0_COB_PVER M0_COB_NLINK */
36 #include "rpc/addb2.h"
37 #include "rpc/item.h"
38 #include "rpc/rpc_internal.h"
39 
40 #define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_CLIENT
41 #include "lib/trace.h" /* M0_LOG */
42 
43 /*
44  * No initialisation for iofop_bobtype as it isn't const,
45  * iofop_bobtype is initialised as a list type.
46  */
/*
 * Branded-object (bob) and typed-list boilerplate for struct ioreq_fop:
 * iofop_bobtype brands ioreq_fop instances; the "iofops" tlist links them
 * via irf_link with magic field irf_magic.
 * NOTE(review): the M0_TL_DESCR_DEFINE invocation below is truncated in
 * this listing (magic constants and closing parenthesis are missing from
 * the extraction) -- confirm against the upstream io_req_fop.c.
 */
48 M0_BOB_DEFINE(M0_INTERNAL, &iofop_bobtype, ioreq_fop);
49 
54 M0_TL_DESCR_DEFINE(iofops, "List of IO fops", M0_INTERNAL,
55  struct ioreq_fop, irf_link, irf_magic,
57 M0_TL_DEFINE(iofops, M0_INTERNAL, struct ioreq_fop);
58 
62 M0_INTERNAL bool ioreq_fop_invariant(const struct ioreq_fop *fop)
63 {
64  return M0_RC(fop != NULL &&
65  _0C(ioreq_fop_bob_check(fop)) &&
66  _0C(fop->irf_tioreq != NULL) &&
67  _0C(fop->irf_ast.sa_cb != NULL) &&
68  _0C(fop->irf_ast.sa_mach != NULL));
69 }
70 
/*
 * Decides whether the IO request state machine may complete: all IO fop
 * replies have been received (nxr_iofop_nr dropped to zero) and, for
 * writes/truncates, all cob-creation fop replies as well (nxr_ccfop_nr).
 * NOTE(review): this listing is truncated -- the initialisation of
 * 'instance' and part of the return expression (doc-lines 75, 78, 83)
 * are missing; confirm the exact condition against the upstream source.
 */
71 static bool should_ioreq_sm_complete(struct m0_op_io *ioo)
72 {
73  struct m0_client *instance;
74 
76  /* Ensure that replies for iofops and bulk data have been received. */
77  return m0_atomic64_get(&ioo->ioo_nwxfer.nxr_iofop_nr) == 0 &&
79  /*
80  * In case of writing in oostore mode, ensure that all
81  * cob creation fops (if any) have received reply.
82  */
84  (M0_IN(ioreq_sm_state(ioo), (IRS_WRITING, IRS_TRUNCATE)))) ?
85  m0_atomic64_get(&ioo->ioo_nwxfer.nxr_ccfop_nr) == 0 : true);
86 }
87 
88 M0_INTERNAL struct m0_file *m0_client_fop_to_file(struct m0_fop *fop)
89 {
90  struct m0_op_io *ioo;
91  struct nw_xfer_request *xfer;
92  struct m0_io_fop *iofop;
93  struct ioreq_fop *irfop;
94 
95  iofop = M0_AMB(iofop, fop, if_fop);
96  irfop = bob_of(iofop, struct ioreq_fop, irf_iofop, &iofop_bobtype);
97  xfer = irfop->irf_tioreq->ti_nwxfer;
98 
99  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
100 
101  return &ioo->ioo_flock;
102 }
103 
/*
 * Copies per-unit DI checksums received in a read reply (buf) back into
 * the application-provided attribute buffer of the IO operation. The
 * reply index vector (rep_ivec) is walked one parity-group unit at a
 * time; the target's cob-offset and gob-offset vectors are advanced in
 * lock-step so each checksum lands at the right application offset.
 * No-op when DI is disabled or the reply carries no checksum data.
 * NOTE(review): this listing is truncated -- the line initialising
 * 'unit_size' (doc-line 163) and the assignment computing 'dst' from the
 * checksum address helper (doc-line 224) are missing from the
 * extraction; as shown, 'unit_size' would be read uninitialised.
 * Confirm against the upstream source.
 */
134 static void application_attribute_copy(struct m0_indexvec *rep_ivec,
135  struct target_ioreq *ti,
136  struct m0_op_io *ioo,
137  struct m0_buf *buf)
138 {
139  uint32_t unit_size;
140  uint32_t off;
141  uint32_t cs_sz;
142  m0_bindex_t rep_index;
143  m0_bindex_t ti_cob_index;
144  m0_bindex_t ti_goff_index;
145  struct m0_ivec_cursor rep_cursor;
146  struct m0_ivec_cursor ti_cob_cursor;
147  struct m0_ivec_cursor ti_goff_cursor;
148  struct m0_indexvec *ti_ivec = &ti->ti_ivec;
149  struct m0_indexvec *ti_goff_ivec = &ti->ti_goff_ivec;
150  void *dst;
151  void *src;
152 
153  if (!m0__obj_is_di_enabled(ioo)) {
154  return;
155  }
156  src = buf->b_addr;
157 
158  if (!buf->b_nob) {
159  /* Return as no checksum is present */
160  return;
161  }
162 
164  cs_sz = ioo->ioo_attr.ov_vec.v_count[0];
165 
166  m0_ivec_cursor_init(&rep_cursor, rep_ivec);
167  m0_ivec_cursor_init(&ti_cob_cursor, ti_ivec);
168  m0_ivec_cursor_init(&ti_goff_cursor, ti_goff_ivec);
169 
170  rep_index = m0_ivec_cursor_index(&rep_cursor);
171  ti_cob_index = m0_ivec_cursor_index(&ti_cob_cursor);
172  ti_goff_index = m0_ivec_cursor_index(&ti_goff_cursor);
173 
174  /* Move rep_cursor on unit boundary */
175  off = rep_index % unit_size;
176  if (off) {
177  if (!m0_ivec_cursor_move(&rep_cursor, unit_size - off))
178  rep_index = m0_ivec_cursor_index(&rep_cursor);
179  else
180  return;
181  }
182  off = ti_cob_index % unit_size;
183  if (off != 0) {
184  if (!m0_ivec_cursor_move(&ti_cob_cursor, unit_size - off)) {
185  ti_cob_index = m0_ivec_cursor_index(&ti_cob_cursor);
186  }
187  }
188  off = ti_goff_index % unit_size;
189  if (off != 0) {
190  if (!m0_ivec_cursor_move(&ti_goff_cursor, unit_size - off)) {
191  ti_goff_index = m0_ivec_cursor_index(&ti_goff_cursor);
192  }
193  }
194  M0_ASSERT(ti_cob_index <= rep_index);
195 
208  do {
209  rep_index = m0_ivec_cursor_index(&rep_cursor);
210  while (ti_cob_index != rep_index) {
211  if (m0_ivec_cursor_move(&ti_cob_cursor, unit_size) ||
212  m0_ivec_cursor_move(&ti_goff_cursor, unit_size)) {
213  M0_ASSERT(0);
214  }
215  ti_cob_index = m0_ivec_cursor_index(&ti_cob_cursor);
216  ti_goff_index = m0_ivec_cursor_index(&ti_goff_cursor);
217  }
218 
219  /* GOB offset should be in span of application provided GOB extent */
220  M0_ASSERT(ti_goff_index <=
221  (ioo->ioo_ext.iv_index[ioo->ioo_ext.iv_vec.v_nr-1] +
222  ioo->ioo_ext.iv_vec.v_count[ioo->ioo_ext.iv_vec.v_nr-1]));
223 
225  ti_goff_index,
226  &ioo->ioo_ext,
227  unit_size, cs_sz);
228  M0_ASSERT(dst != NULL);
229  memcpy(dst, src, cs_sz);
230  src = (char *)src + cs_sz;
231 
232  /* Source is m0_buf and we have to copy all the checksum one at a time */
233  M0_ASSERT(src <= (buf->b_addr + buf->b_nob));
234 
235  } while (!m0_ivec_cursor_move(&rep_cursor, unit_size));
236 }
237 
/*
 * AST callback run after an IO fop reply (or rpc error) arrives.
 * Extracts error codes from the rpc item / generic reply / rw reply,
 * copies DI checksums for reads, records SNS-repair state and byte
 * counts, propagates the first error into target_ioreq, nw_xfer_request
 * and (in dgmode) m0_op_io, drops the fop reference, and completes the
 * request state machine when all replies are in.
 * NOTE(review): this listing is truncated -- among others, the lines
 * assigning 'instance' (doc-line 278), part of the M0_PRE state list
 * (282-283), the read-reply condition and indexvec wire-to-mem
 * conversion (313-314), the sync_record_update call (333-334), the
 * nxr_rdbulk_nr subtraction (341), the iofop_nr decrement (386), and
 * the state-set arguments (394-395) are missing from the extraction.
 * Confirm against the upstream source before reasoning about ordering.
 */
246 static void io_bottom_half(struct m0_sm_group *grp, struct m0_sm_ast *ast)
247 {
248  int rc;
249  uint64_t actual_bytes = 0;
250  struct m0_client *instance;
251  struct m0_op *op;
252  struct m0_op_io *ioo;
253  struct nw_xfer_request *xfer;
254  struct m0_io_fop *iofop;
255  struct ioreq_fop *irfop;
256  struct target_ioreq *tioreq;
257  struct m0_fop *reply_fop = NULL;
258  struct m0_rpc_item *req_item;
259  struct m0_rpc_item *reply_item;
260  struct m0_rpc_bulk *rbulk;
261  struct m0_fop_cob_rw_reply *rw_reply;
262  struct m0_indexvec rep_attr_ivec;
263  struct m0_fop_generic_reply *gen_rep;
264  struct m0_fop_cob_rw *rwfop;
265 
266  M0_ENTRY("sm_group %p sm_ast %p", grp, ast);
267 
268  M0_PRE(grp != NULL);
269  M0_PRE(ast != NULL);
270 
271  irfop = bob_of(ast, struct ioreq_fop, irf_ast, &iofop_bobtype);
272  iofop = &irfop->irf_iofop;
273  tioreq = irfop->irf_tioreq;
274  xfer = tioreq->ti_nwxfer;
275 
276  ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
277  op = &ioo->ioo_oo.oo_oc.oc_op;
279  M0_PRE(instance != NULL);
280  M0_PRE(M0_IN(irfop->irf_pattr, (PA_DATA, PA_PARITY)));
281  M0_PRE(M0_IN(ioreq_sm_state(ioo),
284  IRS_FAILED)));
285 
286  /* Check errors in rpc items of an IO reqest and its reply. */
287  rbulk = &iofop->if_rbulk;
288  req_item = &iofop->if_fop.f_item;
289  rwfop = io_rw_get(&iofop->if_fop);
290  reply_item = req_item->ri_reply;
291  rc = req_item->ri_error;
292  if (reply_item != NULL) {
293  reply_fop = m0_rpc_item_to_fop(reply_item);
294  rc = rc?: m0_rpc_item_generic_reply_rc(reply_item);
295  }
296  if (rc < 0 || reply_item == NULL) {
297  M0_ASSERT(ergo(reply_item == NULL, rc != 0));
298  M0_LOG(M0_ERROR, "[%p] rpc item %p rc=%d", ioo, req_item, rc);
299  goto ref_dec;
300  }
303 
304  /* Check errors in an IO request's reply. */
305  gen_rep = m0_fop_data(m0_rpc_item_to_fop(reply_item));
306  rw_reply = io_rw_rep_get(reply_fop);
307 
308  /*
309  * Copy attributes to client if reply received from read operation
310  * Skipping attribute_copy() if cksum validation is not allowed.
311  */
312  if (m0_is_read_rep(reply_fop) && op->op_code == M0_OC_READ &&
315  rwfop->crw_ivec.ci_nr, 0,
316  &rep_attr_ivec);
317 
318  application_attribute_copy(&rep_attr_ivec, tioreq, ioo,
319  &rw_reply->rwr_di_data_cksum);
320 
321  m0_indexvec_free(&rep_attr_ivec);
322  }
323  ioo->ioo_sns_state = rw_reply->rwr_repair_done;
324  M0_LOG(M0_DEBUG, "[%p] item %p[%u], reply received = %d, "
325  "sns state = %d", ioo, req_item,
326  req_item->ri_type->rit_opcode, rc, ioo->ioo_sns_state);
327  actual_bytes = rw_reply->rwr_count;
328  rc = gen_rep->gr_rc;
329  rc = rc ?: rw_reply->rwr_rc;
330  irfop->irf_reply_rc = rc;
331 
332  /* Update pending transaction number */
335  &ioo->ioo_obj->ob_entity, op, &rw_reply->rwr_mod_rep.fmr_remid);
336 
337 ref_dec:
338  /* For whatever reason, io didn't complete successfully.
339  * Reduce expected read bulk count */
340  if (rc < 0 && m0_is_read_fop(&iofop->if_fop))
342  m0_rpc_bulk_buf_length(rbulk));
343 
344  /* Propogate the error up as many stashed-rc layers as we can */
345  if (tioreq->ti_rc == 0)
346  tioreq->ti_rc = rc;
347 
348  /*
349  * Note: this is not necessary mean that this is 'real' error in the
350  * case of CROW is used (object is only created when it is first
351  * write)
352  */
353  if (xfer->nxr_rc == 0 && rc != 0) {
354  xfer->nxr_rc = rc;
355 
356 #define LOGMSG(ioo, rc, tireq) "ioo=%p from=%s rc=%d ti_rc=%d @"FID_F,\
357  (ioo), m0_rpc_conn_addr((tioreq)->ti_session->s_conn),\
358  (rc), (tioreq)->ti_rc, FID_P(&(tioreq)->ti_fid)
359 
360  if (rc == -ENOENT) /* normal for CROW */
361  M0_LOG(M0_DEBUG, LOGMSG(ioo, rc, tireq));
362  else
363  M0_LOG(M0_ERROR, LOGMSG(ioo, rc, tireq));
364 #undef LOGMSG
365  }
366 
367  /*
368  * Sining: don't set the ioo_rc utill replies come back from dgmode
369  * IO.
370  */
371  if (ioo->ioo_rc == 0 && ioo->ioo_dgmode_io_sent == true)
372  ioo->ioo_rc = rc;
373 
374  if (irfop->irf_pattr == PA_DATA)
375  tioreq->ti_databytes += rbulk->rb_bytes;
376  else
377  tioreq->ti_parbytes += rbulk->rb_bytes;
378 
379  M0_LOG(M0_INFO, "[%p] fop %p, Returned no of bytes = %llu, "
380  "expected = %llu",
381  ioo, &iofop->if_fop, (unsigned long long)actual_bytes,
382  (unsigned long long)rbulk->rb_bytes);
383 
384  /* Drops reference on reply fop. */
385  m0_fop_put0_lock(&iofop->if_fop);
387  m0_atomic64_dec(&instance->m0c_pending_io_nr);
388 
389  m0_mutex_lock(&xfer->nxr_lock);
391  if (should_ioreq_sm_complete(ioo)) {
392  m0_sm_state_set(&ioo->ioo_sm,
393  (M0_IN(ioreq_sm_state(ioo),
396 
397  /* post an ast to run iosm_handle_executed */
399  m0_sm_ast_post(ioo->ioo_oo.oo_sm_grp, &ioo->ioo_ast);
400  }
401  m0_mutex_unlock(&xfer->nxr_lock);
402 
403  M0_LOG(M0_DEBUG, "[%p] irfop=%p bulk=%p "FID_F
404  " Pending fops = %"PRIu64" bulk=%"PRIu64,
405  ioo, irfop, rbulk, FID_P(&tioreq->ti_fid),
408 
409  M0_LEAVE();
410 }
411 
/*
 * rio_replied callback for IO fops: runs in rpc-machine context, so it
 * does no policy work itself -- it pins the request fop (and, per the
 * comment below, the reply fop) and posts irf_ast so io_bottom_half()
 * runs under the operation's sm group.
 * NOTE(review): truncated listing -- the assignment of 'fop' from the
 * item (doc-line 430), the bob_of closing arguments (434), and the
 * reply-fop m0_fop_get inside the if-branch (446-447) are missing;
 * 'rep_fop' appears unused only because of the dropped lines.
 */
419 static void io_rpc_item_cb(struct m0_rpc_item *item)
420 {
421  struct m0_fop *fop;
422  struct m0_fop *rep_fop;
423  struct m0_io_fop *iofop;
424  struct ioreq_fop *reqfop;
425  struct m0_op_io *ioo;
426 
427  M0_PRE(item != NULL);
428  M0_ENTRY("rpc_item %p", item);
429 
431  iofop = M0_AMB(iofop, fop, if_fop);
432  reqfop = bob_of(iofop, struct ioreq_fop, irf_iofop, &iofop_bobtype);
433  ioo = bob_of(reqfop->irf_tioreq->ti_nwxfer, struct m0_op_io,
435  /*
436  * NOTE: RPC errors are handled in io_bottom_half(), which is called
437  * by reqfop->irf_ast.
438  */
439 
440  /*
441  * Acquires a reference on IO reply fop since its contents
442  * are needed for policy decisions in io_bottom_half().
443  * io_bottom_half() takes care of releasing the reference.
444  */
445  if (item->ri_reply != NULL) {
448  }
449  M0_LOG(M0_INFO, "ioreq_fop %p, target_ioreq %p io_request %p",
450  reqfop, reqfop->irf_tioreq, ioo);
451 
452  m0_fop_get(&reqfop->irf_iofop.if_fop);
453  m0_sm_ast_post(ioo->ioo_sm.sm_grp, &reqfop->irf_ast);
454 
455  M0_LEAVE();
456 }
457 
/*
 * AST bottom half for cob-create/truncate fop replies. Recovers the
 * target_ioreq from ast->sa_datum, extracts the reply rc (treating
 * -EEXIST on create as success, since a previous IO may already have
 * created the cob), records the pending-transaction id, propagates the
 * first error into ti/xfer/ioo, drops fop references and completes the
 * request state machine when all replies are in.
 * NOTE(review): truncated listing -- the function header line (doc-line
 * 458), the sync_record_update call (500-501), the reply-fop put (518),
 * the ccfop_nr decrement (520), and the state-set branches (523-526)
 * are missing from the extraction.
 */
459  struct m0_sm_ast *ast)
460 {
461  struct nw_xfer_request *xfer;
462  struct target_ioreq *ti;
463  struct cc_req_fop *cc_fop;
464  struct m0_op *op;
465  struct m0_op_io *ioo;
466  struct m0_fop_cob_op_reply *reply;
467  struct m0_fop *reply_fop = NULL;
468  struct m0_rpc_item *req_item;
469  struct m0_rpc_item *reply_item;
470  struct m0_be_tx_remid *remid = NULL;
471  int rc;
472 
473  ti = (struct target_ioreq *)ast->sa_datum;
474  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io, ioo_nwxfer,
475  &ioo_bobtype);
476  op = &ioo->ioo_oo.oo_oc.oc_op;
477  xfer = ti->ti_nwxfer;
478  cc_fop = &ti->ti_cc_fop;
479  req_item = &cc_fop->crf_fop.f_item;
480  reply_item = req_item->ri_reply;
481  rc = req_item->ri_error;
482  if (reply_item != NULL) {
483  reply_fop = m0_rpc_item_to_fop(reply_item);
484  rc = rc ?: m0_rpc_item_generic_reply_rc(reply_item);
485  }
486  if (rc < 0 || reply_item == NULL) {
487  M0_ASSERT(ergo(reply_item == NULL, rc != 0));
488  goto ref_dec;
489  }
490  reply = m0_fop_data(m0_rpc_item_to_fop(reply_item));
491  /*
492  * Ignoring the case when an attempt is made to create a cob on target
493  * where previous IO had created it.
494  */
495  rc = rc ? M0_IN(reply->cor_rc, (0, -EEXIST)) ? 0 : reply->cor_rc : 0;
496 
497  remid = &reply->cor_common.cor_mod_rep.fmr_remid;
498 
499  /* Update pending transaction number */
502  &ioo->ioo_obj->ob_entity, op, remid);
503  /*
504  * @todo: in case confd is updated, a check is necessary similar to
505  * that present in m0t1fs. See
506  * m0t1fs/linux_kernel/file.c::io_bottom_half().
507  */
508 
509 ref_dec:
510  if (ti->ti_rc == 0 && rc != 0)
511  ti->ti_rc = rc;
512  if (xfer->nxr_rc == 0 && rc != 0)
513  xfer->nxr_rc = rc;
514  if (ioo->ioo_rc == 0 && rc != 0)
515  ioo->ioo_rc = rc;
516  m0_fop_put0_lock(&cc_fop->crf_fop);
517  if (reply_fop != NULL)
519  m0_mutex_lock(&xfer->nxr_lock);
521  if (should_ioreq_sm_complete(ioo)) {
522  if (ioreq_sm_state(ioo) == IRS_TRUNCATE)
524  else
527  m0_sm_ast_post(ioo->ioo_oo.oo_sm_grp, &ioo->ioo_ast);
528  }
529  m0_mutex_unlock(&xfer->nxr_lock);
530 }
531 
/*
 * rio_replied callback for cob-create/truncate fops: stashes the
 * target_ioreq in the AST datum, pins the fop (and, per the comment
 * below, its reply) and posts crf_ast so the cc bottom half runs under
 * the operation's sm group.
 * NOTE(review): truncated listing -- the function header (doc-line 532),
 * the assignment of 'fop' from the item (540), the ioo_nwxfer bob_of
 * tail (545), and the reply-fop get inside the if-branch (550-551) are
 * missing from the extraction.
 */
533 {
534  struct m0_op_io *ioo;
535  struct cc_req_fop *cc_fop;
536  struct target_ioreq *ti;
537  struct m0_fop *fop;
538  struct m0_fop *rep_fop;
539 
541  cc_fop = M0_AMB(cc_fop, fop, crf_fop);
542  ti = M0_AMB(ti, cc_fop, ti_cc_fop);
543  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io,
544  ioo_nwxfer, &ioo_bobtype);
546  cc_fop->crf_ast.sa_datum = (void *)ti;
547  /* Reference on fop and its reply are released in cc_bottom_half. */
548  m0_fop_get(fop);
549  if (item->ri_reply != NULL) {
552  }
553 
554  m0_sm_ast_post(ioo->ioo_oo.oo_sm_grp, &cc_fop->crf_ast);
555 }
556 
557 /*
558  * io_rpc_item_cb can not be directly invoked from io fops code since it
559  * leads to build dependency of ioservice code over kernel code (kernel client).
560  * Hence, a new m0_rpc_item_ops structure is used for fops dispatched
561  * by client io requests in all cases.
562  */
/*
 * rpc item ops installed on client IO and cob-create/truncate fops so
 * that the client callbacks above run on reply.
 * NOTE(review): the initializer members (presumably .rio_replied =
 * io_rpc_item_cb / cc_rpc_item_cb, doc-lines 564 and 568) are missing
 * from this listing -- confirm against the upstream source.
 */
563 static const struct m0_rpc_item_ops item_ops = {
565 };
566 
567 static const struct m0_rpc_item_ops cc_item_ops = {
569 };
570 
/*
 * AST that moves the IO state machine to its completion state and then
 * posts ioo_ast to run the executed-phase handler.
 * NOTE(review): truncated listing -- the parameter list (doc-line 572),
 * the remaining state-set arguments (580-582), and the ast-post call
 * line (584) are missing from the extraction.
 */
571 static void
573 {
574  struct m0_op_io *ioo;
575 
576  ioo = bob_of(ast, struct m0_op_io, ioo_done_ast, &ioo_bobtype);
577  m0_sm_state_set(&ioo->ioo_sm,
578  (M0_IN(ioreq_sm_state(ioo),
579  (IRS_READING,
583 
585  &ioo->ioo_ast);
586 }
/*
 * Net buffer completion callback for passive bulk receive on the client
 * (read path). Recovers rbulk -> m0_io_fop -> ioreq_fop -> m0_op_io from
 * the completed buffer, lets the default bulk callback release the
 * buffer, and completes the request state machine if this was the last
 * outstanding bulk/fop.
 * NOTE(review): truncated listing -- the M0_LOG header (doc-line 620),
 * the m0_rpc_bulk_default_cb invocation (630), the state-set on the sm
 * (636), the rdbulk decrement (647-648), and the done-ast post inside
 * the if (650-651) are missing from the extraction.
 */
593 static void client_passive_recv(const struct m0_net_buffer_event *evt)
594 {
595  struct m0_rpc_bulk *rbulk;
596  struct m0_rpc_bulk_buf *buf;
597  struct m0_net_buffer *nb;
598  struct m0_io_fop *iofop;
599  struct ioreq_fop *reqfop;
600  struct m0_op_io *ioo;
601  uint32_t req_sm_state;
602 
603  M0_ENTRY();
604 
605  M0_PRE(evt != NULL);
606  M0_PRE(evt->nbe_buffer != NULL);
607 
608  nb = evt->nbe_buffer;
609  buf = (struct m0_rpc_bulk_buf *)nb->nb_app_private;
610  rbulk = buf->bb_rbulk;
611  M0_LOG(M0_DEBUG, "PASSIVE recv, e=%p status=%d, len=%"PRIu64" rbulk=%p",
612  evt, evt->nbe_status, evt->nbe_length, rbulk);
613 
614  iofop = M0_AMB(iofop, rbulk, if_rbulk);
615  reqfop = bob_of(iofop, struct ioreq_fop, irf_iofop, &iofop_bobtype);
616  ioo = bob_of(reqfop->irf_tioreq->ti_nwxfer, struct m0_op_io,
617  ioo_nwxfer, &ioo_bobtype);
618 
619  M0_ASSERT(m0_is_read_fop(&iofop->if_fop));
621  "irfop=%p "FID_F" Pending fops = %"PRIu64"bulk = %"PRIu64,
622  reqfop, FID_P(&reqfop->irf_tioreq->ti_fid),
625 
626  /*
627  * buf will be released in this callback. But rbulk is still valid
628  * after that.
629  */
631  if ((evt->nbe_status != 0) ||
632  (iofop->if_fop.f_item.ri_error != 0))
633  return;
634 
635  /* Set io request's state*/
637 
638  req_sm_state = ioreq_sm_state(ioo);
639  if (req_sm_state != IRS_READ_COMPLETE &&
640  req_sm_state != IRS_WRITE_COMPLETE) {
641  /*
642  * It is possible that io_bottom_half() has already
643  * reduced the nxr_rdbulk_nr to 0 by this time, due to FOP
644  * receiving some error.
645  */
646 
649  if (should_ioreq_sm_complete(ioo)) {
652  }
653  }
655 
656  M0_LEAVE();
657 }
658 
/*
 * Net buffer callback table wiring client_passive_recv into the bulk
 * transfer machinery.
 * NOTE(review): truncated listing -- the table's declaration line
 * (doc-lines 659-660) and the per-queue callback entries (662-665) are
 * missing from the extraction.
 */
661  .nbc_cb = {
666  }
667 };
668 
/*
 * Stores bulk buffers with the transfer machine and posts the IO fop to
 * the given rpc session. Errors from m0_rpc_post() are deliberately
 * ignored (returns 0) so subsequent fop submission continues and the
 * request can enter dgmode; only bulk-store errors are returned.
 * NOTE(review): truncated listing -- the m0_rpc_bulk_store() call head
 * (doc-line 687), its trailing arguments (689), the item session/resend
 * setup (694-696), and an M0_ADDB2_ADD line (700) are missing from the
 * extraction.
 */
672 M0_INTERNAL int ioreq_fop_async_submit(struct m0_io_fop *iofop,
673  struct m0_rpc_session *session)
674 {
675  int rc;
676  struct m0_fop_cob_rw *rwfop;
677  struct m0_rpc_item *item;
678 
679  M0_ENTRY("m0_io_fop %p m0_rpc_session %p", iofop, session);
680 
681  M0_PRE(iofop != NULL);
682  M0_PRE(session != NULL);
683 
684  rwfop = io_rw_get(&iofop->if_fop);
685  M0_ASSERT(rwfop != NULL);
686 
688  rwfop->crw_desc.id_descs,
690  if (rc != 0)
691  goto out;
692 
693  item = &iofop->if_fop.f_item;
697  rc = m0_rpc_post(item);
698  M0_LOG(M0_INFO, "IO fops submitted to rpc, rc = %d", rc);
699 
701  m0_sm_id_get(&item->ri_sm));
702  /*
703  * Ignoring error from m0_rpc_post() so that the subsequent fop
704  * submission goes on. This is to ensure that the ioreq gets into dgmode
705  * subsequently without exiting from the healthy mode IO itself.
706  */
707  return M0_RC(0);
708 
709 out:
710  /*
711  * In case error is encountered either by m0_rpc_bulk_store() or
712  * queued net buffers, if any, will be deleted at io_req_fop_release.
713  */
714  return M0_RC(rc);
715 }
716 
717 /* Finds out pargrp_iomap from array of such structures in m0_op_ioo. */
718 static void ioreq_pgiomap_find(struct m0_op_io *ioo,
719  uint64_t grpid,
720  uint64_t *cursor,
721  struct pargrp_iomap **out)
722 {
723  uint64_t i;
724 
725  M0_PRE(ioo != NULL);
726  M0_PRE(out != NULL);
727  M0_PRE(cursor != NULL);
728  M0_PRE(*cursor < ioo->ioo_iomap_nr);
729  M0_ENTRY("group_id = %3"PRIu64", cursor = %3"PRIu64, grpid, *cursor);
730 
731  for (i = *cursor; i < ioo->ioo_iomap_nr; ++i)
732  if (ioo->ioo_iomaps[i]->pi_grpid == grpid) {
733  *out = ioo->ioo_iomaps[i];
734  *cursor = i;
735  break;
736  }
737 
738  M0_POST(i < ioo->ioo_iomap_nr);
739  M0_LEAVE();
740 }
741 
/*
 * Degraded-mode read: walks the fop's bulk buffer index vectors, groups
 * consecutive indices that belong to the same parity group, and hands
 * each run to the matching pargrp_iomap's pi_dgmode_process().
 * Returns 0 on success or the first pi_dgmode_process() error.
 * NOTE(review): truncated listing -- the declaration of 'index'
 * (doc-line 753), the seg_nr assignment (769), and the head of the
 * page-alignment assert (778) are missing from the extraction.
 */
745 M0_INTERNAL int ioreq_fop_dgmode_read(struct ioreq_fop *irfop)
746 {
747  int rc;
748  uint32_t cnt;
749  uint32_t seg;
750  uint32_t seg_nr;
751  uint64_t grpid;
752  uint64_t pgcur = 0;
754  struct m0_op_io *ioo;
755  struct m0_rpc_bulk *rbulk;
756  struct pargrp_iomap *map = NULL;
757  struct m0_rpc_bulk_buf *rbuf;
758 
759  M0_PRE(irfop != NULL);
760  M0_ENTRY("target fid = "FID_F, FID_P(&irfop->irf_tioreq->ti_fid));
761 
762  ioo = bob_of(irfop->irf_tioreq->ti_nwxfer, struct m0_op_io,
763  ioo_nwxfer, &ioo_bobtype);
764  rbulk = &irfop->irf_iofop.if_rbulk;
765 
766  m0_tl_for (rpcbulk, &rbulk->rb_buflist, rbuf) {
767 
768  index = rbuf->bb_zerovec.z_index;
770 
771  for (seg = 0; seg < seg_nr; ) {
772 
773  grpid = pargrp_id_find(index[seg], ioo, irfop);
774  for (cnt = 1, ++seg; seg < seg_nr; ++seg) {
775 
776  M0_ASSERT(ergo(seg > 0, index[seg] >
777  index[seg - 1]));
779  (void *)index[seg]));
780 
781  if (grpid ==
782  pargrp_id_find(index[seg], ioo, irfop))
783  ++cnt;
784  else
785  break;
786  }
787 
788  ioreq_pgiomap_find(ioo, grpid, &pgcur, &map);
789  M0_ASSERT(map != NULL);
790  rc = map->pi_ops->pi_dgmode_process(map,
791  irfop->irf_tioreq, &index[seg - cnt],
792  cnt);
793  if (rc != 0)
794  return M0_ERR(rc);
795  }
796  } m0_tl_endfor;
797  return M0_RC(0);
798 }
799 
800 static void ioreq_cc_fop_release(struct m0_ref *ref)
801 {
802  struct m0_fop *fop = M0_AMB(fop, ref, f_ref);
803 
804  M0_ENTRY("fop: %p %s", fop, m0_fop_name(fop));
805  m0_fop_fini(fop);
806  /* no need to free the memory, because it is embedded into ti */
807  M0_LEAVE();
808 }
809 
/*
 * Builds the embedded cob-create or cob-truncate fop for a target:
 * picks the fop type from ti_req_type, initialises the fop with
 * ioreq_cc_fop_release as its destructor, fills the common cob fields
 * (gob/cob fids, pool version, cob index) and the request-specific part
 * (CROW create attributes, or the truncation extent vector and size).
 * Returns 0 for an empty truncate vector without building anything.
 * NOTE(review): truncated listing -- the fop-type selection tail
 * (doc-line 821), the m0_fop_init/m0_fop_data_alloc sequence (827-828),
 * the ast/item-ops wiring (834, 836-839), the indexvec wire conversion
 * head (861-862), the rc reset (871), and the ITEM_ARG log tail (875)
 * are missing from the extraction.
 */
810 M0_INTERNAL int ioreq_cc_fop_init(struct target_ioreq *ti)
811 {
812  struct m0_fop *fop;
813  struct m0_fop_cob_common *common;
814  struct m0_op_io *ioo;
815  struct m0_obj_attr *io_attr;
816  int rc;
817  struct m0_fop_type *fopt;
818  struct m0_rpc_item *item;
819 
820  fopt = ti->ti_req_type == TI_COB_TRUNCATE ?
822  if (ti->ti_req_type == TI_COB_TRUNCATE &&
823  ti->ti_trunc_ivec.iv_vec.v_nr == 0)
824  return 0;
825  fop = &ti->ti_cc_fop.crf_fop;
826  M0_LOG(M0_DEBUG, "fop=%p", fop);
829  if (rc != 0) {
830  m0_fop_fini(fop);
831  goto out;
832  }
833  ti->ti_cc_fop_inited = true;
835 
840  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io, ioo_nwxfer,
841  &ioo_bobtype);
842  common = m0_cobfop_common_get(fop);
843  common->c_gobfid = ioo->ioo_oo.oo_fid;
844  common->c_cobfid = ti->ti_fid;
845  common->c_pver = ioo->ioo_pver;
846  common->c_cob_type = M0_COB_IO;
847  common->c_cob_idx = m0_fid_cob_device_id(&ti->ti_fid);
848  if (ti->ti_req_type == TI_COB_CREATE) {
849  common->c_flags |= M0_IO_FLAG_CROW;
850  common->c_body.b_pver = ioo->ioo_pver;
851  common->c_body.b_nlink = 1;
852  common->c_body.b_valid |= M0_COB_PVER;
853  common->c_body.b_valid |= M0_COB_NLINK;
854  common->c_body.b_valid |= M0_COB_LID;
855  io_attr = m0_io_attr(ioo);
856  common->c_body.b_lid = io_attr->oa_layout_id;
857  } else if (ti->ti_req_type == TI_COB_TRUNCATE) {
858  struct m0_fop_cob_truncate *trunc = m0_fop_data(fop);
859  uint32_t diff;
860 
863  ti->ti_trunc_ivec.iv_vec.v_nr, 0, &trunc->ct_io_ivec);
864  if (rc != 0)
865  goto out;
866 
867  trunc->ct_size = m0_io_count(&trunc->ct_io_ivec);
868  M0_LOG(M0_DEBUG, "trunc count%"PRIu64" diff:%d\n",
869  trunc->ct_size, diff);
870  }
872 
873  item = &fop->f_item;
874  M0_LOG(M0_DEBUG, "item="ITEM_FMT" osr_xid=%"PRIu64,
876 out:
877  return M0_RC(rc);
878 }
879 
/*
 * Reference-drop callback for IO fops. If the rpc bulk list is still
 * dirty, deletes its net buffers from the transfer machine and waits
 * (dropping the rpc machine lock to avoid deadlock with the net event
 * worker) for the deletion callbacks, then finalises the ioreq_fop, the
 * io fop and frees the ioreq_fop allocation.
 * NOTE(review): truncated listing -- the clink init (doc-line 920), the
 * rdbulk subtraction tail (934, 936), the iofop_nr decrement (938), the
 * chan-wait/clink teardown (952, 955-956), and the unqueued-buffer
 * assert (961) are missing from the extraction.
 */
886 static void ioreq_fop_release(struct m0_ref *ref)
887 {
888  struct m0_fop *fop;
889  struct m0_io_fop *iofop;
890  struct ioreq_fop *reqfop;
891  struct m0_fop_cob_rw *rwfop;
892  struct m0_rpc_bulk *rbulk;
893  struct nw_xfer_request *xfer;
894  struct m0_rpc_machine *rmach;
895  struct m0_rpc_item *item;
896 
897  M0_ENTRY("ref %p", ref);
898  M0_PRE(ref != NULL);
899 
900  fop = M0_AMB(fop, ref, f_ref);
901  rmach = m0_fop_rpc_machine(fop);
902  iofop = M0_AMB(iofop, fop, if_fop);
903  reqfop = bob_of(iofop, struct ioreq_fop, irf_iofop, &iofop_bobtype);
904  rbulk = &iofop->if_rbulk;
905  xfer = reqfop->irf_tioreq->ti_nwxfer;
906  item = &fop->f_item;
907 
908  /*
909  * Release the net buffers if rpc bulk object is still dirty.
910  * And wait on channel till all net buffers are deleted from
911  * transfer machine.
912  */
913  m0_mutex_lock(&xfer->nxr_lock);
914  m0_mutex_lock(&rbulk->rb_mutex);
915  if (!m0_tlist_is_empty(&rpcbulk_tl, &rbulk->rb_buflist)) {
916  struct m0_clink clink;
917  size_t buf_nr;
918  size_t non_queued_buf_nr;
919 
921  m0_clink_add(&rbulk->rb_chan, &clink);
922  buf_nr = rpcbulk_tlist_length(&rbulk->rb_buflist);
923  non_queued_buf_nr = m0_rpc_bulk_store_del_unqueued(rbulk);
924  m0_mutex_unlock(&rbulk->rb_mutex);
925 
926  m0_rpc_bulk_store_del(rbulk);
927  M0_LOG(M0_DEBUG, "fop %p, %p[%u], bulk %p, buf_nr %llu, "
928  "non_queued_buf_nr %llu", &iofop->if_fop, item,
929  item->ri_type->rit_opcode, rbulk,
930  (unsigned long long)buf_nr,
931  (unsigned long long)non_queued_buf_nr);
932 
933  if (m0_is_read_fop(&iofop->if_fop))
935  non_queued_buf_nr);
937  /* rio_replied() is not invoked for this item. */
939  m0_mutex_unlock(&xfer->nxr_lock);
940 
941  /*
942  * If there were some queued net bufs which had to be deleted,
943  * then it is required to wait for their callbacks.
944  */
945  if (buf_nr > non_queued_buf_nr) {
946  /*
947  * rpc_machine_lock may be needed from nlx_tm_ev_worker
948  * thread, which is going to wake us up. So we should
949  * release it to avoid deadlock.
950  */
951  m0_rpc_machine_unlock(rmach);
953  m0_rpc_machine_lock(rmach);
954  }
957  } else {
958  m0_mutex_unlock(&rbulk->rb_mutex);
959  m0_mutex_unlock(&xfer->nxr_lock);
960  }
962 
963  rwfop = io_rw_get(&iofop->if_fop);
964  M0_ASSERT(rwfop != NULL);
965  ioreq_fop_fini(reqfop);
966  /* see ioreq_fop_fini(). */
967  ioreq_fop_bob_fini(reqfop);
968  m0_io_fop_fini(iofop);
969  m0_free(reqfop);
970 
971  M0_LEAVE();
972 }
973 
/*
 * Initialises an ioreq_fop for the given target and page attribute:
 * brands it, links it into the iofops tlist, wires io_bottom_half as
 * its AST callback, picks the read/write fop type from the current sm
 * state and initialises the embedded m0_io_fop. On success, clears the
 * CROW flag for reads (so non-existing objects yield -ENOENT) and
 * installs item_ops so replies are routed to the client callbacks.
 * NOTE(review): truncated listing -- the M0_ASSERT state list tail
 * (doc-lines 995-996), the fop-type selection tail and m0_io_fop_init
 * arguments (1007-1008, 1010), and the invariant M0_POST (1029) are
 * missing from the extraction.
 */
977 M0_INTERNAL int ioreq_fop_init(struct ioreq_fop *fop,
978  struct target_ioreq *ti,
979  enum page_attr pattr)
980 {
981  int rc;
982  struct m0_fop_type *fop_type;
983  struct m0_op_io *ioo;
984  struct m0_fop_cob_rw *rwfop;
985 
986  M0_ENTRY("ioreq_fop %p, target_ioreq %p", fop, ti);
987 
988  M0_PRE(fop != NULL);
989  M0_PRE(ti != NULL);
990  M0_PRE(M0_IN(pattr, (PA_DATA, PA_PARITY)));
991 
992  ioo = bob_of(ti->ti_nwxfer, struct m0_op_io, ioo_nwxfer,
993  &ioo_bobtype);
994  M0_ASSERT(M0_IN(ioreq_sm_state(ioo),
997 
998  ioreq_fop_bob_init(fop);
999  iofops_tlink_init(fop);
1000  fop->irf_pattr = pattr;
1001  fop->irf_tioreq = ti;
1002  fop->irf_reply_rc = 0;
1003  fop->irf_ast.sa_cb = io_bottom_half;
1004  fop->irf_ast.sa_mach = &ioo->ioo_sm;
1005 
1006  fop_type = M0_IN(ioreq_sm_state(ioo),
1009  rc = m0_io_fop_init(&fop->irf_iofop, &ioo->ioo_oo.oo_fid,
1011  if (rc == 0) {
1012  /*
1013  * Currently m0_io_fop_init sets CROW flag for a READ op.
1014  * Diable the flag to force ioservice to return -ENOENT for
1015  * non-existing objects. (Temporary solution)
1016  */
1017  rwfop = io_rw_get(&fop->irf_iofop.if_fop);
1018  if (ioo->ioo_oo.oo_oc.oc_op.op_code == M0_OC_READ) {
1019  rwfop->crw_flags &= ~M0_IO_FLAG_CROW;
1020  }
1021 
1022  /*
1023  * Changes ri_ops of rpc item so as to execute client's own
1024  * callback on receiving a reply.
1025  */
1026  fop->irf_iofop.if_fop.f_item.ri_ops = &item_ops;
1027  }
1028 
1030  return M0_RC(rc);
1031 }
1032 
/*
 * Finalises an ioreq_fop: unlinks it from the iofops tlist and clears
 * its back-pointers. Deliberately does NOT finalise the bob brand or
 * the embedded io fop (see the comments below -- the rpc layer frees
 * the fop, and the brand must stay checkable from m0_rpc_item).
 * NOTE(review): truncated listing -- an invariant check at doc-line
 * 1040 (presumably M0_PRE(ioreq_fop_invariant(fop))) is missing from
 * the extraction.
 */
1036 M0_INTERNAL void ioreq_fop_fini(struct ioreq_fop *fop)
1037 {
1038  M0_ENTRY("ioreq_fop %p", fop);
1039 
1041 
1042  /*
1043  * IO fop is finalized (m0_io_fop_fini()) through rpc sessions code
1044  * using m0_rpc_item::m0_rpc_item_ops::rio_free().
1045  * see m0_io_item_free().
1046  */
1047 
1048  iofops_tlink_fini(fop);
1049 
1050  /*
1051  * ioreq_bob_fini() is not done here so that struct ioreq_fop
1052  * can be retrieved from struct m0_rpc_item using bob_of() and
1053  * magic numbers can be checked.
1054  */
1055 
1056  fop->irf_tioreq = NULL;
1057  fop->irf_ast.sa_cb = NULL;
1058  fop->irf_ast.sa_mach = NULL;
1059 
1060  M0_LEAVE();
1061 }
1062 
1063 #undef M0_TRACE_SUBSYSTEM
1064 
1065 /*
1066  * Local variables:
1067  * c-indentation-style: "K&R"
1068 
1069  * c-basic-offset: 8
1070  * tab-width: 8
1071  * fill-column: 80
1072  * scroll-step: 1
1073  * End:
1074  */
1075 /*
1076  * vim: tabstop=8 shiftwidth=8 noexpandtab textwidth=80 nowrap
1077  */
struct m0_file ioo_flock
static void m0_atomic64_inc(struct m0_atomic64 *a)
uint32_t b_nlink
Definition: md_fops.h:81
M0_INTERNAL void m0_ivec_cursor_init(struct m0_ivec_cursor *cur, const struct m0_indexvec *ivec)
Definition: vec.c:707
M0_INTERNAL int m0_rpc_post(struct m0_rpc_item *item)
Definition: rpc.c:63
uint32_t rit_opcode
Definition: item.h:474
M0_INTERNAL void m0_chan_wait(struct m0_clink *link)
Definition: chan.c:336
uint64_t c_flags
Definition: io_fops.h:477
m0_time_t ri_resend_interval
Definition: item.h:144
uint64_t rwr_count
Definition: io_fops.h:324
static void io_bottom_half(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: io_req_fop.c:246
#define M0_PRE(cond)
static void application_attribute_copy(struct m0_indexvec *rep_ivec, struct target_ioreq *ti, struct m0_op_io *ioo, struct m0_buf *buf)
Definition: io_req_fop.c:134
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
M0_INTERNAL struct m0_fop_cob_common * m0_cobfop_common_get(struct m0_fop *fop)
Definition: io_fops.c:990
M0_INTERNAL int ioreq_fop_dgmode_read(struct ioreq_fop *irfop)
Definition: io_req_fop.c:745
struct m0_fop crf_fop
static uint32_t seg_nr
Definition: net.c:119
static int(* diff[M0_PARITY_CAL_ALGO_NR])(struct m0_parity_math *math, struct m0_buf *old, struct m0_buf *new, struct m0_buf *parity, uint32_t index)
Definition: parity_math.c:290
uint32_t b_valid
Definition: md_fops.h:76
#define NULL
Definition: misc.h:38
M0_INTERNAL void m0_clink_init(struct m0_clink *link, m0_chan_cb_t cb)
Definition: chan.c:201
static struct m0_bufvec dst
Definition: xform.c:61
map
Definition: processor.c:112
struct m0_atomic64 nxr_rdbulk_nr
M0_INTERNAL bool m0__obj_is_di_enabled(struct m0_op_io *ioo)
Definition: io.c:660
M0_INTERNAL int m0_rpc_bulk_store(struct m0_rpc_bulk *rbulk, const struct m0_rpc_conn *conn, struct m0_net_buf_desc_data *to_desc, const struct m0_net_buffer_callbacks *bulk_cb)
Definition: bulk.c:520
M0_INTERNAL void m0_clink_del_lock(struct m0_clink *link)
Definition: chan.c:293
m0_bindex_t * z_index
Definition: vec.h:516
#define ergo(a, b)
Definition: misc.h:293
uint32_t rwr_repair_done
Definition: io_fops.h:333
void(* sa_cb)(struct m0_sm_group *grp, struct m0_sm_ast *)
Definition: sm.h:506
bool m0_rpc_item_is_generic_reply_fop(const struct m0_rpc_item *item)
Definition: fom_generic.c:75
M0_TL_DESCR_DEFINE(iofops, "List of IO fops", M0_INTERNAL, struct ioreq_fop, irf_link, irf_magic, M0_IOFOP_MAGIC, M0_TIOREQ_MAGIC)
M0_INTERNAL bool m0__is_oostore(struct m0_client *instance)
Definition: client.c:255
static struct m0_sm_group * grp
Definition: bytecount.c:38
M0_INTERNAL void m0_fop_init(struct m0_fop *fop, struct m0_fop_type *fopt, void *data, void(*fop_release)(struct m0_ref *))
Definition: fop.c:79
#define M0_LOG(level,...)
Definition: trace.h:167
M0_LEAVE()
const struct m0_op_io_ops * ioo_ops
struct m0_sm_ast crf_ast
struct m0_io_fop irf_iofop
Definition: pg.h:866
M0_INTERNAL void m0_sm_ast_post(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: sm.c:135
static void m0_atomic64_sub(struct m0_atomic64 *a, int64_t num)
struct m0_sm_group * oo_sm_grp
struct m0_vec ov_vec
Definition: vec.h:147
struct m0_chan rb_chan
Definition: bulk.h:258
static const struct m0_rpc_item_ops cc_item_ops
Definition: io_req_fop.c:567
struct m0_rpc_bulk if_rbulk
Definition: io_fops.h:177
struct m0_sm ri_sm
Definition: item.h:181
static void m0_sm_io_done_ast(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: io_req_fop.c:572
enum target_ioreq_type ti_req_type
struct m0_op oc_op
int32_t ri_error
Definition: item.h:161
struct m0_net_buf_desc_data * id_descs
Definition: io_fops.h:313
void * m0_fop_data(const struct m0_fop *fop)
Definition: fop.c:220
uint32_t c_cob_type
Definition: io_fops.h:474
M0_INTERNAL void m0_indexvec_free(struct m0_indexvec *ivec)
Definition: vec.c:553
struct m0_indexvec ti_trunc_ivec
Definition: pg.h:799
uint64_t m0_bindex_t
Definition: types.h:80
struct m0_fid c_cobfid
Definition: io_fops.h:465
Definition: sm.h:504
static int void * buf
Definition: dir.c:1019
static struct m0_rpc_session session
Definition: formation2.c:38
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
uint32_t ci_nr
Definition: vec.h:635
M0_ADDB2_ADD(M0_AVI_FS_CREATE, new_fid.f_container, new_fid.f_key, mode, rc)
m0_bcount_t nbe_length
Definition: net.h:1226
struct m0_net_buffer * nbe_buffer
Definition: net.h:1194
M0_INTERNAL bool m0_is_read_rep(const struct m0_fop *fop)
Definition: io_fops.c:933
static struct m0_rpc_item * item
Definition: item.c:56
M0_INTERNAL uint64_t m0__obj_lid(struct m0_obj *obj)
Definition: obj.c:126
Definition: sock.c:887
#define ITEM_ARG(item)
Definition: item.h:618
M0_INTERNAL bool m0_tlist_is_empty(const struct m0_tl_descr *d, const struct m0_tl *list)
Definition: tlist.c:96
struct m0_sm ioo_sm
#define m0_tl_endfor
Definition: tlist.h:700
struct m0_vec iv_vec
Definition: vec.h:139
return M0_RC(rc)
op
Definition: libdemo.c:64
unsigned int op_code
Definition: client.h:650
static uint32_t unit_size
Definition: layout.c:53
#define M0_ENTRY(...)
Definition: trace.h:170
Definition: buf.h:37
static struct m0_sm_ast ast[NR]
Definition: locality.c:44
uint64_t osr_xid
Definition: onwire.h:105
void(* iro_iosm_handle_executed)(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: pg.h:622
m0_bindex_t * iv_index
Definition: vec.h:141
int32_t m0_rpc_item_generic_reply_rc(const struct m0_rpc_item *reply)
Definition: fom_generic.c:81
int m0_obj_layout_id_to_unit_size(uint64_t layout_id)
Definition: obj.c:851
void m0_fop_put0_lock(struct m0_fop *fop)
Definition: fop.c:213
int i
Definition: dir.c:1033
bool ioo_dgmode_io_sent
const struct m0_bob_type ioo_bobtype
Definition: io_req.c:153
#define PRIu64
Definition: types.h:58
static void ioreq_fop_release(struct m0_ref *ref)
Definition: io_req_fop.c:886
Definition: client.h:641
int32_t nbe_status
Definition: net.h:1218
M0_INTERNAL bool ioreq_fop_invariant(const struct ioreq_fop *fop)
Definition: io_req_fop.c:62
struct nw_xfer_request ioo_nwxfer
struct m0_rpc_machine * m0_fop_rpc_machine(const struct m0_fop *fop)
Definition: fop.c:360
uint64_t ti_parbytes
return M0_ERR(-EOPNOTSUPP)
M0_INTERNAL const char * m0_fop_name(const struct m0_fop *fop)
Definition: fop.c:55
struct m0_op_obj ioo_oo
void * sa_datum
Definition: sm.h:508
M0_INTERNAL void m0_rpc_machine_unlock(struct m0_rpc_machine *machine)
Definition: rpc_machine.c:558
M0_INTERNAL struct m0_fop_cob_rw_reply * io_rw_rep_get(struct m0_fop *fop)
Definition: io_fops.c:1056
struct m0_fop if_fop
Definition: io_fops.h:174
M0_INTERNAL void m0_rpc_bulk_default_cb(const struct m0_net_buffer_event *evt)
Definition: bulk.c:140
Definition: trace.h:482
M0_INTERNAL struct m0_client * m0__op_instance(const struct m0_op *op)
Definition: client.c:236
Definition: cnt.h:36
const struct m0_net_buffer_callbacks client__buf_bulk_cb
Definition: io_req_fop.c:660
enum sns_repair_state ioo_sns_state
struct m0_indexvec ioo_ext
#define M0_AMB(obj, ptr, field)
Definition: misc.h:320
Definition: refs.h:34
int irf_reply_rc
Definition: pg.h:863
struct m0_io_descs crw_desc
Definition: io_fops.h:400
#define M0_ASSERT(cond)
struct m0_fid ioo_pver
struct m0_rpc_item_header2 ri_header
Definition: item.h:193
void m0_sm_state_set(struct m0_sm *mach, int state)
Definition: sm.c:478
struct m0_rpc_machine * m0_fop_session_machine(const struct m0_rpc_session *s)
Definition: fop.c:453
uint32_t c_cob_idx
Definition: io_fops.h:471
M0_INTERNAL bool m0_is_io_fop_rep(const struct m0_fop *fop)
Definition: io_fops.c:945
static struct m0_fop reply_fop
Definition: fsync.c:64
#define bob_of(ptr, type, field, bt)
Definition: bob.h:140
static void m0_atomic64_dec(struct m0_atomic64 *a)
struct m0_atomic64 nxr_ccfop_nr
struct m0_sm_ast ioo_done_ast
M0_INTERNAL int m0_indexvec_wire2mem(struct m0_io_indexvec *wire_ivec, int max_frags_nr, uint32_t bshift, struct m0_indexvec *mem_ivec)
Definition: vec.c:1058
#define ITEM_FMT
Definition: item.h:617
uint64_t pi_grpid
void * nb_app_private
Definition: net.h:1477
struct m0_obj * ioo_obj
M0_INTERNAL struct m0_obj_attr * m0_io_attr(struct m0_op_io *ioo)
Definition: utils.c:302
uint64_t b_lid
Definition: md_fops.h:90
struct m0_fop * m0_fop_get(struct m0_fop *fop)
Definition: fop.c:162
const struct m0_rpc_item_type * ri_type
Definition: item.h:200
struct m0_fid c_gobfid
Definition: io_fops.h:460
struct m0_rpc_item * ri_reply
Definition: item.h:163
struct m0_fop_mod_rep rwr_mod_rep
Definition: io_fops.h:339
struct m0_sm_group * sm_grp
Definition: sm.h:321
M0_INTERNAL uint32_t m0_fid_cob_device_id(const struct m0_fid *cob_fid)
Definition: fid_convert.c:81
struct m0_buf rwr_di_data_cksum
Definition: io_fops.h:342
struct m0_fid b_pver
Definition: md_fops.h:93
uint64_t ri_nr_sent_max
Definition: item.h:146
#define M0_POST(cond)
struct m0_0vec bb_zerovec
Definition: bulk.h:179
struct m0_fid oo_fid
struct m0_sm_ast ioo_ast
uint32_t v_nr
Definition: vec.h:51
struct m0_sm_ast irf_ast
Definition: pg.h:872
m0_net_buffer_cb_proc_t nbc_cb[M0_NET_QT_NR]
Definition: net.h:1272
M0_INTERNAL int ioreq_fop_async_submit(struct m0_io_fop *iofop, struct m0_rpc_session *session)
Definition: io_req_fop.c:672
M0_INTERNAL int m0_fop_data_alloc(struct m0_fop *fop)
Definition: fop.c:71
m0_bcount_t * v_count
Definition: vec.h:53
M0_INTERNAL void m0_fop_fini(struct m0_fop *fop)
Definition: fop.c:136
struct m0_rpc_session * ti_session
static struct m0_clink clink[RDWR_REQUEST_MAX]
M0_INTERNAL bool m0_ivec_cursor_move(struct m0_ivec_cursor *cur, m0_bcount_t count)
Definition: vec.c:718
struct m0_op_common oo_oc
static void ioreq_cc_bottom_half(struct m0_sm_group *grp, struct m0_sm_ast *ast)
Definition: io_req_fop.c:458
struct m0_io_indexvec ct_io_ivec
Definition: io_fops.h:509
#define FID_P(f)
Definition: fid.h:77
M0_INTERNAL bool addr_is_network_aligned(void *addr)
Definition: utils.c:29
uint64_t rb_id
Definition: bulk.h:267
M0_INTERNAL struct m0_op * m0__ioo_to_op(struct m0_op_io *ioo)
Definition: client.c:249
struct m0_bob_type iofop_bobtype
Definition: io_req_fop.c:47
static void ioreq_cc_rpc_item_cb(struct m0_rpc_item *item)
Definition: io_req_fop.c:532
struct m0_bufvec z_bvec
Definition: vec.h:514
static int64_t m0_atomic64_get(const struct m0_atomic64 *a)
void(* rio_replied)(struct m0_rpc_item *item)
Definition: item.h:300
int32_t ioo_rc
M0_INTERNAL uint32_t m0_indexvec_pack(struct m0_indexvec *iv)
Definition: vec.c:521
struct m0_fop_type m0_fop_cob_readv_fopt
Definition: io_fops.c:71
M0_INTERNAL size_t m0_rpc_bulk_buf_length(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:550
uint64_t ti_databytes
M0_INTERNAL size_t m0_rpc_bulk_store_del_unqueued(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:190
M0_INTERNAL int ioreq_cc_fop_init(struct target_ioreq *ti)
Definition: io_req_fop.c:810
struct target_ioreq * irf_tioreq
Definition: pg.h:881
struct m0_ref f_ref
Definition: fop.h:80
M0_INTERNAL int m0_indexvec_mem2wire(struct m0_indexvec *mem_ivec, int max_frags_nr, uint32_t bshift, struct m0_io_indexvec *wire_ivec)
Definition: vec.c:1087
M0_INTERNAL void m0_rpc_machine_lock(struct m0_rpc_machine *machine)
Definition: rpc_machine.c:551
struct m0_fid ti_fid
static void ioreq_pgiomap_find(struct m0_op_io *ioo, uint64_t grpid, uint64_t *cursor, struct pargrp_iomap **out)
Definition: io_req_fop.c:718
struct cc_req_fop ti_cc_fop
M0_INTERNAL void m0_clink_add(struct m0_chan *chan, struct m0_clink *link)
Definition: chan.c:228
const struct m0_rpc_item_ops * ri_ops
Definition: item.h:149
M0_INTERNAL bool m0__obj_is_cksum_validation_allowed(struct m0_op_io *ioo)
Definition: io.c:665
struct m0_mutex nxr_lock
struct m0_rpc_session * ri_session
Definition: item.h:147
M0_INTERNAL m0_bcount_t m0_io_count(const struct m0_io_indexvec *io_info)
Definition: vec.c:999
struct m0_fop_type m0_fop_cob_create_fopt
Definition: io_fops.c:75
struct m0_entity ob_entity
Definition: client.h:789
page_attr
M0_INTERNAL int ioreq_fop_init(struct ioreq_fop *fop, struct target_ioreq *ti, enum page_attr pattr)
Definition: io_req_fop.c:977
#define _0C(exp)
Definition: assert.h:311
M0_INTERNAL m0_bindex_t m0_ivec_cursor_index(const struct m0_ivec_cursor *cur)
Definition: vec.c:733
M0_INTERNAL void m0_clink_fini(struct m0_clink *link)
Definition: chan.c:208
m0_bcount_t rb_bytes
Definition: bulk.h:260
M0_INTERNAL bool m0_rpc_bulk_is_empty(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:539
struct m0_atomic64 nxr_iofop_nr
static struct m0_fop * fop
Definition: item.c:57
M0_TL_DEFINE(iofops, M0_INTERNAL, struct ioreq_fop)
static struct m0 instance
Definition: main.c:78
struct m0_fop * m0_rpc_item_to_fop(const struct m0_rpc_item *item)
Definition: fop.c:346
static struct m0_be_seg * seg
Definition: btree.c:40
uint64_t ioo_iomap_nr
static uint32_t ioreq_sm_state(const struct io_request *req)
Definition: file.c:975
struct m0_fid c_pver
Definition: io_fops.h:468
struct m0_tl rb_buflist
Definition: bulk.h:256
M0_INTERNAL void m0_io_fop_fini(struct m0_io_fop *iofop)
Definition: io_fops.c:897
M0_INTERNAL struct m0_file * m0_client_fop_to_file(struct m0_fop *fop)
Definition: io_req_fop.c:88
M0_INTERNAL int m0_io_fop_init(struct m0_io_fop *iofop, const struct m0_fid *gfid, struct m0_fop_type *ftype, void(*fop_release)(struct m0_ref *))
Definition: io_fops.c:865
struct nw_xfer_request * ti_nwxfer
uint64_t ct_size
Definition: io_fops.h:507
#define out(...)
Definition: gen.c:41
M0_INTERNAL void m0_rpc_bulk_store_del(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:215
Definition: file.h:81
uint64_t oa_layout_id
Definition: client.h:752
M0_INTERNAL bool m0_is_read_fop(const struct m0_fop *fop)
Definition: io_fops.c:916
static uint64_t pargrp_id_find(m0_bindex_t index, const struct io_request *req, const struct io_req_fop *ir_fop)
Definition: file.c:638
M0_INTERNAL struct m0_fop_cob_rw * io_rw_get(struct m0_fop *fop)
Definition: io_fops.c:1037
Definition: pg.h:859
M0_INTERNAL void ioreq_fop_fini(struct ioreq_fop *fop)
Definition: io_req_fop.c:1036
struct m0_fop_type m0_fop_cob_truncate_fopt
Definition: io_fops.c:77
struct m0_indexvec ti_goff_ivec
Definition: pg.h:820
struct m0_rpc_machine * ri_rmachine
Definition: item.h:160
static struct m0_dtm_oper_descr reply
Definition: transmit.c:94
struct m0_fop_type m0_fop_cob_writev_fopt
Definition: io_fops.c:72
#define LOGMSG(ioo, rc, tireq)
M0_INTERNAL uint64_t m0_sm_id_get(const struct m0_sm *sm)
Definition: sm.c:1021
void sync_record_update(struct m0_reqh_service_ctx *service, struct m0_entity *ent, struct m0_op *op, struct m0_be_tx_remid *btr)
Definition: sync.c:788
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
enum page_attr irf_pattr
Definition: pg.h:869
void m0_free(void *data)
Definition: memory.c:146
static const struct m0_rpc_item_ops item_ops
Definition: io_req_fop.c:563
struct m0_rpc_item f_item
Definition: fop.h:83
struct m0_fop_cob c_body
Definition: io_fops.h:456
uint32_t sm_state
Definition: sm.h:307
struct m0_bufvec ioo_attr
struct m0_io_indexvec crw_ivec
Definition: io_fops.h:411
static void io_rpc_item_cb(struct m0_rpc_item *item)
Definition: io_req_fop.c:419
struct m0_pdclust_src_addr src
Definition: fd.c:108
int32_t rc
Definition: trigger_fop.h:47
struct m0_indexvec ti_ivec
Definition: pg.h:793
struct m0_rpc_conn * s_conn
Definition: session.h:312
struct m0_be_tx_remid fmr_remid
Definition: fom_generic.h:243
fop_type
Definition: stats_ut_svc.c:51
Definition: fop.h:79
struct m0_mutex rb_mutex
Definition: bulk.h:251
struct pargrp_iomap ** ioo_iomaps
uint64_t crw_flags
Definition: io_fops.h:413
#define FID_F
Definition: fid.h:75
static bool should_ioreq_sm_complete(struct m0_op_io *ioo)
Definition: io_req_fop.c:71
struct m0_fop * rep_fop
Definition: dir.c:334
M0_INTERNAL void * m0_extent_vec_get_checksum_addr(void *cksum_buf_vec, m0_bindex_t off, void *ivec, m0_bindex_t unit_sz, m0_bcount_t cs_sz)
Definition: cksum_utils.c:107
static void ioreq_cc_fop_release(struct m0_ref *ref)
Definition: io_req_fop.c:800
static void client_passive_recv(const struct m0_net_buffer_event *evt)
Definition: io_req_fop.c:593
M0_INTERNAL struct m0_reqh_service_ctx * m0_reqh_service_ctx_from_session(struct m0_rpc_session *session)
M0_BOB_DEFINE(M0_INTERNAL, &iofop_bobtype, ioreq_fop)