io_nw_xfer.c
/* -*- C -*- */
/*
 * Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */

#include "motr/client.h"
#include "motr/client_internal.h"
#include "motr/addb.h"
#include "motr/pg.h"
#include "motr/io.h"

#include "lib/memory.h"        /* m0_alloc, m0_free */
#include "lib/errno.h"         /* ENOMEM */
#include "lib/finject.h"       /* M0_FI_ */
#include "lib/cksum_utils.h"
#include "fid/fid.h"           /* m0_fid */
#include "rpc/rpclib.h"        /* m0_rpc_ */
#include "lib/ext.h"           /* struct m0_ext */
#include "lib/misc.h"          /* m0_extent_vec_get_checksum_addr */
#include "fop/fom_generic.h"   /* m0_rpc_item_generic_reply_rc */
#include "sns/parity_repair.h" /* m0_sns_repair_spare_map */
#include "fd/fd.h"             /* m0_fd_fwd_map, m0_fd_bwd_map */
#include "rpc/item.h"
#include "rpc/rpc_internal.h"

#define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_CLIENT
#include "lib/trace.h"         /* M0_LOG */

const struct m0_bob_type nwxfer_bobtype;
const struct m0_bob_type tioreq_bobtype;

M0_BOB_DEFINE(M0_INTERNAL, &nwxfer_bobtype, nw_xfer_request);
M0_BOB_DEFINE(M0_INTERNAL, &tioreq_bobtype, target_ioreq);

const struct m0_bob_type nwxfer_bobtype = {
        .bt_name         = "nw_xfer_request_bobtype",
        .bt_magix_offset = offsetof(struct nw_xfer_request, nxr_magic),
        .bt_magix        = M0_NWREQ_MAGIC,
        .bt_check        = NULL,
};

const struct m0_bob_type tioreq_bobtype = {
        .bt_name         = "target_ioreq",
        .bt_magix_offset = offsetof(struct target_ioreq, ti_magic),
        .bt_magix        = M0_TIOREQ_MAGIC,
        .bt_check        = NULL,
};

static void to_op_io_map(const struct m0_op *op,
                         struct m0_op_io *ioo)
{
        uint64_t oid  = m0_sm_id_get(&op->op_sm);
        uint64_t ioid = m0_sm_id_get(&ioo->ioo_sm);

        if (ioo->ioo_addb2_mapped++ == 0)
                M0_ADDB2_ADD(M0_AVI_OP_TO_IOO, oid, ioid);
}

static void m0_op_io_to_rpc_map(const struct m0_op_io *ioo,
                                const struct m0_rpc_item *item)
{
        uint64_t rid  = m0_sm_id_get(&item->ri_sm);
        uint64_t ioid = m0_sm_id_get(&ioo->ioo_sm);

        M0_ADDB2_ADD(M0_AVI_IOO_TO_RPC, ioid, rid);
}

static uint32_t io_di_size(struct m0_op_io *ioo)
{
        uint32_t                rc = 0;
        const struct m0_fid    *fid;
        const struct m0_di_ops *di_ops;
        struct m0_file         *file;

        M0_PRE(ioo != NULL);

#ifndef ENABLE_DATA_INTEGRITY
        return M0_RC(rc);
#endif
        /* Get di details (workaround!) by setting the dom to NULL. */
        file = &ioo->ioo_flock;
        fid = &ioo->ioo_oo.oo_fid;
        m0_file_init(file, fid, NULL, M0_DI_DEFAULT_TYPE);
        di_ops = file->fi_di_ops;

        if (di_ops->do_out_shift(file) == 0)
                return M0_RC(0);

        rc = di_ops->do_out_shift(file) * M0_DI_ELEMENT_SIZE;

        return M0_RC(rc);
}

static void parity_page_pos_get(struct pargrp_iomap *map,
                                m0_bindex_t          index,
                                uint32_t            *row,
                                uint32_t            *col)
{
        uint64_t                  pg_id;
        struct m0_pdclust_layout *play;

        M0_PRE(map != NULL);
        M0_PRE(row != NULL);
        M0_PRE(col != NULL);

        play = pdlayout_get(map->pi_ioo);

        pg_id = page_id(index, map->pi_ioo->ioo_obj);
        *row = pg_id % rows_nr(play, map->pi_ioo->ioo_obj);
        *col = pg_id / rows_nr(play, map->pi_ioo->ioo_obj);
}
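
/*
 * Illustrative example (an addition, not part of the original source):
 * with rows_nr(play, obj) == 8, a parity page whose page_id() is 19 lands
 * at row 19 % 8 == 3 and column 19 / 8 == 2 of the parity-buffer grid.
 */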

static int dgmode_rwvec_alloc_init(struct target_ioreq *ti)
{
        int                       rc;
        uint64_t                  cnt;
        struct dgmode_rwvec      *dg;
        struct m0_pdclust_layout *play;
        struct m0_op_io          *ioo;

        M0_ENTRY();
        M0_PRE(ti != NULL);
        M0_PRE(ti->ti_dgvec == NULL);

        ioo = bob_of(ti->ti_nwxfer, struct m0_op_io, ioo_nwxfer,
                     &ioo_bobtype);

        M0_ALLOC_PTR(dg);
        if (dg == NULL) {
                rc = -ENOMEM;
                goto failed;
        }

        play = pdlayout_get(ioo);
        dg->dr_tioreq = ti;

        cnt = page_nr(ioo->ioo_iomap_nr
                      * layout_unit_size(play)
                      * (layout_n(play) + layout_k(play)),
                      ioo->ioo_obj);
        rc = m0_indexvec_alloc(&dg->dr_ivec, cnt);
        if (rc != 0)
                goto failed;

        M0_ALLOC_ARR(dg->dr_bufvec.ov_buf, cnt);
        if (dg->dr_bufvec.ov_buf == NULL) {
                rc = -ENOMEM;
                goto failed;
        }

        M0_ALLOC_ARR(dg->dr_bufvec.ov_vec.v_count, cnt);
        if (dg->dr_bufvec.ov_vec.v_count == NULL) {
                rc = -ENOMEM;
                goto failed;
        }

        M0_ALLOC_ARR(dg->dr_auxbufvec.ov_buf, cnt);
        if (dg->dr_auxbufvec.ov_buf == NULL) {
                rc = -ENOMEM;
                goto failed;
        }

        M0_ALLOC_ARR(dg->dr_auxbufvec.ov_vec.v_count, cnt);
        if (dg->dr_auxbufvec.ov_vec.v_count == NULL) {
                rc = -ENOMEM;
                goto failed;
        }

        M0_ALLOC_ARR(dg->dr_pageattrs, cnt);
        if (dg->dr_pageattrs == NULL) {
                rc = -ENOMEM;
                goto failed;
        }

        /*
         * This value is incremented every time a new segment is added
         * to this index vector.
         */
        dg->dr_ivec.iv_vec.v_nr = 0;

        ti->ti_dgvec = dg;
        return M0_RC(0);
failed:
        ti->ti_dgvec = NULL;
        if (dg != NULL) {
                /* m0_free(NULL) is a no-op, so the partially allocated
                 * vectors can be freed unconditionally. */
                m0_free(dg->dr_bufvec.ov_buf);
                m0_free(dg->dr_bufvec.ov_vec.v_count);
                m0_free(dg->dr_auxbufvec.ov_buf);
                m0_free(dg->dr_auxbufvec.ov_vec.v_count);
                m0_free(dg);
        }
        return M0_ERR(rc);
}

static void dgmode_rwvec_dealloc_fini(struct dgmode_rwvec *dg)
{
        M0_ENTRY();

        M0_PRE(dg != NULL);

        dg->dr_tioreq = NULL;
        /*
         * We would need to go through the array of parity groups to find
         * the exact number of segments allocated for the index vector.
         * Instead, a fixed number of segments is enough to avoid
         * triggering the assert from m0_indexvec_free().
         * The memory allocator knows the size of the memory area held by
         * dg->dr_ivec.iv_index and dg->dr_ivec.iv_vec.v_count.
         */
        if (dg->dr_ivec.iv_vec.v_nr == 0)
                ++dg->dr_ivec.iv_vec.v_nr;

        m0_indexvec_free(&dg->dr_ivec);
        m0_free(dg->dr_bufvec.ov_buf);
        m0_free(dg->dr_bufvec.ov_vec.v_count);
        m0_free(dg->dr_auxbufvec.ov_buf);
        m0_free(dg->dr_auxbufvec.ov_vec.v_count);
        m0_free(dg->dr_pageattrs);
        m0_free(dg);
}

static uint64_t tioreqs_hash_func(const struct m0_htable *htable, const void *k)
{
        const uint64_t *key;

        M0_PRE(htable != NULL);
        M0_PRE(htable->h_bucket_nr > 0);
        M0_PRE(k != NULL);

        key = (uint64_t *)k;

        return *key % htable->h_bucket_nr;
}
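
/*
 * Illustrative example (an addition, not part of the original source):
 * with htable->h_bucket_nr == 10, a target fid whose container id is 1234
 * hashes to bucket 1234 % 10 == 4, so every lookup for that container
 * probes bucket 4 only.
 */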

static bool tioreq_key_eq(const void *key1, const void *key2)
{
        const uint64_t *k1 = (uint64_t *)key1;
        const uint64_t *k2 = (uint64_t *)key2;

        M0_PRE(k1 != NULL);
        M0_PRE(k2 != NULL);

        return *k1 == *k2;
}

M0_HT_DESCR_DEFINE(tioreqht, "Hash of target_ioreq objects", M0_INTERNAL,
                   struct target_ioreq, ti_link, ti_magic,
                   M0_TIOREQ_MAGIC, M0_TLIST_HEAD_MAGIC,
                   ti_fid.f_container, tioreqs_hash_func, tioreq_key_eq);

M0_HT_DEFINE(tioreqht, M0_INTERNAL, struct target_ioreq, uint64_t);

static bool target_ioreq_invariant(const struct target_ioreq *ti)
{
        return M0_RC(ti != NULL &&
               _0C(target_ioreq_bob_check(ti)) &&
               _0C(ti->ti_session != NULL) &&
               _0C(ti->ti_nwxfer != NULL) &&
               _0C(ti->ti_bufvec.ov_buf != NULL) &&
               _0C(ti->ti_auxbufvec.ov_buf != NULL) &&
               _0C(m0_fid_is_valid(&ti->ti_fid)) &&
               m0_tl_forall(iofops, iofop, &ti->ti_iofops,
                            ioreq_fop_invariant(iofop)));
}

M0_INTERNAL bool nw_xfer_request_invariant(const struct nw_xfer_request *xfer)
{
        return xfer != NULL &&
               _0C(nw_xfer_request_bob_check(xfer)) &&
               _0C(xfer->nxr_state < NXS_STATE_NR) &&

               _0C(ergo(xfer->nxr_state == NXS_INITIALIZED,
                        xfer->nxr_rc == 0 && xfer->nxr_bytes == 0 &&
                        m0_atomic64_get(&xfer->nxr_iofop_nr) == 0)) &&

               _0C(ergo(xfer->nxr_state == NXS_INFLIGHT,
                        !tioreqht_htable_is_empty(&xfer->nxr_tioreqs_hash))) &&

               _0C(ergo(xfer->nxr_state == NXS_COMPLETE,
                        m0_atomic64_get(&xfer->nxr_iofop_nr) == 0 &&
                        m0_atomic64_get(&xfer->nxr_rdbulk_nr) == 0)) &&

               m0_htable_forall(tioreqht, tioreq, &xfer->nxr_tioreqs_hash,
                                target_ioreq_invariant(tioreq));
}

static void target_ioreq_fini(struct target_ioreq *ti)
{
        struct m0_op_io *ioo;
        unsigned int     opcode;

        M0_ENTRY("target_ioreq %p", ti);

        M0_PRE(iofops_tlist_is_empty(&ti->ti_iofops));

        ioo = bob_of(ti->ti_nwxfer, struct m0_op_io,
                     ioo_nwxfer, &ioo_bobtype);
        opcode = ioo->ioo_oo.oo_oc.oc_op.op_code;
        target_ioreq_bob_fini(ti);
        tioreqht_tlink_fini(ti);
        iofops_tlist_fini(&ti->ti_iofops);
        ti->ti_ops = NULL;
        ti->ti_session = NULL;
        ti->ti_nwxfer = NULL;

        /* Resets the number of segments in the vector. */
        if (ti->ti_ivec.iv_vec.v_nr == 0)
                ti->ti_ivec.iv_vec.v_nr = ti->ti_bufvec.ov_vec.v_nr;

        m0_indexvec_free(&ti->ti_ivec);
        if (opcode == M0_OC_FREE)
                m0_indexvec_free(&ti->ti_trunc_ivec);
        m0_free0(&ti->ti_bufvec.ov_buf);
        m0_free0(&ti->ti_bufvec.ov_vec.v_count);
        m0_free0(&ti->ti_auxbufvec.ov_buf);
        m0_free0(&ti->ti_auxbufvec.ov_vec.v_count);

        /* For the write path, ti_attrbuf (an m0_buf) will be freed by the
         * RPC layer, so there is no need to explicitly call
         * m0_buf_free(&ti->ti_attrbuf).
         * TODO: Further validate this by checking if the memory is actually
         * freed.
         */

        m0_free0(&ti->ti_pageattrs);

        if (ti->ti_dgvec != NULL)
                dgmode_rwvec_dealloc_fini(ti->ti_dgvec);
        if (ti->ti_cc_fop_inited) {
                struct m0_rpc_item *item = &ti->ti_cc_fop.crf_fop.f_item;
                M0_LOG(M0_DEBUG, "item="ITEM_FMT" osr_xid=%"PRIu64,
                       ITEM_ARG(item), item->ri_header.osr_xid);
                ti->ti_cc_fop_inited = false;
                m0_fop_put_lock(&ti->ti_cc_fop.crf_fop);
        }

        if (opcode == M0_OC_WRITE) {
                m0_buf_free(&ti->ti_attrbuf);
                m0_free((void *)ti->ti_cksum_seg_b_nob);
        } else if (opcode == M0_OC_READ)
                m0_indexvec_free(&ti->ti_goff_ivec);

        m0_free(ti);
        M0_LEAVE();
}

static void target_ioreq_cancel(struct target_ioreq *ti)
{
        struct ioreq_fop *irfop;

        m0_tl_for (iofops, &ti->ti_iofops, irfop) {
                m0_rpc_item_cancel(&irfop->irf_iofop.if_fop.f_item);
        } m0_tl_endfor;
}

static struct target_ioreq *target_ioreq_locate(struct nw_xfer_request *xfer,
                                                struct m0_fid *fid)
{
        struct target_ioreq *ti;

        M0_ENTRY("nw_xfer_request %p, fid %p", xfer, fid);

        M0_PRE(xfer != NULL);
        M0_PRE(fid != NULL);

        ti = tioreqht_htable_lookup(&xfer->nxr_tioreqs_hash, &fid->f_container);
        /* WARN: Searches only by the container id but compares the whole fid. */
        M0_ASSERT(ergo(ti != NULL, m0_fid_cmp(fid, &ti->ti_fid) == 0));

        M0_LEAVE();
        return ti;
}

/*
 * For partially spanned parity groups, only the data units inside the
 * truncate range are truncated. For a fully spanned parity group, both
 * data and parity units are truncated.
 */
static bool should_unit_be_truncated(bool partial,
                                     enum m0_pdclust_unit_type unit_type,
                                     enum page_attr flags)
{
        return (!partial || unit_type == M0_PUT_DATA) &&
               (flags & PA_WRITE);
}
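
/*
 * Illustrative truth table (an addition, not part of the original source)
 * for should_unit_be_truncated(), assuming PA_WRITE is set in flags:
 *
 *   partial = true,  unit_type = M0_PUT_DATA   -> true
 *   partial = true,  unit_type = M0_PUT_PARITY -> false
 *   partial = false, any unit_type             -> true
 *
 * Without PA_WRITE the function always returns false.
 */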

static void target_ioreq_seg_add(struct target_ioreq *ti,
                                 const struct m0_pdclust_src_addr *src,
                                 const struct m0_pdclust_tgt_addr *tgt,
                                 m0_bindex_t gob_offset,
                                 m0_bcount_t count,
                                 struct pargrp_iomap *map)
{
        uint32_t                   seg;
        uint32_t                   tseg;
        m0_bindex_t                toff;
        m0_bindex_t                goff;
        m0_bindex_t                pgstart;
        m0_bindex_t                pgend;
        m0_bindex_t                unit_sz;
        struct data_buf           *buf;
        struct m0_op_io           *ioo;
        struct m0_pdclust_layout  *play;
        uint64_t                   frame;
        uint64_t                   unit;
        struct m0_indexvec        *ivec;
        struct m0_indexvec        *trunc_ivec = NULL;
        struct m0_indexvec        *goff_ivec = NULL;
        struct m0_bufvec          *bvec;
        struct m0_bufvec          *auxbvec;
        enum m0_pdclust_unit_type  unit_type;
        enum page_attr            *pattr;
        uint64_t                   cnt;
        unsigned int               opcode;
        m0_bcount_t                grp_size;
        uint64_t                   page_size;
        struct m0_ext              goff_span_ext;
        bool                       is_goff_in_range;
        void                      *dst_attr = NULL;
        uint32_t                   b_nob;

        M0_PRE(tgt != NULL);
        frame = tgt->ta_frame;
        M0_PRE(src != NULL);
        unit = src->sa_unit;
        M0_ENTRY("tio req %p, gob_offset %" PRIu64 ", count %"PRIu64
                 " frame %" PRIu64 " unit %"PRIu64,
                 ti, gob_offset, count, frame, unit);

        M0_PRE(ti != NULL);
        M0_PRE(map != NULL);

        ti->ti_goff = gob_offset;

        ioo = bob_of(ti->ti_nwxfer, struct m0_op_io,
                     ioo_nwxfer, &ioo_bobtype);
        opcode = ioo->ioo_oo.oo_oc.oc_op.op_code;
        play = pdlayout_get(ioo);

        page_size = m0__page_size(ioo);
        grp_size = data_size(play) * map->pi_grpid;
        unit_type = m0_pdclust_unit_classify(play, unit);
        M0_ASSERT(M0_IN(unit_type, (M0_PUT_DATA, M0_PUT_PARITY)));

        unit_sz = layout_unit_size(play);
        toff = target_offset(frame, play, gob_offset);
        pgstart = toff;
        goff = unit_type == M0_PUT_DATA ? gob_offset : 0;

        M0_LOG(M0_DEBUG,
               "[gpos %" PRIu64 ", count %" PRIu64 "] [%" PRIu64 ", %" PRIu64 "]"
               "->[%" PRIu64 ",%" PRIu64 "] %c", gob_offset, count, src->sa_group,
               src->sa_unit, tgt->ta_frame, tgt->ta_obj,
               unit_type == M0_PUT_DATA ? 'D' : 'P');

        /* Use ti_dgvec as long as it is a dgmode read/write. */
        if (ioreq_sm_state(ioo) == IRS_DEGRADED_READING ||
            ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING) {
                M0_ASSERT(ti->ti_dgvec != NULL);
                ivec = &ti->ti_dgvec->dr_ivec;
                bvec = &ti->ti_dgvec->dr_bufvec;
                auxbvec = &ti->ti_dgvec->dr_auxbufvec;
                pattr = ti->ti_dgvec->dr_pageattrs;
                cnt = page_nr(ioo->ioo_iomap_nr * layout_unit_size(play) *
                              (layout_n(play) + layout_k(play)), ioo->ioo_obj);
                M0_LOG(M0_DEBUG, "map_nr=%" PRIu64 " req state=%u cnt=%"PRIu64,
                       ioo->ioo_iomap_nr, ioreq_sm_state(ioo), cnt);
        } else {
                ivec = &ti->ti_ivec;
                trunc_ivec = &ti->ti_trunc_ivec;
                bvec = &ti->ti_bufvec;
                auxbvec = &ti->ti_auxbufvec;
                dst_attr = ti->ti_attrbuf.b_addr;
                goff_ivec = &ti->ti_goff_ivec;
                pattr = ti->ti_pageattrs;
                cnt = page_nr(ioo->ioo_iomap_nr * layout_unit_size(play) *
                              layout_n(play), ioo->ioo_obj);
                M0_LOG(M0_DEBUG, "map_nr=%" PRIu64 " req state=%u cnt=%"PRIu64,
                       ioo->ioo_iomap_nr, ioreq_sm_state(ioo), cnt);
        }

        while (pgstart < toff + count) {
                pgend = min64u(pgstart + page_size,
                               toff + count);
                seg = SEG_NR(ivec);

                /* Save COB offsets in ti_ivec. */
                INDEX(ivec, seg) = pgstart;
                COUNT(ivec, seg) = pgend - pgstart;

                if (unit_type == M0_PUT_DATA) {
                        uint32_t row = map->pi_max_row;
                        uint32_t col = map->pi_max_col;

                        page_pos_get(map, goff, grp_size, &row, &col);
                        M0_ASSERT(row <= map->pi_max_row);
                        M0_ASSERT(col <= map->pi_max_col);
                        buf = map->pi_databufs[row][col];

                        pattr[seg] |= PA_DATA;
                        M0_LOG(M0_DEBUG, "Data seg %u added", seg);
                } else {
                        buf = map->pi_paritybufs[page_id(goff, ioo->ioo_obj)]
                                                [unit - layout_n(play)];
                        pattr[seg] |= PA_PARITY;
                        M0_LOG(M0_DEBUG, "Parity seg %u added", seg);
                }
                buf->db_tioreq = ti;
                if (buf->db_flags & PA_WRITE)
                        ti->ti_req_type = TI_READ_WRITE;

                if (opcode == M0_OC_FREE &&
                    should_unit_be_truncated(map->pi_trunc_partial,
                                             unit_type, buf->db_flags)) {
                        tseg = SEG_NR(trunc_ivec);
                        INDEX(trunc_ivec, tseg) = pgstart;
                        COUNT(trunc_ivec, tseg) = pgend - pgstart;
                        ++trunc_ivec->iv_vec.v_nr;
                        M0_LOG(M0_DEBUG, "Seg id %d [%" PRIu64 ", %" PRIu64 "]"
                               " added to target ioreq with "FID_F,
                               tseg, INDEX(trunc_ivec, tseg),
                               COUNT(trunc_ivec, tseg),
                               FID_P(&ti->ti_fid));
                }

                if (opcode == M0_OC_FREE && !map->pi_trunc_partial)
                        pattr[seg] |= PA_TRUNC;

                M0_ASSERT(addr_is_network_aligned(buf->db_buf.b_addr));
                bvec->ov_buf[seg] = buf->db_buf.b_addr;
                bvec->ov_vec.v_count[seg] = COUNT(ivec, seg);
                if (map->pi_rtype == PIR_READOLD &&
                    unit_type == M0_PUT_DATA) {
                        M0_ASSERT(buf->db_auxbuf.b_addr != NULL);
                        auxbvec->ov_buf[seg] = buf->db_auxbuf.b_addr;
                        auxbvec->ov_vec.v_count[seg] = page_size;
                }
                pattr[seg] |= buf->db_flags;
                M0_LOG(M0_DEBUG, "pageaddr=%p, auxpage=%p,"
                       " index=%6" PRIu64 ", size=%4"PRIu64
                       " grpid=%3" PRIu64 " flags=%4x for "FID_F,
                       bvec->ov_buf[seg], auxbvec->ov_buf[seg],
                       INDEX(ivec, seg), COUNT(ivec, seg),
                       map->pi_grpid, pattr[seg],
                       FID_P(&ti->ti_fid));
                M0_LOG(M0_DEBUG, "Seg id %d [%" PRIu64 ", %"PRIu64
                       "] added to target_ioreq with "FID_F
                       " with flags 0x%x: ", seg,
                       INDEX(ivec, seg), COUNT(ivec, seg),
                       FID_P(&ti->ti_fid), pattr[seg]);

                goff_span_ext.e_start = ioo->ioo_ext.iv_index[0];
                goff_span_ext.e_end = ioo->ioo_ext.iv_index[ioo->ioo_ext.iv_vec.v_nr - 1]
                                      + ioo->ioo_ext.iv_vec.v_count[ioo->ioo_ext.iv_vec.v_nr - 1];
                /* If the ioo_attr struct is not allocated, skip the checksum
                 * computation. */
                is_goff_in_range = m0_ext_is_in(&goff_span_ext, goff) &&
                                   m0__obj_is_di_enabled(ioo);
                if (dst_attr != NULL && unit_type == M0_PUT_DATA &&
                    opcode == M0_OC_WRITE && is_goff_in_range) {
                        void        *src_attr;
                        m0_bcount_t  cs_sz;

                        cs_sz = ioo->ioo_attr.ov_vec.v_count[0];
                        /* This we can do as page_size <= unit_sz. */
                        b_nob = m0_extent_get_checksum_nob(INDEX(ivec, seg),
                                                           COUNT(ivec, seg),
                                                           unit_sz, cs_sz);
                        if (b_nob) {
                                /* This function gets the checksum address from
                                 * the application-provided buffer. The checksum
                                 * corresponds to the gob offset and ioo_ext, and
                                 * this function helps to locate the exact address
                                 * for the above.
                                 * Note: ioo_ext is the span of offsets for which
                                 * ioo_attr is provided, and goff should lie
                                 * within that span.
                                 */
                                src_attr = m0_extent_vec_get_checksum_addr(&ioo->ioo_attr, goff,
                                                                           &ioo->ioo_ext, unit_sz, cs_sz);
                                M0_ASSERT(b_nob == cs_sz);
                                memcpy((char *)dst_attr + ti->ti_cksum_copied, src_attr, b_nob);

                                /* Track the checksum bytes copied: ti_attrbuf is
                                 * over-allocated for the target, and when sending
                                 * the FOP this counter gives the actual checksum
                                 * size.
                                 */
                                ti->ti_cksum_copied += b_nob;

                                /* Make sure we are not exceeding the allocated
                                 * buffer size. */
                                M0_ASSERT(ti->ti_cksum_copied <= ti->ti_attrbuf.b_nob);
                        }
                        ti->ti_cksum_seg_b_nob[seg] = b_nob;
                } else if (goff_ivec != NULL && unit_type == M0_PUT_DATA &&
                           opcode == M0_OC_READ && is_goff_in_range) {
                        /*
                         * Store the gob offset for every segment so that, when
                         * the read completes, the checksum for this segment
                         * can be located in the application-provided buffer.
                         */
                        INDEX(goff_ivec, seg) = goff;
                        COUNT(goff_ivec, seg) = COUNT(ivec, seg);
                        goff_ivec->iv_vec.v_nr++;
                }

                goff += COUNT(ivec, seg);
                ++ivec->iv_vec.v_nr;
                pgstart = pgend;
        }
        M0_LEAVE();
}

M0_INTERNAL struct m0_fid target_fid(struct m0_op_io *ioo,
                                     struct m0_pdclust_tgt_addr *tgt)
{
        struct m0_fid fid;

        m0_poolmach_gob2cob(ioo_to_poolmach(ioo),
                            &ioo->ioo_oo.oo_fid, tgt->ta_obj,
                            &fid);
        return fid;
}

static inline struct m0_rpc_session *
target_session(struct m0_op_io *ioo, struct m0_fid tfid)
{
        struct m0_op           *op;
        struct m0_pool_version *pv;
        struct m0_client       *instance;

        M0_PRE(ioo != NULL);
        op = &ioo->ioo_oo.oo_oc.oc_op;
        instance = m0__op_instance(op);
        pv = m0_pool_version_find(&instance->m0c_pools_common, &ioo->ioo_pver);
        M0_ASSERT(pv != NULL);

        return m0_obj_container_id_to_session(
                pv, m0_fid_cob_device_id(&tfid));
}

static int bulk_buffer_add(struct ioreq_fop *irfop,
                           struct m0_net_domain *dom,
                           struct m0_rpc_bulk_buf **rbuf,
                           uint32_t *delta,
                           uint32_t maxsize)
{
        int                 rc;
        int                 seg_nr;
        struct m0_op_io    *ioo;
        struct m0_indexvec *ivec;

        M0_PRE(irfop != NULL);
        M0_PRE(dom != NULL);
        M0_PRE(rbuf != NULL);
        M0_PRE(delta != NULL);
        M0_PRE(maxsize > 0);
        M0_ENTRY("ioreq_fop %p net_domain %p delta_size %d",
                 irfop, dom, *delta);

        ioo = bob_of(irfop->irf_tioreq->ti_nwxfer, struct m0_op_io,
                     ioo_nwxfer, &ioo_bobtype);
        ivec = M0_IN(ioreq_sm_state(ioo), (IRS_READING, IRS_WRITING)) ?
               &irfop->irf_tioreq->ti_ivec :
               &irfop->irf_tioreq->ti_dgvec->dr_ivec;
        seg_nr = min32(m0_net_domain_get_max_buffer_segments(dom),
                       SEG_NR(ivec));
        *delta += io_desc_size(dom);

        if (m0_io_fop_size_get(&irfop->irf_iofop.if_fop) + *delta < maxsize) {
                rc = m0_rpc_bulk_buf_add(&irfop->irf_iofop.if_rbulk, seg_nr,
                                         0, dom, NULL, rbuf);
                if (rc != 0) {
                        *delta -= io_desc_size(dom);
                        return M0_ERR(rc);
                }
        } else {
                rc = -ENOSPC;
                *delta -= io_desc_size(dom);
        }

        M0_POST(ergo(rc == 0, *rbuf != NULL));
        return M0_RC(rc);
}

static void irfop_fini(struct ioreq_fop *irfop)
{
        M0_ENTRY("ioreq_fop %p", irfop);

        M0_PRE(irfop != NULL);

        m0_rpc_bulk_buflist_empty(&irfop->irf_iofop.if_rbulk);
        ioreq_fop_fini(irfop);
        m0_free(irfop);

        M0_LEAVE();
}

/*
 * Returns the auxiliary buffer of the given segment if it should be used
 * for the transfer, NULL otherwise.
 */
static void *buf_aux_chk_get(struct m0_bufvec *aux, enum page_attr p_attr,
                             uint32_t seg_idx, bool rd_in_wr)
{
        return (p_attr == PA_DATA && rd_in_wr && aux != NULL &&
                aux->ov_buf[seg_idx] != NULL) ? aux->ov_buf[seg_idx] : NULL;
}
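
/*
 * Illustrative usage (an addition, not part of the original source): in the
 * read phase of a read-modify-write (rd_in_wr == true), data segments are
 * steered to the auxiliary buffers,
 *
 *   buf = buf_aux_chk_get(&ti->ti_auxbufvec, PA_DATA, seg, true);
 *
 * while parity segments (p_attr == PA_PARITY) always yield NULL and fall
 * back to the regular data buffers in ti_bufvec.
 */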

static int target_ioreq_iofops_prepare(struct target_ioreq *ti,
                                       enum page_attr filter)
{
        int                      rc = 0;
        uint32_t                 seg = 0;
        /* Number of segments in one m0_rpc_bulk_buf structure. */
        uint32_t                 bbsegs;
        uint32_t                 maxsize;
        uint32_t                 delta;
        uint32_t                 fop_cksm_nob;
        uint32_t                 dispatched_cksm_nob = 0;
        enum page_attr           rw;
        enum page_attr          *pattr;
        struct m0_bufvec        *bvec;
        struct m0_bufvec        *auxbvec;
        struct m0_op_io         *ioo;
        struct m0_obj_attr      *io_attr;
        struct m0_indexvec      *ivec;
        struct ioreq_fop        *irfop;
        struct m0_net_domain    *ndom;
        struct m0_rpc_bulk_buf  *rbuf;
        struct m0_io_fop        *iofop;
        struct m0_fop_cob_rw    *rw_fop;
        struct nw_xfer_request  *xfer;
        /* Is it in the READ phase of a WRITE request? */
        bool                     read_in_write = false;
        void                    *buf;
        void                    *bufnext;
        m0_bcount_t              max_seg_size;
        m0_bcount_t              xfer_len;
        m0_bindex_t              offset;
        uint32_t                 segnext;
        uint32_t                 ndom_max_segs;
        struct m0_client        *instance;

        M0_ENTRY("prepare io fops for target ioreq %p filter 0x%x, tfid "FID_F,
                 ti, filter, FID_P(&ti->ti_fid));

        M0_PRE(M0_IN(filter, (PA_DATA, PA_PARITY)));

        rc = m0_rpc_session_validate(ti->ti_session);
        if (rc != 0 && (!M0_IN(rc, (-ECANCELED, -EINVAL))))
                return M0_ERR(rc);

        xfer = ti->ti_nwxfer;
        ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
        M0_ASSERT(M0_IN(ioreq_sm_state(ioo),
                        (IRS_READING, IRS_DEGRADED_READING,
                         IRS_WRITING, IRS_DEGRADED_WRITING)));

        if (ioo->ioo_oo.oo_oc.oc_op.op_code == M0_OC_WRITE &&
            M0_IN(ioreq_sm_state(ioo), (IRS_READING, IRS_DEGRADED_READING)))
                read_in_write = true;

        if (M0_IN(ioreq_sm_state(ioo), (IRS_READING, IRS_WRITING))) {
                ivec = &ti->ti_ivec;
                bvec = &ti->ti_bufvec;
                auxbvec = &ti->ti_auxbufvec;
                pattr = ti->ti_pageattrs;
        } else {
                if (ti->ti_dgvec == NULL) {
                        return M0_RC(0);
                }
                ivec = &ti->ti_dgvec->dr_ivec;
                bvec = &ti->ti_dgvec->dr_bufvec;
                auxbvec = &ti->ti_dgvec->dr_auxbufvec;
                pattr = ti->ti_dgvec->dr_pageattrs;
        }

        maxsize = m0_rpc_session_get_max_item_payload_size(ti->ti_session);
        rw = ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING ? PA_DGMODE_WRITE :
             ioreq_sm_state(ioo) == IRS_WRITING ? PA_WRITE :
             ioreq_sm_state(ioo) == IRS_DEGRADED_READING ? PA_DGMODE_READ :
             PA_READ;
        ndom = ti->ti_session->s_conn->c_rpc_machine->rm_tm.ntm_dom;

        max_seg_size = m0_net_domain_get_max_buffer_segment_size(ndom);

        ndom_max_segs = m0_net_domain_get_max_buffer_segments(ndom);

        while (seg < SEG_NR(ivec)) {
                delta = 0;
                bbsegs = 0;

                M0_LOG(M0_DEBUG, "pageattr = %u, filter = %u, rw = %u",
                       pattr[seg], filter, rw);

                if (!(pattr[seg] & filter) || !(pattr[seg] & rw) ||
                    (pattr[seg] & PA_TRUNC)) {
                        ++seg;
                        continue;
                }

                M0_ALLOC_PTR(irfop);
                if (irfop == NULL) {
                        rc = M0_ERR(-ENOMEM);
                        goto err;
                }
                rc = ioreq_fop_init(irfop, ti, filter);
                if (rc != 0) {
                        m0_free(irfop);
                        goto err;
                }
                fop_cksm_nob = 0;

                iofop = &irfop->irf_iofop;
                rw_fop = io_rw_get(&iofop->if_fop);

                rc = bulk_buffer_add(irfop, ndom, &rbuf, &delta, maxsize);
                if (rc != 0) {
                        ioreq_fop_fini(irfop);
                        m0_free(irfop);
                        goto err;
                }
                delta += io_seg_size();

                /*
                 * Adds io segments and the io descriptor only if they fit
                 * within the permitted size.
                 */
                /* TODO: can this loop become a function call?
                 * -- too many levels of indentation. */
                while (seg < SEG_NR(ivec) &&
                       m0_io_fop_size_get(&iofop->if_fop) + delta < maxsize &&
                       bbsegs < ndom_max_segs) {

                        /*
                         * Adds a page to the rpc bulk buffer only if it
                         * passes through the filter.
                         */
                        if (pattr[seg] & rw && pattr[seg] & filter &&
                            !(pattr[seg] & PA_TRUNC)) {
                                delta += io_seg_size() + io_di_size(ioo);

                                buf = buf_aux_chk_get(auxbvec, filter, seg,
                                                      read_in_write);

                                if (buf == NULL) {
                                        buf = bvec->ov_buf[seg];
                                        /* Add the size of the checksum generated
                                         * for every segment; skip parity. */
                                        if ((filter == PA_DATA) && m0__obj_is_di_enabled(ioo) &&
                                            (ioo->ioo_oo.oo_oc.oc_op.op_code == M0_OC_WRITE)) {
                                                delta += ti->ti_cksum_seg_b_nob[seg];
                                                fop_cksm_nob += ti->ti_cksum_seg_b_nob[seg];
                                        }
                                }

                                xfer_len = COUNT(ivec, seg);
                                offset = INDEX(ivec, seg);

                                /*
                                 * Accommodate multiple pages in a single
                                 * net buffer segment, if they are consecutive
                                 * pages.
                                 */
                                segnext = seg + 1;
                                while (segnext < SEG_NR(ivec) &&
                                       xfer_len < max_seg_size) {
                                        bufnext = buf_aux_chk_get(auxbvec,
                                                                  filter,
                                                                  segnext,
                                                                  read_in_write);
                                        if (bufnext == NULL)
                                                bufnext = bvec->ov_buf[segnext];

                                        if (buf + xfer_len == bufnext) {
                                                xfer_len += COUNT(ivec, ++seg);
                                                segnext = seg + 1;
                                        } else
                                                break;
                                }

                                rc = m0_rpc_bulk_buf_databuf_add(rbuf, buf,
                                                                 xfer_len,
                                                                 offset, ndom);

                                if (rc == -EMSGSIZE) {
                                        /*
                                         * Fix the number of segments in the
                                         * current m0_rpc_bulk_buf structure.
                                         */
                                        rbuf->bb_nbuf->nb_buffer.ov_vec.v_nr =
                                                bbsegs;
                                        rbuf->bb_zerovec.z_bvec.ov_vec.v_nr =
                                                bbsegs;
                                        bbsegs = 0;

                                        delta -= io_seg_size() - io_di_size(ioo);

                                        if ((filter == PA_DATA) && m0__obj_is_di_enabled(ioo) &&
                                            (ioo->ioo_oo.oo_oc.oc_op.op_code == M0_OC_WRITE)) {
                                                delta -= ti->ti_cksum_seg_b_nob[seg];
                                                fop_cksm_nob -= ti->ti_cksum_seg_b_nob[seg];
                                        }

                                        /*
                                         * The buffer must be 4k aligned to be
                                         * used by the network hw.
                                         */
                                        rc = bulk_buffer_add(irfop, ndom,
                                                        &rbuf, &delta, maxsize);
                                        if (rc == -ENOSPC)
                                                break;
                                        else if (rc != 0)
                                                goto fini_fop;

                                        /*
                                         * Since the current bulk buffer is
                                         * full, a new bulk buffer is added and
                                         * the existing segment is attempted to
                                         * be added to the new bulk buffer.
                                         */
                                        continue;
                                } else if (rc == 0)
                                        ++bbsegs;
                        }

                        ++seg;
                }

                if (m0_io_fop_byte_count(iofop) == 0) {
                        irfop_fini(irfop);
                        continue;
                }

                rbuf->bb_nbuf->nb_buffer.ov_vec.v_nr = bbsegs;
                rbuf->bb_zerovec.z_bvec.ov_vec.v_nr = bbsegs;

                rw_fop->crw_fid = ti->ti_fid;
                rw_fop->crw_pver = ioo->ioo_pver;
                rw_fop->crw_index = ti->ti_obj;
                /* In case of partially spanned units in a parity group,
                 * degraded read and read-verify mode expect zero-filled
                 * units from the server side.
                 */
                instance = m0__op_instance(&ioo->ioo_oo.oo_oc.oc_op);
                if (ioreq_sm_state(ioo) != IRS_DEGRADED_READING &&
                    !instance->m0c_config->mc_is_read_verify &&
                    ioo->ioo_flags & M0_OOF_NOHOLE)
                        rw_fop->crw_flags |= M0_IO_FLAG_NOHOLE;

                /* Assign the checksum buffer for the target. */
                if (filter == PA_DATA && m0__obj_is_di_enabled(ioo)) {
                        if (m0_is_write_fop(&iofop->if_fop)) {
                                M0_ASSERT(fop_cksm_nob != 0);
                                /* The RPC layer frees crw_di_data_cksum. */
                                if (m0_buf_alloc(&rw_fop->crw_di_data_cksum, fop_cksm_nob) != 0)
                                        goto fini_fop;

                                memcpy(rw_fop->crw_di_data_cksum.b_addr,
                                       ti->ti_attrbuf.b_addr + dispatched_cksm_nob,
                                       fop_cksm_nob);
                                dispatched_cksm_nob += fop_cksm_nob;
                                M0_ASSERT(dispatched_cksm_nob <= ti->ti_cksum_copied);
                        } else {
                                rw_fop->crw_di_data_cksum.b_addr = NULL;
                                rw_fop->crw_di_data_cksum.b_nob = 0;
                        }

                        rw_fop->crw_cksum_size = (read_in_write ||
                                                  !m0__obj_is_di_enabled(ioo)) ?
                                                 0 : ioo->ioo_attr.ov_vec.v_count[0];
                } else {
                        rw_fop->crw_di_data_cksum.b_addr = NULL;
                        rw_fop->crw_di_data_cksum.b_nob = 0;
                        rw_fop->crw_cksum_size = 0;
                }

                if (ioo->ioo_flags & M0_OOF_SYNC)
                        rw_fop->crw_flags |= M0_IO_FLAG_SYNC;
                io_attr = m0_io_attr(ioo);
                rw_fop->crw_lid = io_attr->oa_layout_id;

                /*
                 * XXX(Sining): This is a bit tricky: m0_io_fop_prepare in
                 * ioservice/io_fops.c calls io_fop_di_prepare, which has only
                 * the file system in mind and uses super block and file
                 * related information to do something (it returns 0 directly
                 * for user space). This is not the case for Client kernel
                 * mode!!
                 *
                 * Simply return 0, just like it does for user space at this
                 * moment.
                 */
                rc = m0_io_fop_prepare(&iofop->if_fop);
                if (rc != 0)
                        goto fini_fop;

                if (m0_is_read_fop(&iofop->if_fop))
                        m0_atomic64_add(&xfer->nxr_rdbulk_nr,
                                        m0_rpc_bulk_buf_length(
                                        &iofop->if_rbulk));

                m0_atomic64_inc(&xfer->nxr_iofop_nr);
                iofops_tlist_add(&ti->ti_iofops, irfop);

                M0_LOG(M0_DEBUG,
                       "fop=%p bulk=%p (%s) @"FID_F" io fops = %"PRIu64
                       " read bulks = %" PRIu64 ", list_len=%d",
                       &iofop->if_fop, &iofop->if_rbulk,
                       m0_is_read_fop(&iofop->if_fop) ? "r" : "w",
                       FID_P(&ti->ti_fid),
                       m0_atomic64_get(&xfer->nxr_iofop_nr),
                       m0_atomic64_get(&xfer->nxr_rdbulk_nr),
                       (int)iofops_tlist_length(&ti->ti_iofops));
        }

        return M0_RC(0);

fini_fop:
        irfop_fini(irfop);
err:
        m0_tl_teardown(iofops, &ti->ti_iofops, irfop) {
                irfop_fini(irfop);
        }

        return M0_ERR(rc);
}

static int target_cob_fop_prepare(struct target_ioreq *ti);
static const struct target_ioreq_ops tioreq_ops = {
        .tio_seg_add         = target_ioreq_seg_add,
        .tio_iofops_prepare  = target_ioreq_iofops_prepare,
        .tio_cc_fops_prepare = target_cob_fop_prepare,
};

static int target_cob_fop_prepare(struct target_ioreq *ti)
{
        int rc;
        M0_ENTRY("ti = %p type = %d", ti, ti->ti_req_type);
        M0_PRE(M0_IN(ti->ti_req_type, (TI_COB_CREATE, TI_COB_TRUNCATE)));

        rc = ioreq_cc_fop_init(ti);
        return M0_RC(rc);
}

static int target_ioreq_init(struct target_ioreq *ti,
                             struct nw_xfer_request *xfer,
                             const struct m0_fid *cobfid,
                             uint64_t ta_obj,
                             struct m0_rpc_session *session,
                             uint64_t size)
{
        int               rc;
        struct m0_op_io  *ioo;
        struct m0_op     *op;
        struct m0_client *instance;
        uint32_t          nr;

        M0_PRE(cobfid != NULL);
        M0_ENTRY("target_ioreq %p, nw_xfer_request %p, "FID_F,
                 ti, xfer, FID_P(cobfid));

        M0_PRE(ti != NULL);
        M0_PRE(xfer != NULL);
        M0_PRE(session != NULL);
        M0_PRE(size > 0);

        ti->ti_rc = 0;
        ti->ti_ops = &tioreq_ops;
        ti->ti_fid = *cobfid;
        ti->ti_nwxfer = xfer;
        ti->ti_dgvec = NULL;
        ti->ti_req_type = TI_NONE;
        M0_SET0(&ti->ti_cc_fop);
        ti->ti_cc_fop_inited = false;

        /*
         * The target object is usually in ONLINE state unless explicitly
         * told otherwise.
         */
        ti->ti_state = M0_PNDS_ONLINE;
        ti->ti_session = session;
        ti->ti_parbytes = 0;
        ti->ti_databytes = 0;

        ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer,
                     &ioo_bobtype);
        op = &ioo->ioo_oo.oo_oc.oc_op;
        instance = m0__op_instance(op);
        M0_PRE(instance != NULL);

        ti->ti_obj = ta_obj;

        iofops_tlist_init(&ti->ti_iofops);
        tioreqht_tlink_init(ti);
        target_ioreq_bob_init(ti);

        nr = page_nr(size, ioo->ioo_obj);
        rc = m0_indexvec_alloc(&ti->ti_ivec, nr);
        if (rc != 0)
                goto out;

        if (op->op_code == M0_OC_READ) {
                rc = m0_indexvec_alloc(&ti->ti_goff_ivec, nr);
                ti->ti_goff_ivec.iv_vec.v_nr = 0;
                if (rc != 0)
                        goto fail;
        }

        if (op->op_code == M0_OC_FREE) {
                rc = m0_indexvec_alloc(&ti->ti_trunc_ivec, nr);
                if (rc != 0)
                        goto fail;
        }

        ti->ti_bufvec.ov_vec.v_nr = nr;
        M0_ALLOC_ARR(ti->ti_bufvec.ov_vec.v_count, nr);
        if (ti->ti_bufvec.ov_vec.v_count == NULL)
                goto fail;

        M0_ALLOC_ARR(ti->ti_bufvec.ov_buf, nr);
        if (ti->ti_bufvec.ov_buf == NULL)
                goto fail;

        /* Memory allocation for checksum computation. */
        if (op->op_code == M0_OC_WRITE && m0__obj_is_di_enabled(ioo)) {
                uint32_t b_nob;

                ti->ti_attrbuf.b_addr = NULL;
                b_nob = (size * ioo->ioo_attr.ov_vec.v_count[0]) /
                        layout_unit_size(pdlayout_get(ioo));
                rc = m0_buf_alloc(&ti->ti_attrbuf, b_nob);
                if (rc != 0)
                        goto fail;
                ti->ti_cksum_copied = 0;
                M0_ALLOC_ARR(ti->ti_cksum_seg_b_nob, nr);
        } else {
                ti->ti_attrbuf.b_addr = NULL;
                ti->ti_attrbuf.b_nob = 0;
                ti->ti_cksum_copied = 0;
                ti->ti_cksum_seg_b_nob = NULL;
        }
        /*
         * For the READOLD method, an extra bufvec is needed to remember
         * the addresses of the auxiliary buffers so that those auxiliary
         * buffers can be used in rpc bulk transfer to avoid polluting
         * the real data buffers, which are the application's memory for IO,
         * in case the zero copy method is in use.
         */
        ti->ti_auxbufvec.ov_vec.v_nr = nr;
        M0_ALLOC_ARR(ti->ti_auxbufvec.ov_vec.v_count, nr);
        if (ti->ti_auxbufvec.ov_vec.v_count == NULL)
                goto fail;

        M0_ALLOC_ARR(ti->ti_auxbufvec.ov_buf, nr);
        if (ti->ti_auxbufvec.ov_buf == NULL)
                goto fail;

        if (M0_FI_ENABLED("no-mem-err"))
                goto fail;
        M0_ALLOC_ARR(ti->ti_pageattrs, nr);
        if (ti->ti_pageattrs == NULL)
                goto fail;

        /*
         * This value is incremented when new segments are added to the
         * index vector in target_ioreq_seg_add().
         */
        ti->ti_ivec.iv_vec.v_nr = 0;
        ti->ti_trunc_ivec.iv_vec.v_nr = 0;

        return M0_RC(0);
fail:
        m0_indexvec_free(&ti->ti_ivec);
        if (op->op_code == M0_OC_READ)
                m0_indexvec_free(&ti->ti_goff_ivec);
        if (op->op_code == M0_OC_FREE)
                m0_indexvec_free(&ti->ti_trunc_ivec);
        m0_free(ti->ti_bufvec.ov_vec.v_count);
        m0_free(ti->ti_bufvec.ov_buf);
        m0_free(ti->ti_auxbufvec.ov_vec.v_count);
        m0_free(ti->ti_auxbufvec.ov_buf);

out:
        return M0_ERR(-ENOMEM);
}

static int nw_xfer_tioreq_get(struct nw_xfer_request *xfer,
                              struct m0_fid *fid,
                              uint64_t ta_obj,
                              struct m0_rpc_session *session,
                              uint64_t size,
                              struct target_ioreq **out)
{
        int                  rc = 0;
        struct target_ioreq *ti;
        struct m0_op_io     *ioo;
        struct m0_op        *op;
        struct m0_client    *instance;

        M0_PRE(fid != NULL);
        M0_ENTRY("nw_xfer_request %p, "FID_F, xfer, FID_P(fid));

        M0_PRE(session != NULL);
        M0_PRE(out != NULL);

        ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
        op = &ioo->ioo_oo.oo_oc.oc_op;
        instance = m0__op_instance(op);
        M0_PRE(instance != NULL);

        ti = target_ioreq_locate(xfer, fid);
        if (ti == NULL) {
                M0_ALLOC_PTR(ti);
                if (ti == NULL)
                        return M0_ERR(-ENOMEM);

                rc = target_ioreq_init(ti, xfer, fid, ta_obj, session, size);
                if (rc == 0) {
                        tioreqht_htable_add(&xfer->nxr_tioreqs_hash, ti);
                        M0_LOG(M0_INFO, "New target_ioreq %p added for "FID_F,
                               ti, FID_P(fid));
                } else {
                        m0_free(ti);
                        return M0_ERR_INFO(rc, "target_ioreq_init() failed");
                }
        }

        if (ti->ti_dgvec == NULL && M0_IN(ioreq_sm_state(ioo),
                                          (IRS_DEGRADED_READING,
                                           IRS_DEGRADED_WRITING)))
                rc = dgmode_rwvec_alloc_init(ti);

        *out = ti;

        return M0_RC(rc);
}

static void databufs_set_dgw_mode(struct pargrp_iomap *iomap,
                                  struct m0_pdclust_layout *play,
                                  struct m0_ext *ext)
{
        uint32_t         row_start;
        uint32_t         row_end;
        uint32_t         row;
        uint32_t         col;
        m0_bcount_t      grp_off;
        struct data_buf *dbuf;

        grp_off = data_size(play) * iomap->pi_grpid;
        page_pos_get(iomap, ext->e_start, grp_off, &row_start, &col);
        page_pos_get(iomap, ext->e_end - 1, grp_off, &row_end, &col);

        for (row = row_start; row <= row_end; ++row) {
                dbuf = iomap->pi_databufs[row][col];
                if (dbuf->db_flags & PA_WRITE)
                        dbuf->db_flags |= PA_DGMODE_WRITE;
        }
}

static void paritybufs_set_dgw_mode(struct pargrp_iomap *iomap,
                                    struct m0_op_io *ioo,
                                    uint64_t unit)
{
        uint32_t                  row;
        uint32_t                  col;
        struct data_buf          *dbuf;
        struct m0_pdclust_layout *play = pdlayout_get(ioo);
        uint64_t                  unit_size = layout_unit_size(play);

        parity_page_pos_get(iomap, unit * unit_size, &row, &col);
        for (; row < rows_nr(play, ioo->ioo_obj); ++row) {
                dbuf = iomap->pi_paritybufs[row][col];
                if (m0_pdclust_is_replicated(play) &&
                    iomap->pi_databufs[row][0] == NULL)
                        continue;
                if (dbuf->db_flags & PA_WRITE)
                        dbuf->db_flags |= PA_DGMODE_WRITE;
        }
}

static int nw_xfer_io_distribute(struct nw_xfer_request *xfer)
{
        bool                        do_cobs = true;
        int                         rc = 0;
        unsigned int                op_code;
        uint64_t                    i;
        uint64_t                    unit;
        uint64_t                    unit_size;
        uint64_t                    count;
        uint64_t                    pgstart;
        struct m0_op               *op;
        /* Extent representing a data unit. */
        struct m0_ext               u_ext;
        /* Extent representing the resultant extent. */
        struct m0_ext               r_ext;
        /* Extent representing a segment from the index vector. */
        struct m0_ext               v_ext;
        struct m0_op_io            *ioo;
        struct target_ioreq        *ti;
        struct m0_ivec_cursor       cursor;
        struct m0_pdclust_layout   *play;
        enum m0_pdclust_unit_type   unit_type;
        struct m0_pdclust_src_addr  src;
        struct m0_pdclust_tgt_addr  tgt;
        struct m0_bitmap            units_spanned;
        struct pargrp_iomap        *iomap;
        struct m0_client           *instance;

        M0_ENTRY("nw_xfer_request %p", xfer);

        M0_PRE(xfer != NULL);

        ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
        op = &ioo->ioo_oo.oo_oc.oc_op;
        op_code = op->op_code;
        play = pdlayout_get(ioo);
        unit_size = layout_unit_size(play);
        instance = m0__op_instance(op);

        /*
         * In non-oostore mode, all cobs are created on object creation.
         * In oostore mode, CROW is enabled and cobs are created automatically
         * at the server side on the 1st write request. But, because of SNS,
         * we need to create cobs for the spare units, and to make sure all
         * cobs are created for all units in the parity group touched by the
         * update request. See more below.
         */
        if (!m0__is_oostore(instance) || op_code == M0_OC_READ)
                do_cobs = false;
        /*
         * In replicated layout (N == 1), all units in the parity group are
         * always spanned. And there are no spare units, so...
         */
        if (ioo->ioo_pbuf_type == M0_PBUF_IND)
                do_cobs = false;

        if (do_cobs) {
                rc = m0_bitmap_init(&units_spanned, m0_pdclust_size(play));
                if (rc != 0)
                        return M0_ERR(rc);
        }

        for (i = 0; i < ioo->ioo_iomap_nr; ++i) {
                count = 0;
                iomap = ioo->ioo_iomaps[i];
                pgstart = data_size(play) * iomap->pi_grpid;
                src.sa_group = iomap->pi_grpid;

                M0_LOG(M0_DEBUG, "xfer=%p map=%p [grpid=%" PRIu64 " state=%u]",
                       xfer, iomap, iomap->pi_grpid, iomap->pi_state);

                if (do_cobs)
                        m0_bitmap_reset(&units_spanned);

                /* Traverse the parity group ivec by units. */
                m0_ivec_cursor_init(&cursor, &iomap->pi_ivec);
                while (!m0_ivec_cursor_move(&cursor, count)) {
                        unit = (m0_ivec_cursor_index(&cursor) - pgstart) /
                               unit_size;

                        u_ext.e_start = pgstart + unit * unit_size;
                        u_ext.e_end = u_ext.e_start + unit_size;

                        v_ext.e_start = m0_ivec_cursor_index(&cursor);
                        v_ext.e_end = v_ext.e_start +
                                      m0_ivec_cursor_step(&cursor);

                        m0_ext_intersection(&u_ext, &v_ext, &r_ext);
                        M0_ASSERT(m0_ext_is_valid(&r_ext));
                        count = m0_ext_length(&r_ext);

                        unit_type = m0_pdclust_unit_classify(play, unit);
                        M0_ASSERT(unit_type == M0_PUT_DATA);

                        if (ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING)
                                databufs_set_dgw_mode(iomap, play, &r_ext);

                        src.sa_unit = unit;
                        rc = xfer->nxr_ops->nxo_tioreq_map(xfer, &src, &tgt,
                                                           &ti);
                        if (rc != 0)
                                goto err;

                        ti->ti_ops->tio_seg_add(ti, &src, &tgt, r_ext.e_start,
                                                m0_ext_length(&r_ext), iomap);
                        if (op_code == M0_OC_WRITE && do_cobs &&
                            ti->ti_req_type == TI_READ_WRITE)
                                m0_bitmap_set(&units_spanned, unit, true);

                }

                M0_ASSERT(ergo(M0_IN(op_code, (M0_OC_READ, M0_OC_WRITE)),
                               m0_vec_count(&ioo->ioo_ext.iv_vec) ==
                               m0_vec_count(&ioo->ioo_data.ov_vec)));

                /* Process parity units. */
                if (M0_IN(ioo->ioo_pbuf_type, (M0_PBUF_DIR,
                                               M0_PBUF_IND)) ||
                    (ioreq_sm_state(ioo) == IRS_DEGRADED_READING &&
                     iomap->pi_state == PI_DEGRADED)) {

                        for (unit = 0; unit < layout_k(play); ++unit) {
                                src.sa_unit = layout_n(play) + unit;
                                M0_ASSERT(m0_pdclust_unit_classify(play,
                                          src.sa_unit) == M0_PUT_PARITY);

                                rc = xfer->nxr_ops->nxo_tioreq_map(xfer, &src,
                                                                   &tgt, &ti);
                                if (rc != 0)
                                        goto err;

                                if (ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING)
                                        paritybufs_set_dgw_mode(iomap, ioo,
                                                                unit);

                                if (op_code == M0_OC_WRITE && do_cobs)
                                        m0_bitmap_set(&units_spanned,
                                                      src.sa_unit, true);

                                ti->ti_ops->tio_seg_add(ti, &src, &tgt, pgstart,
                                                        layout_unit_size(play),
                                                        iomap);
                        }

                        if (!do_cobs)
                                continue; /* to the next iomap */

                        /*
                         * Create cobs for all units not spanned by the
                         * IO request (data or spare units).
                         *
                         * If some data unit is not present in the group (a
                         * hole or an incomplete last group), we still need to
                         * create a cob for it. Otherwise, during SNS-repair
                         * the receiver will wait forever for this unit
                         * without knowing that its size is actually zero.
                         */
                        for (unit = 0; unit < m0_pdclust_size(play); ++unit) {
                                if (m0_bitmap_get(&units_spanned, unit))
                                        continue;

                                src.sa_unit = unit;
                                rc = xfer->nxr_ops->nxo_tioreq_map(xfer, &src,
                                                                   &tgt, &ti);
                                if (rc != 0)
                                        M0_LOG(M0_ERROR, "[%p] map=%p "
                                               "nxo_tioreq_map() failed: rc=%d",
                                               ioo, iomap, rc);
                                /*
                                 * Skip the case when some other parity group
                                 * has spanned the particular target already.
                                 */
                                if (ti->ti_req_type != TI_NONE)
                                        continue;

                                ti->ti_req_type = TI_COB_CREATE;
                        }
                }
        }

        if (do_cobs)
                m0_bitmap_fini(&units_spanned);

        M0_ASSERT(ergo(M0_IN(op_code, (M0_OC_READ, M0_OC_WRITE)),
                       m0_vec_count(&ioo->ioo_ext.iv_vec) ==
                       m0_vec_count(&ioo->ioo_data.ov_vec)));

        return M0_RC(0);
err:
        m0_htable_for(tioreqht, ti, &xfer->nxr_tioreqs_hash) {
                tioreqht_htable_del(&xfer->nxr_tioreqs_hash, ti);
                target_ioreq_fini(ti);
                m0_free0(&ti);
        } m0_htable_endfor;

        return M0_ERR(rc);
}

static void nw_xfer_req_complete(struct nw_xfer_request *xfer, bool rmw)
{
        struct m0_client    *instance;
        struct m0_op_io     *ioo;
        struct target_ioreq *ti;
        struct ioreq_fop    *irfop;
        struct m0_fop       *fop;
        struct m0_rpc_item  *item;

        M0_ENTRY("nw_xfer_request %p, rmw %s", xfer,
                 rmw ? (char *)"true" : (char *)"false");

        M0_PRE(xfer != NULL);
        xfer->nxr_state = NXS_COMPLETE;
        ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
        instance = m0__op_instance(&ioo->ioo_oo.oo_oc.oc_op);

        /*
         * Ignore the following invariant check, as there exist cases in which
         * io fops are created successfully for some target services but fail
         * for some services in nxo_dispatch (for example, the
         * session/connection to a service is invalid), resulting in a 'dirty'
         * op in which nxr_iofop_nr != 0 and nxr_state == NXS_COMPLETE.
         *
         * M0_PRE_EX(m0_op_io_invariant(ioo));
         */

        m0_htable_for(tioreqht, ti, &xfer->nxr_tioreqs_hash) {

                /* Maintains only the first error encountered. */
                if (xfer->nxr_rc == 0)
                        xfer->nxr_rc = ti->ti_rc;

                xfer->nxr_bytes += ti->ti_databytes;
                ti->ti_databytes = 0;

                if (m0__is_oostore(instance) &&
                    ti->ti_req_type == TI_COB_CREATE &&
                    ioreq_sm_state(ioo) == IRS_WRITE_COMPLETE) {
                        ti->ti_req_type = TI_NONE;
                        continue;
                }

                if (m0__is_oostore(instance) &&
                    ti->ti_req_type == TI_COB_TRUNCATE &&
                    ioreq_sm_state(ioo) == IRS_TRUNCATE_COMPLETE) {
                        ti->ti_req_type = TI_NONE;
                }

                m0_tl_teardown(iofops, &ti->ti_iofops, irfop) {
                        fop = &irfop->irf_iofop.if_fop;
                        item = &fop->f_item;
                        M0_LOG(M0_DEBUG, "[%p] fop %p, ref %llu, "
                               "item %p[%u], ri_error %d, ri_state %d",
                               ioo, fop,
                               (unsigned long long)m0_ref_read(&fop->f_ref),
                               item, item->ri_type->rit_opcode, item->ri_error,
                               item->ri_sm.sm_state);

                        /* Maintains only the first error encountered. */
                        if (xfer->nxr_rc == 0 &&
                            item->ri_error != 0) {
                                xfer->nxr_rc = item->ri_error;
                                M0_LOG(M0_DEBUG, "[%p] nwxfer rc = %d",
                                       ioo, xfer->nxr_rc);
                        }

                        M0_ASSERT(ergo(item->ri_sm.sm_state !=
                                       M0_RPC_ITEM_UNINITIALISED,
                                       item->ri_rmachine != NULL));
                        if (item->ri_rmachine == NULL) {
                                M0_ASSERT(ti->ti_session != NULL);
                                item->ri_rmachine =
                                        ti->ti_session->s_conn->c_rpc_machine;
                        }

                        M0_LOG(M0_DEBUG,
                               "[%p] item %p, target fid "FID_F"fop %p, "
                               "ref %llu", ioo, item, FID_P(&ti->ti_fid), fop,
                               (unsigned long long)m0_ref_read(&fop->f_ref));
                        m0_fop_put_lock(fop);
                }

        } m0_htable_endfor;

        M0_LOG(M0_INFO, "Number of bytes %s = %"PRIu64,
               ioreq_sm_state(ioo) == IRS_READ_COMPLETE ? "read" : "written",
               xfer->nxr_bytes);

        /*
         * This function is invoked from 4 states - IRS_READ_COMPLETE,
         * IRS_WRITE_COMPLETE, IRS_DEGRADED_READING, IRS_DEGRADED_WRITING.
         * And the state change is applicable only for healthy-state IO,
         * meaning for the states IRS_READ_COMPLETE and IRS_WRITE_COMPLETE.
         */
        if (M0_IN(ioreq_sm_state(ioo),
                  (IRS_READ_COMPLETE, IRS_WRITE_COMPLETE))) {
                if (!rmw)
                        ioreq_sm_state_set_locked(ioo, IRS_REQ_COMPLETE);
                else if (ioreq_sm_state(ioo) == IRS_READ_COMPLETE)
                        xfer->nxr_bytes = 0;
        }

        /*
         * nxo_dispatch may fail if connections to services have not been
         * established yet. In this case, ioo_rc contains the error code and
         * xfer->nxr_rc == 0; don't overwrite ioo_rc.
         *
         * TODO: merge this with op->op_sm.sm_rc ?
         */
        if (xfer->nxr_rc != 0)
                ioo->ioo_rc = xfer->nxr_rc;

        M0_LEAVE();
}

static int nw_xfer_req_dispatch(struct nw_xfer_request *xfer)
{
        int                  rc = 0;
        int                  post_error = 0;
        int                  ri_error;
        uint64_t             nr_dispatched = 0;
        struct ioreq_fop    *irfop;
        struct m0_op_io     *ioo;
        struct m0_op        *op;
        struct target_ioreq *ti;
        struct m0_client    *instance;

        M0_ENTRY();

        M0_PRE(xfer != NULL);
        ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
        op = &ioo->ioo_oo.oo_oc.oc_op;
        instance = m0__op_instance(op);
        M0_PRE(instance != NULL);

        to_op_io_map(op, ioo);

        /* FOPs' preparation */
        m0_htable_for(tioreqht, ti, &xfer->nxr_tioreqs_hash) {
                if (ti->ti_state != M0_PNDS_ONLINE) {
                        M0_LOG(M0_INFO, "Skipped iofops prepare for "FID_F,
                               FID_P(&ti->ti_fid));
                        continue;
                }
                ti->ti_start_time = m0_time_now();
                if (ti->ti_req_type == TI_COB_CREATE &&
                    ioreq_sm_state(ioo) == IRS_WRITING) {
                        rc = ti->ti_ops->tio_cc_fops_prepare(ti);
                        if (rc != 0)
                                return M0_ERR_INFO(rc, "[%p] cob create fop "
                                                   "failed", ioo);
                        continue;
                }

                if (ioreq_sm_state(ioo) == IRS_TRUNCATE) {
                        if (ti->ti_req_type == TI_READ_WRITE) {
                                ti->ti_req_type = TI_COB_TRUNCATE;
                                rc = ti->ti_ops->tio_cc_fops_prepare(ti);
                                if (rc != 0)
                                        return M0_ERR(rc);
                        }
                        continue;
                }
                rc = ti->ti_ops->tio_iofops_prepare(ti, PA_DATA) ?:
                     ti->ti_ops->tio_iofops_prepare(ti, PA_PARITY);
                if (rc != 0)
                        return M0_ERR(rc);
        } m0_htable_endfor;

        /* Submit io FOPs */
        m0_htable_for(tioreqht, ti, &xfer->nxr_tioreqs_hash) {
                struct m0_rpc_item *item = &ti->ti_cc_fop.crf_fop.f_item;

                /* Skip the target device if it is not online. */
                if (ti->ti_state != M0_PNDS_ONLINE) {
                        M0_LOG(M0_INFO, "Skipped device "FID_F,
                               FID_P(&ti->ti_fid));
                        continue;
                }
                if (ti->ti_req_type == TI_COB_CREATE &&
                    ioreq_sm_state(ioo) == IRS_WRITING) {
                        /*
                         * An error returned by rpc post has been ignored.
                         * It will be handled in the respective bottom half.
                         */
                        M0_LOG(M0_DEBUG, "item="ITEM_FMT" osr_xid=%"PRIu64,
                               ITEM_ARG(item), item->ri_header.osr_xid);
                        rc = m0_rpc_post(item);
                        M0_CNT_INC(nr_dispatched);
                        m0_op_io_to_rpc_map(ioo, item);
                        continue;
                }
                if (op->op_code == M0_OC_FREE &&
                    ioreq_sm_state(ioo) == IRS_TRUNCATE &&
                    ti->ti_req_type == TI_COB_TRUNCATE) {
                        if (ti->ti_trunc_ivec.iv_vec.v_nr > 0) {
                                /*
                                 * An error returned by rpc post has been
                                 * ignored. It will be handled in
                                 * io_bottom_half().
                                 */
                                M0_LOG(M0_DEBUG, "item="ITEM_FMT
                                       " osr_xid=%"PRIu64,
                                       ITEM_ARG(item),
                                       item->ri_header.osr_xid);
                                rc = m0_rpc_post(item);
                                M0_CNT_INC(nr_dispatched);
                                m0_op_io_to_rpc_map(ioo, item);
                        }
                        continue;
                }
                m0_tl_for (iofops, &ti->ti_iofops, irfop) {
                        rc = ioreq_fop_async_submit(&irfop->irf_iofop,
                                                    ti->ti_session);
                        ri_error = irfop->irf_iofop.if_fop.f_item.ri_error;
                        M0_LOG(M0_DEBUG, "[%p] Submitted fop for device "
                               FID_F"@%p, item %p, fop_nr=%llu, rc=%d, "
                               "ri_error=%d", ioo, FID_P(&ti->ti_fid), irfop,
                               &irfop->irf_iofop.if_fop.f_item,
                               (unsigned long long)
                               m0_atomic64_get(&xfer->nxr_iofop_nr),
                               rc, ri_error);

                        /* XXX: noisy */
                        m0_op_io_to_rpc_map(ioo,
                                            &irfop->irf_iofop.if_fop.f_item);

                        if (rc != 0)
                                goto out;
                        m0_atomic64_inc(&instance->m0c_pending_io_nr);
                        if (ri_error == 0)
                                M0_CNT_INC(nr_dispatched);
                        else if (post_error == 0)
                                post_error = ri_error;
                } m0_tl_endfor;
        } m0_htable_endfor;

out:
        if (rc == 0 && nr_dispatched == 0 && post_error == 0) {
                /* No fop has been dispatched.
                 *
                 * This might happen in dgmode reading:
                 * in 'parity verify' mode, a whole parity group, including
                 * data and parity units, is read from the ioservices.
                 * If some units failed to read, there is no need to read
                 * extra units: the units needed for recovery are ready.
                 */
                M0_ASSERT(op->op_code == M0_OC_READ &&
                          instance->m0c_config->mc_is_read_verify);
                xfer->nxr_state = NXS_COMPLETE;
        } else if (rc == 0)
                xfer->nxr_state = NXS_INFLIGHT;

        M0_LOG(M0_DEBUG, "[%p] nxr_iofop_nr %llu, nxr_rdbulk_nr %llu, "
               "nr_dispatched %llu", ioo,
               (unsigned long long)m0_atomic64_get(&xfer->nxr_iofop_nr),
               (unsigned long long)m0_atomic64_get(&xfer->nxr_rdbulk_nr),
               (unsigned long long)nr_dispatched);

        return M0_RC(rc);
}

static bool should_spare_be_mapped(struct m0_op_io *ioo,
                                   enum m0_pool_nd_state dev_state)
{
        return (M0_IN(ioreq_sm_state(ioo),
                      (IRS_READING, IRS_DEGRADED_READING)) &&
                dev_state == M0_PNDS_SNS_REPAIRED)
               ||
               (ioreq_sm_state(ioo) == IRS_DEGRADED_WRITING &&
                (dev_state == M0_PNDS_SNS_REPAIRED ||
                 (dev_state == M0_PNDS_SNS_REPAIRING &&
                  ioo->ioo_sns_state == SRS_REPAIR_DONE)));
}
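
/*
 * Illustrative decision table (an addition, not part of the original
 * source) for should_spare_be_mapped():
 *
 *   (degraded) reading + M0_PNDS_SNS_REPAIRED                -> use spare
 *   degraded writing   + M0_PNDS_SNS_REPAIRED                -> use spare
 *   degraded writing   + M0_PNDS_SNS_REPAIRING + repair done -> use spare
 *   anything else                                            -> original
 *                                                               target
 */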

static int nw_xfer_tioreq_map(struct nw_xfer_request *xfer,
                              const struct m0_pdclust_src_addr *src,
                              struct m0_pdclust_tgt_addr *tgt,
                              struct target_ioreq **tio)
{
        int                         rc;
        struct m0_fid               tfid;
        const struct m0_fid        *gfid;
        struct m0_op_io            *ioo;
        struct m0_rpc_session      *session;
        struct m0_pdclust_layout   *play;
        struct m0_pdclust_instance *play_instance;
        enum m0_pool_nd_state       dev_state;
        enum m0_pool_nd_state       dev_state_prev;
        uint32_t                    spare_slot;
        uint32_t                    spare_slot_prev;
        struct m0_pdclust_src_addr  spare;
        struct m0_poolmach         *pm;

        M0_ENTRY("nw_xfer_request=%p", xfer);

        M0_PRE(xfer != NULL);
        M0_PRE(src != NULL);
        M0_PRE(tgt != NULL);
        M0_PRE(tio != NULL);

        ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);

        play = pdlayout_get(ioo);
        M0_PRE(play != NULL);
        play_instance = pdlayout_instance(layout_instance(ioo));
        M0_PRE(play_instance != NULL);

        spare = *src;
        m0_fd_fwd_map(play_instance, src, tgt);
        tfid = target_fid(ioo, tgt);
        M0_LOG(M0_DEBUG, "src_id[%" PRIu64 ":%" PRIu64 "] -> "
               "dest_id[%" PRIu64 ":%" PRIu64 "] @ tfid="FID_F,
               src->sa_group, src->sa_unit, tgt->ta_frame, tgt->ta_obj,
               FID_P(&tfid));

        pm = ioo_to_poolmach(ioo);
        M0_ASSERT(pm != NULL);
        rc = m0_poolmach_device_state(pm, tgt->ta_obj, &dev_state);
        if (rc != 0)
                return M0_RC(rc);

        if (M0_FI_ENABLED("poolmach_client_repaired_device1")) {
                if (tfid.f_container == 1)
                        dev_state = M0_PNDS_SNS_REPAIRED;
        }

        M0_LOG(M0_INFO, "[%p] tfid="FID_F" dev_state=%d\n",
               ioo, FID_P(&tfid), dev_state);

        if (should_spare_be_mapped(ioo, dev_state)) {
                gfid = &ioo->ioo_oo.oo_fid;
                rc = m0_sns_repair_spare_map(pm, gfid, play, play_instance,
                                             src->sa_group, src->sa_unit,
                                             &spare_slot, &spare_slot_prev);
                if (rc != 0)
                        return M0_RC(rc);

                /* Check if there is an effective failure. */
                if (spare_slot_prev != src->sa_unit) {
                        spare.sa_unit = spare_slot_prev;
                        m0_fd_fwd_map(play_instance, &spare, tgt);
                        tfid = target_fid(ioo, tgt);
                        rc = m0_poolmach_device_state(pm, tgt->ta_obj,
                                                      &dev_state_prev);
                        if (rc != 0)
                                return M0_RC(rc);
                } else
                        dev_state_prev = M0_PNDS_SNS_REPAIRED;

                if (dev_state_prev == M0_PNDS_SNS_REPAIRED) {
                        spare.sa_unit = spare_slot;
                        m0_fd_fwd_map(play_instance, &spare, tgt);
                        tfid = target_fid(ioo, tgt);
                }
                dev_state = dev_state_prev;
        }

        session = target_session(ioo, tfid);

        rc = nw_xfer_tioreq_get(xfer, &tfid, tgt->ta_obj, session,
                                layout_unit_size(play) * ioo->ioo_iomap_nr,
                                tio);

        if (M0_IN(ioreq_sm_state(ioo), (IRS_DEGRADED_READING,
                                        IRS_DEGRADED_WRITING)) &&
            dev_state != M0_PNDS_SNS_REPAIRED)
                (*tio)->ti_state = dev_state;

        return M0_RC(rc);
}

static const struct nw_xfer_ops xfer_ops = {
        .nxo_distribute = nw_xfer_io_distribute,
        .nxo_complete   = nw_xfer_req_complete,
        .nxo_dispatch   = nw_xfer_req_dispatch,
        .nxo_tioreq_map = nw_xfer_tioreq_map,
};

M0_INTERNAL void nw_xfer_request_init(struct nw_xfer_request *xfer)
{
        uint64_t                  bucket_nr;
        struct m0_op_io          *ioo;
        struct m0_pdclust_layout *play;

        M0_ENTRY("nw_xfer_request : %p", xfer);

        M0_PRE(xfer != NULL);

        ioo = bob_of(xfer, struct m0_op_io, ioo_nwxfer, &ioo_bobtype);
        nw_xfer_request_bob_init(xfer);
        xfer->nxr_rc = 0;
        xfer->nxr_bytes = 0;
        m0_atomic64_set(&xfer->nxr_iofop_nr, 0);
        m0_atomic64_set(&xfer->nxr_rdbulk_nr, 0);
        xfer->nxr_state = NXS_INITIALIZED;
        xfer->nxr_ops = &xfer_ops;
        m0_mutex_init(&xfer->nxr_lock);

        play = pdlayout_get(ioo);
        bucket_nr = layout_n(play) + 2 * layout_k(play);
        xfer->nxr_rc = tioreqht_htable_init(&xfer->nxr_tioreqs_hash,
                                            bucket_nr);

        M0_LEAVE();
}
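
/*
 * Illustrative sizing (an addition, not part of the original source): for
 * a 4+2 pdclust layout, bucket_nr = N + 2K = 4 + 2 * 2 = 8, i.e. one hash
 * bucket per data, parity and spare target that a parity group can touch.
 */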

M0_INTERNAL void nw_xfer_request_fini(struct nw_xfer_request *xfer)
{
        M0_ENTRY("nw_xfer_request : %p", xfer);

        M0_PRE(xfer != NULL);
        M0_PRE(M0_IN(xfer->nxr_state, (NXS_COMPLETE, NXS_INITIALIZED)));
        M0_LOG(M0_DEBUG, "nw_xfer_request : %p, nxr_rc = %d",
               xfer, xfer->nxr_rc);

        xfer->nxr_ops = NULL;
        m0_mutex_fini(&xfer->nxr_lock);
        nw_xfer_request_bob_fini(xfer);
        tioreqht_htable_fini(&xfer->nxr_tioreqs_hash);

        M0_LEAVE();
}

#undef M0_TRACE_SUBSYSTEM

/*
 *  Local variables:
 *  c-indentation-style: "K&R"
 *  c-basic-offset: 8
 *  tab-width: 8
 *  fill-column: 80
 *  scroll-step: 1
 *  End:
 */
/*
 * vim: tabstop=8 shiftwidth=8 noexpandtab textwidth=80 nowrap
 */
M0_INTERNAL int m0_fid_cmp(const struct m0_fid *fid0, const struct m0_fid *fid1)
Definition: fid.c:170
struct m0_fid crw_pver
Definition: io_fops.h:391
static struct m0_rpc_item * item
Definition: item.c:56
void ** ov_buf
Definition: vec.h:149
void target_ioreq_cancel(struct target_ioreq *ti)
Definition: io_nw_xfer.c:423
int ioo_addb2_mapped
const char * bt_name
Definition: bob.h:73
M0_INTERNAL uint64_t m0__obj_lid(struct m0_obj *obj)
Definition: obj.c:126
struct m0_indexvec pi_ivec
Definition: pg.h:340
Definition: sock.c:887
static m0_bcount_t count
Definition: xcode.c:167
#define ITEM_ARG(item)
Definition: item.h:618
M0_INTERNAL void m0_rpc_bulk_buflist_empty(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:279
struct m0_sm ioo_sm
struct m0_buf crw_di_data_cksum
Definition: io_fops.h:418
struct target_ioreq * dr_tioreq
#define SEG_NR(ivec)
Definition: file.c:393
enum m0_pool_nd_state ti_state
#define m0_tl_endfor
Definition: tlist.h:700
struct m0_fid fid
Definition: di.c:46
struct m0_vec iv_vec
Definition: vec.h:139
M0_INTERNAL int ioreq_fop_init(struct ioreq_fop *fop, struct target_ioreq *ti, enum page_attr pattr)
Definition: io_req_fop.c:977
return M0_RC(rc)
op
Definition: libdemo.c:64
unsigned int op_code
Definition: client.h:650
static struct target_ioreq * target_ioreq_locate(struct nw_xfer_request *xfer, struct m0_fid *fid)
Definition: io_nw_xfer.c:435
static uint32_t unit_size
Definition: layout.c:53
#define M0_ENTRY(...)
Definition: trace.h:170
uint64_t osr_xid
Definition: onwire.h:105
m0_bindex_t * iv_index
Definition: vec.h:141
Definition: filter.py:1
int opcode
Definition: crate.c:301
int m0_obj_layout_id_to_unit_size(uint64_t layout_id)
Definition: obj.c:851
int i
Definition: dir.c:1033
void m0_fop_rpc_machine_set(struct m0_fop *fop, struct m0_rpc_machine *mach)
Definition: fop.c:352
M0_INTERNAL m0_bcount_t m0_rpc_session_get_max_item_payload_size(const struct m0_rpc_session *session)
Definition: session.c:775
m0_pdclust_unit_type
Definition: pdclust.h:89
enum page_attr db_flags
const struct m0_bob_type ioo_bobtype
Definition: io_req.c:153
#define PRIu64
Definition: types.h:58
struct m0_rpc_machine * c_rpc_machine
Definition: conn.h:278
struct m0_fid crw_fid
Definition: io_fops.h:385
Definition: client.h:641
static uint32_t rows_nr(struct m0_pdclust_layout *play)
Definition: file.c:691
M0_INTERNAL bool m0_ext_is_valid(const struct m0_ext *ext)
Definition: ext.c:90
struct nw_xfer_request ioo_nwxfer
#define M0_ERR_INFO(rc, fmt,...)
Definition: trace.h:215
int(* nxo_distribute)(struct nw_xfer_request *xfer)
uint64_t ti_parbytes
return M0_ERR(-EOPNOTSUPP)
static int target_ioreq_init(struct target_ioreq *ti, struct nw_xfer_request *xfer, const struct m0_fid *cobfid, uint64_t ta_obj, struct m0_rpc_session *session, uint64_t size)
Definition: io_nw_xfer.c:1195
M0_INTERNAL bool nw_xfer_request_invariant(const struct nw_xfer_request *xfer)
Definition: io_nw_xfer.c:340
struct m0_op_obj ioo_oo
M0_INTERNAL struct m0_poolmach * ioo_to_poolmach(struct m0_op_io *ioo)
Definition: io.c:75
struct m0_fop if_fop
Definition: io_fops.h:174
Definition: trace.h:482
M0_INTERNAL struct m0_client * m0__op_instance(const struct m0_op *op)
Definition: client.c:236
Definition: cnt.h:36
static int key
Definition: locality.c:283
enum sns_repair_state ioo_sns_state
struct m0_indexvec ioo_ext
void m0_rpc_item_cancel(struct m0_rpc_item *item)
Definition: item.c:932
M0_INTERNAL void ioreq_sm_state_set_locked(struct m0_op_io *ioo, int state)
Definition: io_req.c:193
#define m0_tl_teardown(name, head, obj)
Definition: tlist.h:708
int(* tio_cc_fops_prepare)(struct target_ioreq *ti)
static int nw_xfer_tioreq_get(struct nw_xfer_request *xfer, struct m0_fid *fid, uint64_t ta_obj, struct m0_rpc_session *session, uint64_t size, struct target_ioreq **out)
Definition: io_nw_xfer.c:1351
static uint32_t io_di_size(struct m0_op_io *ioo)
Definition: io_nw_xfer.c:98
struct m0_net_buffer * bb_nbuf
Definition: bulk.h:177
M0_INTERNAL void nw_xfer_request_fini(struct nw_xfer_request *xfer)
Definition: io_nw_xfer.c:2159
enum pargrp_iomap_state pi_state
#define m0_free0(pptr)
Definition: memory.h:77
void(* tio_seg_add)(struct target_ioreq *ti, const struct m0_pdclust_src_addr *src, const struct m0_pdclust_tgt_addr *tgt, m0_bindex_t gob_offset, m0_bcount_t count, struct pargrp_iomap *map)
static uint64_t page_nr(m0_bcount_t size)
Definition: file.c:492
uint32_t * ti_cksum_seg_b_nob
Definition: pg.h:814
M0_INTERNAL size_t m0_io_fop_size_get(struct m0_fop *fop)
Definition: io_fops.c:1589
struct m0_net_transfer_mc rm_tm
Definition: rpc_machine.h:88
m0_bcount_t b_nob
Definition: buf.h:38
static uint64_t page_id(m0_bindex_t offset)
Definition: file.c:686
#define M0_ASSERT(cond)
struct m0_fid ioo_pver
m0_time_t m0_time_now(void)
Definition: time.c:134
struct m0_rpc_item_header2 ri_header
Definition: item.h:193
m0_pool_nd_state
Definition: pool_machine.h:57
static struct m0_pdclust_instance * pdlayout_instance(const struct m0_layout_instance *li)
Definition: file.c:504
const struct nw_xfer_ops * nxr_ops
uint64_t ta_frame
Definition: pdclust.h:254
#define m0_htable_forall(name, var, htable,...)
Definition: hash.h:465
#define bob_of(ptr, type, field, bt)
Definition: bob.h:140
struct m0_bufvec ioo_data
static struct m0_bufvec bvec
Definition: xcode.c:169
M0_INTERNAL int ioreq_fop_async_submit(struct m0_io_fop *iofop, struct m0_rpc_session *session)
Definition: io_req_fop.c:672
static struct m0_stob_domain * dom
Definition: storage.c:38
#define ITEM_FMT
Definition: item.h:617
struct m0_varr dr_pageattrs
uint64_t pi_grpid
static void databufs_set_dgw_mode(struct pargrp_iomap *iomap, struct m0_pdclust_layout *play, struct m0_ext *ext)
Definition: io_nw_xfer.c:1406
struct m0_obj * ioo_obj
M0_INTERNAL bool ioreq_fop_invariant(const struct ioreq_fop *fop)
Definition: io_req_fop.c:62
M0_INTERNAL struct m0_obj_attr * m0_io_attr(struct m0_op_io *ioo)
Definition: utils.c:302
static int nw_xfer_req_dispatch(struct nw_xfer_request *xfer)
Definition: io_nw_xfer.c:1795
enum m0_pbuf_type ioo_pbuf_type
static void irfop_fini(struct ioreq_fop *irfop)
Definition: io_nw_xfer.c:810
const struct m0_rpc_item_type * ri_type
Definition: item.h:200
static uint64_t layout_unit_size(const struct m0_pdclust_layout *play)
Definition: file.c:525
M0_INTERNAL int m0_buf_alloc(struct m0_buf *buf, size_t size)
Definition: buf.c:43
struct m0_sm_group * sm_grp
Definition: sm.h:321
m0_bcount_t crw_cksum_size
Definition: io_fops.h:415
M0_INTERNAL uint32_t m0_fid_cob_device_id(const struct m0_fid *cob_fid)
Definition: fid_convert.c:81
M0_INTERNAL void m0_mutex_init(struct m0_mutex *mutex)
Definition: mutex.c:35
const struct m0_bob_type tioreq_bobtype
Definition: io_nw_xfer.c:49
static void to_op_io_map(const struct m0_op *op, struct m0_op_io *ioo)
Definition: io_nw_xfer.c:70
uint64_t f_container
Definition: fid.h:39
#define M0_POST(cond)
struct m0_0vec bb_zerovec
Definition: bulk.h:179
uint32_t ioo_flags
struct m0_fid oo_fid
M0_INTERNAL void m0_bitmap_set(struct m0_bitmap *map, size_t idx, bool val)
Definition: bitmap.c:139
uint32_t v_nr
Definition: vec.h:51
static m0_bindex_t offset
Definition: dump.c:173
struct m0_indexvec_varr dr_bufvec
struct m0_htable nxr_tioreqs_hash
M0_INTERNAL void m0_buf_free(struct m0_buf *buf)
Definition: buf.c:55
m0_bcount_t * v_count
Definition: vec.h:53
struct m0_rpc_session * ti_session
static const struct m0_di_ops di_ops[M0_DI_NR]
Definition: di.c:128
M0_INTERNAL bool m0_ivec_cursor_move(struct m0_ivec_cursor *cur, m0_bcount_t count)
Definition: vec.c:718
static uint64_t min64u(uint64_t a, uint64_t b)
Definition: arith.h:66
struct m0_tl ti_iofops
struct m0_op_common oo_oc
static void page_pos_get(struct pargrp_iomap *map, m0_bindex_t index, uint32_t *row, uint32_t *col)
Definition: file.c:725
M0_INTERNAL bool m0_ext_is_in(const struct m0_ext *ext, m0_bindex_t index)
Definition: ext.c:48
#define FID_P(f)
Definition: fid.h:77
static uint64_t data_size(const struct m0_pdclust_layout *play)
Definition: file.c:550
static const struct nw_xfer_ops xfer_ops
Definition: io_nw_xfer.c:2123
M0_INTERNAL bool addr_is_network_aligned(void *addr)
Definition: utils.c:29
M0_INTERNAL struct m0_op * m0__ioo_to_op(struct m0_op_io *ioo)
Definition: client.c:249
M0_INTERNAL m0_bcount_t m0_vec_count(const struct m0_vec *vec)
Definition: vec.c:53
const struct target_ioreq_ops * ti_ops
static const uint64_t k2
Definition: hash_fnc.c:35
struct m0_bufvec z_bvec
Definition: vec.h:514
static uint32_t layout_n(const struct m0_pdclust_layout *play)
Definition: file.c:515
static struct m0_pdclust_layout * pdlayout_get(const struct io_request *req)
Definition: file.c:510
static int64_t m0_atomic64_get(const struct m0_atomic64 *a)
const struct m0_bob_type nwxfer_bobtype
Definition: io_nw_xfer.c:48
M0_INTERNAL uint32_t m0_pdclust_size(const struct m0_pdclust_layout *pl)
Definition: pdclust.c:372
uint64_t sa_unit
Definition: pdclust.h:243
int32_t ioo_rc
M0_INTERNAL int m0_rpc_session_validate(struct m0_rpc_session *session)
Definition: session.c:573
M0_INTERNAL size_t m0_rpc_bulk_buf_length(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:550
uint64_t ti_databytes
struct m0_pdclust_tgt_addr tgt
Definition: fd.c:110
static uint8_t fail[DATA_UNIT_COUNT_MAX+PARITY_UNIT_COUNT_MAX]
M0_INTERNAL int64_t m0_ref_read(const struct m0_ref *ref)
Definition: refs.c:44
struct target_ioreq * irf_tioreq
Definition: pg.h:881
#define M0_CNT_INC(cnt)
Definition: arith.h:226
m0_bcount_t ti_cksum_copied
Definition: pg.h:811
#define M0_FI_ENABLED(tag)
Definition: finject.h:231
struct m0_ref f_ref
Definition: fop.h:80
Definition: ext.h:37
Definition: fid.h:38
m0_bindex_t e_start
Definition: ext.h:39
M0_INTERNAL void ioreq_fop_fini(struct ioreq_fop *fop)
Definition: io_req_fop.c:1036
struct m0_fid ti_fid
static struct m0_layout_instance * layout_instance(const struct io_request *req)
Definition: file.c:498
#define M0_ALLOC_PTR(ptr)
Definition: memory.h:86
struct cc_req_fop ti_cc_fop
static bool should_unit_be_truncated(bool partial, enum m0_pdclust_unit_type unit_type, enum page_attr flags)
Definition: io_nw_xfer.c:458
struct m0_mutex nxr_lock
m0_bindex_t ti_goff
Definition: pg.h:785
M0_HT_DESCR_DEFINE(tioreqht, "Hash of target_ioreq objects", M0_INTERNAL, struct target_ioreq, ti_link, ti_magic, M0_TIOREQ_MAGIC, M0_TLIST_HEAD_MAGIC, ti_fid.f_container, tioreqs_hash_func, tioreq_key_eq)
struct m0_bufvec dr_auxbufvec
Definition: pg.h:707
enum nw_xfer_state nxr_state
static void m0_op_io_to_rpc_map(const struct m0_op_io *ioo, const struct m0_rpc_item *item)
Definition: io_nw_xfer.c:80
static int nw_xfer_io_distribute(struct nw_xfer_request *xfer)
Definition: io_nw_xfer.c:1460
struct m0_rpc_item * m0_fop_to_rpc_item(const struct m0_fop *fop)
Definition: fop.c:338
static void parity_page_pos_get(struct pargrp_iomap *map, m0_bindex_t index, uint32_t *row, uint32_t *col)
Definition: io_nw_xfer.c:124
M0_INTERNAL bool m0_bitmap_get(const struct m0_bitmap *map, size_t idx)
Definition: bitmap.c:105
M0_INTERNAL enum m0_pdclust_unit_type m0_pdclust_unit_classify(const struct m0_pdclust_layout *pl, int unit)
Definition: pdclust.c:425
m0_bcount_t size
Definition: di.c:39
page_attr
#define _0C(exp)
Definition: assert.h:311
M0_INTERNAL m0_bindex_t m0_ivec_cursor_index(const struct m0_ivec_cursor *cur)
Definition: vec.c:733
struct data_buf *** pi_databufs
M0_INTERNAL void m0_mutex_fini(struct m0_mutex *mutex)
Definition: mutex.c:42
struct m0_indexvec dr_ivec
Definition: pg.h:700
void m0_fop_put_lock(struct m0_fop *fop)
Definition: fop.c:199
struct m0_atomic64 nxr_iofop_nr
static struct m0_fop * fop
Definition: item.c:57
M0_INTERNAL int32_t m0_net_domain_get_max_buffer_segments(struct m0_net_domain *dom)
static struct m0 instance
Definition: main.c:78
M0_INTERNAL void m0_bitmap_reset(struct m0_bitmap *map)
Definition: bitmap.c:149
static struct m0_be_seg * seg
Definition: btree.c:40
uint64_t ioo_iomap_nr
static uint32_t ioreq_sm_state(const struct io_request *req)
Definition: file.c:975
static int target_ioreq_iofops_prepare(struct target_ioreq *ti, enum page_attr filter)
Definition: io_nw_xfer.c:843
M0_INTERNAL void m0_ext_intersection(const struct m0_ext *e0, const struct m0_ext *e1, struct m0_ext *result)
Definition: ext.c:81
M0_INTERNAL struct m0_fid target_fid(struct m0_op_io *ioo, struct m0_pdclust_tgt_addr *tgt)
Definition: io_nw_xfer.c:710
struct nw_xfer_request * ti_nwxfer
#define out(...)
Definition: gen.c:41
Definition: file.h:81
M0_INTERNAL uint64_t m0__page_size(const struct m0_op_io *ioo)
Definition: utils.c:41
uint64_t oa_layout_id
Definition: client.h:752
M0_INTERNAL bool m0_is_read_fop(const struct m0_fop *fop)
Definition: io_fops.c:916
static bool tioreq_key_eq(const void *key1, const void *key2)
Definition: io_nw_xfer.c:299
struct m0_fid gfid
Definition: dir.c:626
M0_INTERNAL struct m0_fop_cob_rw * io_rw_get(struct m0_fop *fop)
Definition: io_fops.c:1037
Definition: pg.h:859
static int32_t min32(int32_t a, int32_t b)
Definition: arith.h:36
M0_INTERNAL bool m0_is_write_fop(const struct m0_fop *fop)
Definition: io_fops.c:922
M0_INTERNAL bool m0_fid_is_valid(const struct m0_fid *fid)
Definition: fid.c:96
void target_ioreq_fini(struct target_ioreq *ti)
Definition: io_nw_xfer.c:364
M0_INTERNAL void m0_fd_fwd_map(struct m0_pdclust_instance *pi, const struct m0_pdclust_src_addr *src, struct m0_pdclust_tgt_addr *tgt)
Definition: fd.c:838
struct m0_indexvec ti_goff_ivec
Definition: pg.h:820
M0_INTERNAL int m0_io_fop_prepare(struct m0_fop *fop)
Definition: io_fops.c:1513
struct m0_bufvec ti_auxbufvec
Definition: pg.h:807
struct m0_rpc_machine * ri_rmachine
Definition: item.h:160
static void m0_atomic64_add(struct m0_atomic64 *a, int64_t num)
M0_INTERNAL int m0_rpc_bulk_buf_add(struct m0_rpc_bulk *rbulk, uint32_t segs_nr, m0_bcount_t length, struct m0_net_domain *netdom, struct m0_net_buffer *nb, struct m0_rpc_bulk_buf **out)
Definition: bulk.c:291
M0_INTERNAL uint64_t m0_sm_id_get(const struct m0_sm *sm)
Definition: sm.c:1021
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
void m0_free(void *data)
Definition: memory.c:146
#define m0_htable_endfor
Definition: hash.h:491
static const struct target_ioreq_ops tioreq_ops
Definition: io_nw_xfer.c:1167
struct m0_rpc_item f_item
Definition: fop.h:83
uint32_t sm_state
Definition: sm.h:307
struct m0_bufvec ioo_attr
struct m0_pdclust_src_addr src
Definition: fd.c:108
struct dgmode_rwvec * ti_dgvec
int32_t rc
Definition: trigger_fop.h:47
uint64_t h_bucket_nr
Definition: hash.h:178
static uint32_t io_desc_size(struct m0_net_domain *ndom)
Definition: file.c:6439
struct m0_indexvec ti_ivec
Definition: pg.h:793
#define M0_POST_EX(cond)
#define offsetof(typ, memb)
Definition: misc.h:29
M0_INTERNAL bool m0_sm_group_is_locked(const struct m0_sm_group *grp)
Definition: sm.c:107
M0_INTERNAL void m0_poolmach_gob2cob(struct m0_poolmach *pm, const struct m0_fid *gfid, uint32_t idx, struct m0_fid *cob_fid)
struct m0_rpc_conn * s_conn
Definition: session.h:312
static uint64_t target_offset(uint64_t frame, struct m0_pdclust_layout *play, m0_bindex_t gob_offset)
Definition: file.c:571
int(* tio_iofops_prepare)(struct target_ioreq *ti, enum page_attr filter)
Definition: fop.h:79
struct pargrp_iomap ** ioo_iomaps
const struct m0_di_ops * fi_di_ops
Definition: file.h:92
uint64_t crw_flags
Definition: io_fops.h:413
#define FID_F
Definition: fid.h:75
m0_time_t ti_start_time
Definition: pg.h:759
Definition: vec.h:145
M0_INTERNAL void m0_file_init(struct m0_file *file, const struct m0_fid *fid, struct m0_rm_domain *dom, enum m0_di_types di_type)
Definition: file.c:477
static void m0_atomic64_set(struct m0_atomic64 *a, int64_t num)
M0_INTERNAL void * m0_extent_vec_get_checksum_addr(void *cksum_buf_vec, m0_bindex_t off, void *ivec, m0_bindex_t unit_sz, m0_bcount_t cs_sz)
Definition: cksum_utils.c:107
m0_bcount_t b_nob
Definition: buf.h:230
Definition: idx_mock.c:47
#define m0_tl_forall(name, var, head,...)
Definition: tlist.h:735
static void target_ioreq_seg_add(struct target_ioreq *ti, const struct m0_pdclust_src_addr *src, const struct m0_pdclust_tgt_addr *tgt, m0_bindex_t gob_offset, m0_bcount_t count, struct pargrp_iomap *map)
Definition: io_nw_xfer.c:479