Motr  M0
io_fops.c
Go to the documentation of this file.
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2013-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
23 #define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_IOSERVICE
24 #include "lib/trace.h"
25 
26 #include "lib/errno.h"
27 #include "lib/memory.h"
28 #include "lib/vec.h" /* m0_0vec */
29 #include "lib/misc.h" /* M0_IN */
30 #include "lib/tlist.h"
31 #include "reqh/reqh.h"
32 #include "motr/magic.h"
33 #include "fop/fop_item_type.h"
34 #include "rpc/item.h"
35 #include "rpc/rpc_opcodes.h"
36 #include "rpc/rpc.h"
37 #include "fop/fom_generic.h"
38 #include "file/file.h"
39 #include "lib/finject.h"
40 #include "cob/cob.h"
41 #include "mdservice/fsync_foms.h" /* m0_fsync_fom_conf */
42 #include "mdservice/fsync_fops.h" /* m0_fsync_fom_ops */
43 #include "mdservice/fsync_fops_xc.h" /* m0_fop_fsync_xc */
44 #include "ioservice/io_addb2.h"
45 #include "ioservice/io_foms.h"
46 #include "ioservice/io_fops.h"
47 #include "ioservice/io_fops_xc.h"
48 #include "ioservice/cob_foms.h"
49 #ifndef __KERNEL__
50  #include "motr/client_internal.h"
51 #else
53 #endif
54 
55 /* tlists and tlist APIs referred from rpc layer. */
56 M0_TL_DESCR_DECLARE(rpcbulk, M0_EXTERN);
57 M0_TL_DESCR_DECLARE(rpcitem, M0_EXTERN);
58 M0_TL_DECLARE(rpcbulk, M0_INTERNAL, struct m0_rpc_bulk_buf);
59 M0_TL_DECLARE(rpcitem, M0_INTERNAL, struct m0_rpc_item);
60 
61 static struct m0_fid *io_fop_fid_get(struct m0_fop *fop);
62 
63 static void io_item_replied (struct m0_rpc_item *item);
64 static void io_fop_replied (struct m0_fop *fop, struct m0_fop *bkpfop);
65 static void io_fop_desc_get (struct m0_fop *fop,
66  struct m0_net_buf_desc_data **desc);
67 static int io_fop_coalesce (struct m0_fop *res_fop, uint64_t size);
68 static void item_io_coalesce(struct m0_rpc_item *head, struct m0_list *list,
69  uint64_t size);
70 
85 
86 M0_EXPORTED(m0_fop_cob_writev_fopt);
87 M0_EXPORTED(m0_fop_cob_readv_fopt);
88 
89 static struct m0_fop_type *ioservice_fops[] = {
104 };
105 
106 /* Used for IO REQUEST items only. */
109 };
110 
111 static const struct m0_rpc_item_type_ops io_item_type_ops = {
113  .rito_io_coalesce = item_io_coalesce,
114 };
115 
117  struct m0_fol *fol)
118 {
119  struct m0_fop_cob_writev_rep *wfop;
120 
121  M0_PRE(frag != NULL);
122 
123  wfop = frag->ffrp_rep;
124  switch(frag->ffrp_fop_code) {
126  M0_ASSERT(wfop->c_rep.rwr_rc == 0);
127  break;
128  }
129  return 0;
130 }
131 
132 M0_INTERNAL void m0_dump_cob_attr(const struct m0_cob_attr *attr)
133 {
134  uint32_t valid = attr->ca_valid;
135 #define level M0_DEBUG
136  M0_LOG(level, "pfid = "FID_F, FID_P(&attr->ca_pfid));
137  M0_LOG(level, "tfid = "FID_F, FID_P(&attr->ca_tfid));
138  if (valid & M0_COB_MODE)
139  M0_LOG(level, "mode = %o", attr->ca_mode);
140  if (valid & M0_COB_UID)
141  M0_LOG(level, "uid = %u", attr->ca_uid);
142  if (valid & M0_COB_GID)
143  M0_LOG(level, "gid = %u", attr->ca_gid);
144  if (valid & M0_COB_ATIME)
145  M0_LOG(level, "atime = %llu",
146  (unsigned long long)attr->ca_atime);
147  if (valid & M0_COB_MTIME)
148  M0_LOG(level, "mtime = %llu",
149  (unsigned long long)attr->ca_mtime);
150  if (valid & M0_COB_CTIME)
151  M0_LOG(level, "ctime = %llu",
152  (unsigned long long)attr->ca_ctime);
153  if (valid & M0_COB_NLINK)
154  M0_LOG(level, "nlink = %u", attr->ca_nlink);
155  if (valid & M0_COB_RDEV)
156  M0_LOG(level, "rdev = %llu", (unsigned long long)attr->ca_rdev);
157  if (valid & M0_COB_SIZE)
158  M0_LOG(level, "size = %llu", (unsigned long long)attr->ca_size);
159  if (valid & M0_COB_BLKSIZE)
160  M0_LOG(level, "blksize = %llu",
161  (unsigned long long)attr->ca_blksize);
162  if (valid & M0_COB_BLOCKS)
163  M0_LOG(level, "blocks = %llu",
164  (unsigned long long)attr->ca_blocks);
165  if (valid & M0_COB_LID)
166  M0_LOG(level, "lid = %llu", (unsigned long long)attr->ca_lid);
167  if (valid & M0_COB_PVER)
168  M0_LOG(level, "pver = "FID_F, FID_P(&attr->ca_pver));
169 #undef level
170 }
171 
172 #ifndef __KERNEL__
175 M0_BASSERT(sizeof(struct m0_fop_cob_create) ==
176  sizeof(struct m0_fop_cob_delete));
177 
178 static int io_fol_cd_rec_frag_op(struct m0_fop_fol_frag *frag,
179  struct m0_fol *fol, bool undo)
180 {
181  int result;
182  struct m0_fop *fop;
183  struct m0_reqh *reqh = container_of(fol, struct m0_reqh, rh_fol);
184  struct m0_fom *fom;
185  int delete;
186  struct m0_rpc_machine *rpcmach;
187 
188  M0_PRE(reqh != NULL);
189  M0_PRE(frag != NULL);
192 
193  rpcmach = m0_reqh_rpc_mach_tlist_head(&reqh->rh_rpc_machines);
194  M0_ASSERT(rpcmach != NULL);
195 
197  if (undo)
198  delete = 1 - delete;
199  fop = m0_fop_alloc(delete ?
201  frag->ffrp_fop, rpcmach);
202  result = fop != NULL ? m0_cob_fom_create(fop, &fom, reqh) : -ENOMEM;
203  if (result == 0) {
204  fom->fo_local = true;
205  m0_fom_queue(fom);
206  }
207  return result;
208 }
209 #else
/*
 * Kernel-space stub: replay of cob create/delete FOL record fragments is
 * only performed in user space, so this variant does nothing.
 * @frag, @fol and @undo are intentionally unused; always returns 0.
 */
static int io_fol_cd_rec_frag_op(struct m0_fop_fol_frag *frag,
				 struct m0_fol *fol, bool undo)
{
	return 0;
}
215 #endif
216 
217 static int io_fol_cd_rec_frag_undo(struct m0_fop_fol_frag *frag,
218  struct m0_fol *fol)
219 {
220  return io_fol_cd_rec_frag_op(frag, fol, true);
221 }
222 
223 static int io_fol_cd_rec_frag_redo(struct m0_fop_fol_frag *frag,
224  struct m0_fol *fol)
225 {
226  return io_fol_cd_rec_frag_op(frag, fol, false);
227 }
228 
231  .fto_io_coalesce = io_fop_coalesce,
232  .fto_io_desc_get = io_fop_desc_get,
233  .fto_undo = io_fol_frag_undo_redo_op,
234  .fto_redo = io_fol_frag_undo_redo_op,
235 };
236 
239  .fto_redo = io_fol_cd_rec_frag_redo,
240 };
241 
242 extern struct m0_reqh_service_type m0_ios_type;
243 extern const struct m0_fom_type_ops cob_fom_type_ops;
244 extern const struct m0_fom_type_ops io_fom_type_ops;
245 
246 extern struct m0_sm_conf io_conf;
247 extern struct m0_sm_state_descr io_phases[];
248 extern const struct m0_sm_conf cob_ops_conf;
249 extern struct m0_sm_state_descr cob_ops_phases[];
250 
/*
 * Finalises all ioservice fop types registered by m0_ioservice_fop_init().
 * NOTE(review): the per-fop-type m0_fop_type_fini() calls are elided in
 * this extracted listing — confirm against the full source.
 */
M0_INTERNAL void m0_ioservice_fop_fini(void)
{


#ifndef __KERNEL__
#endif
}
280 
/**
 * Registers all ioservice fop types: read/write requests and replies,
 * cob create/delete/truncate/getattr/setattr and their replies, and the
 * ioservice fsync fop.  User-space builds use the full cob/io state
 * machine configurations; kernel builds fall back to m0_generic_conf.
 *
 * NOTE(review): the surrounding M0_FOP_TYPE_INIT()/..._XC_OBJ() macro
 * invocations are partially elided in this extracted listing — only the
 * designated-initializer argument fragments are visible below.
 *
 * @return 0 on success, negative errno on registration failure.
 */
M0_INTERNAL int m0_ioservice_fop_init(void)
{
	const struct m0_sm_conf *p_cob_ops_conf;
#ifndef __KERNEL__
	p_cob_ops_conf = &cob_ops_conf;



#else
	p_cob_ops_conf = &m0_generic_conf;
#endif
			 .name = "read",
			 .xt = m0_fop_cob_readv_xc,
			 .rpc_flags = M0_RPC_ITEM_TYPE_REQUEST,
			 .fop_ops = &io_fop_rwv_ops,
#ifndef __KERNEL__
			 .fom_ops = &io_fom_type_ops,
			 .sm = &io_conf,
			 .svc_type = &m0_ios_type,
#endif

			 .name = "write",
			 .xt = m0_fop_cob_writev_xc,
			 .rpc_flags = M0_RPC_MUTABO_REQ,
			 .fop_ops = &io_fop_rwv_ops,
#ifndef __KERNEL__
			 .fom_ops = &io_fom_type_ops,
			 .sm = &io_conf,
			 .svc_type = &m0_ios_type,
#endif

			 .name = "read-reply",
			 .xt = m0_fop_cob_readv_rep_xc,
			 .rpc_flags = M0_RPC_ITEM_TYPE_REPLY);

			 .name = "write-reply",
			 .xt = m0_fop_cob_writev_rep_xc,
			 .rpc_flags = M0_RPC_ITEM_TYPE_REPLY);

			 .name = "cob-create",
			 .xt = m0_fop_cob_create_xc,
			 .rpc_flags = M0_RPC_MUTABO_REQ,
			 .fop_ops = &io_fop_cd_ops,
#ifndef __KERNEL__
			 .fom_ops = &cob_fom_type_ops,
			 .svc_type = &m0_ios_type,
#endif
			 .sm = p_cob_ops_conf);

			 .name = "cob-delete",
			 .xt = m0_fop_cob_delete_xc,
			 .rpc_flags = M0_RPC_MUTABO_REQ,
			 .fop_ops = &io_fop_cd_ops,
#ifndef __KERNEL__
			 .fom_ops = &cob_fom_type_ops,
			 .svc_type = &m0_ios_type,
#endif
			 .sm = p_cob_ops_conf);

			 .name = "cob-truncate",
			 .xt = m0_fop_cob_truncate_xc,
			 .rpc_flags = M0_RPC_MUTABO_REQ,
			 .fop_ops = &io_fop_cd_ops,
#ifndef __KERNEL__
			 .fom_ops = &cob_fom_type_ops,
			 .svc_type = &m0_ios_type,
#endif
			 .sm = p_cob_ops_conf);

			 .name = "cob-reply",
			 .xt = m0_fop_cob_op_reply_xc,
			 .rpc_flags = M0_RPC_ITEM_TYPE_REPLY);

			 .name = "getattr",
			 .xt = m0_fop_cob_getattr_xc,
			 .rpc_flags = M0_RPC_ITEM_TYPE_REQUEST,
#ifndef __KERNEL__
			 .fom_ops = &cob_fom_type_ops,
			 .svc_type = &m0_ios_type,
#endif
			 .sm = p_cob_ops_conf);

			 .name = "getattr-reply",
			 .xt = m0_fop_cob_getattr_reply_xc,
			 .rpc_flags = M0_RPC_ITEM_TYPE_REPLY);

			 .name = "fsync-ios",
			 .xt = m0_fop_fsync_xc,
#ifndef __KERNEL__
			 .svc_type = &m0_ios_type,
			 .sm = &m0_fsync_fom_conf,
			 .fom_ops = &m0_fsync_fom_ops,
#endif
			 .rpc_flags = M0_RPC_ITEM_TYPE_REQUEST);
			 .name = "setattr",
			 .xt = m0_fop_cob_setattr_xc,
			 .rpc_flags = M0_RPC_MUTABO_REQ,
			 .fop_ops = NULL,
#ifndef __KERNEL__
			 .fom_ops = &cob_fom_type_ops,
			 .svc_type = &m0_ios_type,
#endif
			 .sm = p_cob_ops_conf);

			 .name = "setattr-reply",
			 .xt = m0_fop_cob_setattr_reply_xc,
			 .rpc_flags = M0_RPC_ITEM_TYPE_REPLY);

}
435 
/*
 * A single contiguous IO segment: an extent of a target object together
 * with the in-memory buffer holding its data.  Segments live on
 * io_seg_set::iss_list while io fops are being coalesced.
 */
struct ioseg {
	/* Magic constant to verify sanity of structure. */
	uint64_t is_magic;
	/* Index in target object to start io from. */
	/* NOTE(review): the is_index and is_size field declarations are
	 * elided in this extracted listing; see ioseg_get() for usage. */
	/* Number of bytes in io segment. */
	/* Starting address of buffer. */
	void *is_buf;
	/*
	 * Linkage to have such IO segments in a list hanging off
	 * io_seg_set::iss_list.
	 */
	/* NOTE(review): is_linkage (tlink) declaration elided in listing. */
};
830 
/* Set of coalesced IO segments, kept sorted by starting offset. */
struct io_seg_set {
	/* Magic constant for list sanity checking. */
	uint64_t iss_magic;
	/* List of struct ioseg (linked through ioseg::is_linkage). */
	struct m0_tl iss_list;
};
838 
839 M0_TL_DESCR_DEFINE(iosegset, "list of coalesced io segments", static,
840  struct ioseg, is_linkage, is_magic,
842 
843 M0_TL_DEFINE(iosegset, static, struct ioseg);
844 
845 static void ioseg_get(const struct m0_0vec *zvec, uint32_t seg_index,
846  struct ioseg *seg)
847 {
848  M0_PRE(zvec != NULL);
849  M0_PRE(seg_index < zvec->z_bvec.ov_vec.v_nr);
850  M0_PRE(seg != NULL);
851 
852  seg->is_index = zvec->z_index[seg_index];
853  seg->is_size = zvec->z_bvec.ov_vec.v_count[seg_index];
854  seg->is_buf = zvec->z_bvec.ov_buf[seg_index];
855 }
856 
/*
 * Sanity check for m0_io_fop: non-NULL, magic intact, and the embedded
 * fop is of one of the registered ioservice fop types.
 * NOTE(review): the m0_forall() line iterating ioservice_fops[] is
 * elided in this extracted listing, leaving its trailing expression.
 */
static bool io_fop_invariant(struct m0_io_fop *iofop)
{
	return _0C(iofop != NULL) &&
	       _0C(iofop->if_magic == M0_IO_FOP_MAGIC) &&
		       iofop->if_fop.f_type == ioservice_fops[i]));
}
864 
/**
 * Initialises an io fop wrapper: sets up the embedded fop of type
 * @ftype, allocates its data, initialises the rpc-bulk object and
 * records @gfid in the rw payload.  For write fops the M0_IO_FLAG_CROW
 * flag is set (create-on-write).
 *
 * @param iofop       wrapper to initialise; must be non-NULL.
 * @param gfid        global file identifier copied into the payload.
 * @param ftype       fop type (read or write); must be non-NULL.
 * @param fop_release optional release callback for the embedded fop.
 * @return 0 on success, -ENOMEM if fop data allocation fails.
 */
M0_INTERNAL int m0_io_fop_init(struct m0_io_fop *iofop,
			       const struct m0_fid *gfid,
			       struct m0_fop_type *ftype,
			       void (*fop_release)(struct m0_ref *))
{
	int                   rc;
	struct m0_fop_cob_rw *rw;

	M0_PRE(iofop != NULL);
	M0_PRE(ftype != NULL);
	M0_PRE(gfid != NULL);

	M0_LOG(M0_DEBUG, "iofop %p", iofop);

	m0_fop_init(&iofop->if_fop, ftype, NULL,
	/* NOTE(review): trailing m0_fop_init() arguments (presumably the
	 * release callback) are elided in this extracted listing. */
	rc = m0_fop_data_alloc(&iofop->if_fop);
	if (rc == 0) {
		/* NOTE(review): one statement elided here in the listing. */
		iofop->if_magic = M0_IO_FOP_MAGIC;

		m0_rpc_bulk_init(&iofop->if_rbulk);
		rw = io_rw_get(&iofop->if_fop);
		rw->crw_gfid = *gfid;
		if (ftype == &m0_fop_cob_writev_fopt)
			rw->crw_flags |= M0_IO_FLAG_CROW;

		M0_POST(io_fop_invariant(iofop));
	}
	return M0_RC(rc);
}
896 
/*
 * Finalises an io fop wrapper: the rpc-bulk object is finalised first,
 * then the embedded fop.
 */
M0_INTERNAL void m0_io_fop_fini(struct m0_io_fop *iofop)
{
	M0_PRE(io_fop_invariant(iofop));
	m0_rpc_bulk_fini(&iofop->if_rbulk);
	m0_fop_fini(&iofop->if_fop);
}
903 
904 M0_INTERNAL struct m0_rpc_bulk *m0_fop_to_rpcbulk(const struct m0_fop *fop)
905 {
906  struct m0_io_fop *iofop;
907 
908  M0_PRE(fop != NULL);
909 
910  iofop = container_of(fop, struct m0_io_fop, if_fop);
911  return &iofop->if_rbulk;
912 }
913 
916 M0_INTERNAL bool m0_is_read_fop(const struct m0_fop *fop)
917 {
918  M0_PRE(fop != NULL);
919  return fop->f_type == &m0_fop_cob_readv_fopt;
920 }
921 
922 M0_INTERNAL bool m0_is_write_fop(const struct m0_fop *fop)
923 {
924  M0_PRE(fop != NULL);
925  return fop->f_type == &m0_fop_cob_writev_fopt;
926 }
927 
/*
 * True iff @fop is an IO request fop (readv or writev).
 * NOTE(review): the return expression is elided in this extracted
 * listing — presumably m0_is_read_fop(fop) || m0_is_write_fop(fop).
 */
M0_INTERNAL bool m0_is_io_fop(const struct m0_fop *fop)
{
}
932 
/*
 * True iff @fop is a read-reply fop.
 * NOTE(review): return expression elided in this listing — presumably
 * a comparison with &m0_fop_cob_readv_rep_fopt.
 */
M0_INTERNAL bool m0_is_read_rep(const struct m0_fop *fop)
{
	M0_PRE(fop != NULL);
}
938 
/*
 * True iff @fop is a write-reply fop.
 * NOTE(review): return expression elided in this listing — presumably
 * a comparison with &m0_fop_cob_writev_rep_fopt.
 */
M0_INTERNAL bool m0_is_write_rep(const struct m0_fop *fop)
{
	M0_PRE(fop != NULL);
}
944 
/*
 * True iff @fop is an IO reply fop (read-reply or write-reply).
 * NOTE(review): body elided in this extracted listing — presumably
 * m0_is_read_rep(fop) || m0_is_write_rep(fop).
 */
M0_INTERNAL bool m0_is_io_fop_rep(const struct m0_fop *fop)
{
}
949 
/*
 * True iff @fop is a cob-create fop.
 * NOTE(review): return expression elided in this listing — presumably
 * a comparison with &m0_fop_cob_create_fopt.
 */
M0_INTERNAL bool m0_is_cob_create_fop(const struct m0_fop *fop)
{
	M0_PRE(fop != NULL);
}
956 
/*
 * True iff @fop is a cob-delete fop.
 * NOTE(review): return expression elided in this listing — presumably
 * a comparison with &m0_fop_cob_delete_fopt.
 */
M0_INTERNAL bool m0_is_cob_delete_fop(const struct m0_fop *fop)
{
	M0_PRE(fop != NULL);
}
963 
/*
 * True iff @fop is a cob-truncate fop.
 * NOTE(review): return expression elided in this listing — presumably
 * a comparison with &m0_fop_cob_truncate_fopt (cf. the direct f_type
 * comparison in m0_cobfop_common_get()).
 */
M0_INTERNAL bool m0_is_cob_truncate_fop(const struct m0_fop *fop)
{
	M0_PRE(fop != NULL);
}
970 
/*
 * True iff @fop is a cob-getattr fop.
 * NOTE(review): return expression elided in this listing — presumably
 * a comparison with &m0_fop_cob_getattr_fopt.
 */
M0_INTERNAL bool m0_is_cob_getattr_fop(const struct m0_fop *fop)
{
	M0_PRE(fop != NULL);
}
977 
/*
 * True iff @fop is a cob-setattr fop.
 * NOTE(review): return expression elided in this listing — presumably
 * a comparison with &m0_fop_cob_setattr_fopt.
 */
M0_INTERNAL bool m0_is_cob_setattr_fop(const struct m0_fop *fop)
{
	M0_PRE(fop != NULL);
}
984 
/*
 * True iff @fop is either a cob-create or a cob-delete fop.
 * NOTE(review): body elided in this extracted listing — presumably
 * m0_is_cob_create_fop(fop) || m0_is_cob_delete_fop(fop).
 */
M0_INTERNAL bool m0_is_cob_create_delete_fop(const struct m0_fop *fop)
{
}
989 
990 M0_INTERNAL struct m0_fop_cob_common *m0_cobfop_common_get(struct m0_fop *fop)
991 {
992  struct m0_fop_cob_create *cc;
993  struct m0_fop_cob_delete *cd;
994  struct m0_fop_cob_truncate *ct;
995  struct m0_fop_cob_getattr *cg;
996  struct m0_fop_cob_setattr *cs;
997 
998  M0_PRE(fop != NULL);
999  M0_PRE(fop->f_type != NULL);
1000 
1001  if (m0_is_cob_create_fop(fop)) {
1002  cc = m0_fop_data(fop);
1003  return &cc->cc_common;
1004  } else if (m0_is_cob_delete_fop(fop)) {
1005  cd = m0_fop_data(fop);
1006  return &cd->cd_common;
1007  } else if (fop->f_type == &m0_fop_cob_truncate_fopt) {
1008  ct = m0_fop_data(fop);
1009  return &ct->ct_common;
1010  } else if (m0_is_cob_getattr_fop(fop)) {
1011  cg = m0_fop_data(fop);
1012  return &cg->cg_common;
1013  } else if (m0_is_cob_setattr_fop(fop)) {
1014  cs = m0_fop_data(fop);
1015  return &cs->cs_common;
1016  } else
1017  M0_IMPOSSIBLE("Invalid fop type!");
1018 }
1019 
/*
 * Computes how many net-buffer segments are needed to carry the bytes
 * recorded in the @index-th net buf descriptor of @fop: used size
 * divided by the maximum segment size, rounded up.
 * NOTE(review): the initialisation of max_seg_size is elided in this
 * extracted listing — presumably derived from the net domain limits.
 */
M0_INTERNAL uint32_t m0_io_fop_segs_nr(struct m0_fop *fop, uint32_t index)
{
	struct m0_fop_cob_rw *rwfop;
	m0_bcount_t           used_size;
	uint32_t              segs_nr;
	m0_bcount_t           max_seg_size;

	rwfop = io_rw_get(fop);
	used_size = rwfop->crw_desc.id_descs[index].bdd_used;
	/* Round up: partial trailing segment still needs a slot. */
	segs_nr = (used_size + max_seg_size - 1) / max_seg_size;
	M0_LOG(M0_DEBUG, "segs_nr %d", segs_nr);

	return segs_nr;
}
1036 
/*
 * Returns the common rw payload embedded in a read or write fop.
 * Asserts that @fop actually is an IO fop before touching its data.
 * NOTE(review): the trailing arguments of M0_ASSERT_INFO() are elided
 * in this extracted listing.
 */
M0_INTERNAL struct m0_fop_cob_rw *io_rw_get(struct m0_fop *fop)
{
	struct m0_fop_cob_readv  *rfop;
	struct m0_fop_cob_writev *wfop;

	M0_PRE(fop != NULL);
	M0_ASSERT_INFO(m0_is_io_fop(fop), "%s %i %i",
		       fop->f_type != NULL ? fop->f_type->ft_name : "untyped",

	if (m0_is_read_fop(fop)) {
		rfop = m0_fop_data(fop);
		return &rfop->c_rwv;
	} else {
		wfop = m0_fop_data(fop);
		return &wfop->c_rwv;
	}
}
1055 
/*
 * Returns the common rw reply payload embedded in a read-reply or
 * write-reply fop.
 * NOTE(review): a second precondition line is elided in this listing —
 * presumably M0_PRE(m0_is_io_fop_rep(fop)).
 */
M0_INTERNAL struct m0_fop_cob_rw_reply *io_rw_rep_get(struct m0_fop *fop)
{
	struct m0_fop_cob_readv_rep  *rfop;
	struct m0_fop_cob_writev_rep *wfop;

	M0_PRE(fop != NULL);

	if (m0_is_read_rep(fop)) {
		rfop = m0_fop_data(fop);
		return &rfop->c_rep;
	} else {
		wfop = m0_fop_data(fop);
		return &wfop->c_rep;
	}
}
1072 
1073 static struct m0_0vec *io_0vec_get(struct m0_rpc_bulk_buf *rbuf)
1074 {
1075  M0_PRE(rbuf != NULL);
1076 
1077  return &rbuf->bb_zerovec;
1078 }
1079 
/*
 * Removes @ioseg from the coalescing list it is linked on and frees it.
 * The segment must actually be on a list (checked by precondition);
 * the unlink must precede the free.
 */
static void ioseg_unlink_free(struct ioseg *ioseg)
{
	M0_PRE(ioseg != NULL);
	M0_PRE(iosegset_tlink_is_in(ioseg));

	iosegset_tlist_del(ioseg);
	m0_free(ioseg);
}
1088 
1092 __attribute__((unused))
1093 static bool io_fop_type_equal(const struct m0_fop *fop1,
1094  const struct m0_fop *fop2)
1095 {
1096  M0_PRE(fop1 != NULL);
1097  M0_PRE(fop2 != NULL);
1098 
1099  return fop1->f_type == fop2->f_type;
1100 }
1101 
1102 static int io_fop_seg_init(struct ioseg **ns, const struct ioseg *cseg)
1103 {
1104  struct ioseg *new_seg = 0;
1105 
1106  M0_PRE(ns != NULL);
1107  M0_PRE(cseg != NULL);
1108 
1109  M0_ALLOC_PTR(new_seg);
1110  if (new_seg == NULL)
1111  return M0_ERR(-ENOMEM);
1112 
1113  *ns = new_seg;
1114  M0_ASSERT(new_seg != NULL); /* suppress compiler warning on next stmt */
1115  *new_seg = *cseg;
1116  iosegset_tlink_init(new_seg);
1117  return 0;
1118 }
1119 
1120 static int io_fop_seg_add_cond(struct ioseg *cseg, const struct ioseg *nseg)
1121 {
1122  int rc;
1123  struct ioseg *new_seg;
1124 
1125  M0_PRE(cseg != NULL);
1126  M0_PRE(nseg != NULL);
1127 
1128  if (nseg->is_index < cseg->is_index) {
1129  rc = io_fop_seg_init(&new_seg, nseg);
1130  if (rc != 0)
1131  return M0_RC(rc);
1132 
1133  iosegset_tlist_add_before(cseg, new_seg);
1134  } else
1135  rc = -EINVAL;
1136 
1137  return M0_RC(rc);
1138 }
1139 
/*
 * Merges @seg into @aggr_set, keeping the set ordered by increasing
 * offset; if no position before an existing segment is found, a copy
 * is appended at the tail.  Failures (-ENOMEM) abort silently: the
 * coalescing is best-effort.
 */
static void io_fop_seg_coalesce(const struct ioseg *seg,
				struct io_seg_set *aggr_set)
{
	int           rc;
	struct ioseg *new_seg;
	struct ioseg *ioseg;

	M0_PRE(seg != NULL);
	M0_PRE(aggr_set != NULL);

	/*
	 * Coalesces all io segments in increasing order of offset.
	 * This will create new net buffer/s which will be associated with
	 * only one io fop and it will be sent on wire. While rest of io fops
	 * will hang off a list m0_rpc_item::ri_compound_items.
	 */
	m0_tl_for(iosegset, &aggr_set->iss_list, ioseg) {
		/* NOTE(review): the insertion attempt is elided in this
		 * listing — presumably rc = io_fop_seg_add_cond(ioseg, seg). */
		if (rc == 0 || rc == -ENOMEM)
			return;
	} m0_tl_endfor;

	rc = io_fop_seg_init(&new_seg, seg);
	if (rc != 0)
		return;
	iosegset_tlist_add_tail(&aggr_set->iss_list, new_seg);
}
1167 
1168 static void io_fop_segments_coalesce(const struct m0_0vec *iovec,
1169  struct io_seg_set *aggr_set)
1170 {
1171  uint32_t i;
1172  struct ioseg seg = { 0 };
1173 
1174  M0_PRE(iovec != NULL);
1175  M0_PRE(aggr_set != NULL);
1176 
1177  /*
1178  * For each segment from incoming IO vector, check if it can
1179  * be merged with any of the existing segments from aggr_set.
1180  * If yes, merge it else, add a new entry in aggr_set.
1181  */
1182  for (i = 0; i < iovec->z_bvec.ov_vec.v_nr; ++i) {
1183  ioseg_get(iovec, i, &seg);
1184  io_fop_seg_coalesce(&seg, aggr_set);
1185  }
1186 }
1187 
1188 /*
1189  * Creates and populates net buffers as needed using the list of
1190  * coalesced io segments.
1191  */
/*
 * Creates and populates net buffers for @coalesced_fop from the list of
 * coalesced io segments in @seg_set, respecting the net domain's
 * maximum buffer size and maximum segment count per buffer.
 * Consumed segments are removed from the list; on failure the partially
 * built bulk buffers are cleaned up and a negative errno is returned.
 */
static int io_netbufs_prepare(struct m0_fop *coalesced_fop,
			      struct io_seg_set *seg_set)
{
	int                     rc;
	int32_t                 max_segs_nr;
	int32_t                 curr_segs_nr;
	int32_t                 nr;
	m0_bcount_t             max_bufsize;
	m0_bcount_t             curr_bufsize;
	uint32_t                segs_nr;
	struct ioseg           *ioseg;
	struct m0_net_domain   *netdom;
	struct m0_rpc_bulk     *rbulk;
	struct m0_rpc_bulk_buf *buf;

	M0_PRE(coalesced_fop != NULL);
	M0_PRE(seg_set != NULL);
	M0_PRE(!iosegset_tlist_is_empty(&seg_set->iss_list));

	netdom = m0_fop_domain_get(coalesced_fop);
	max_bufsize = m0_net_domain_get_max_buffer_size(netdom);
	max_segs_nr = m0_net_domain_get_max_buffer_segments(netdom);
	rbulk = m0_fop_to_rpcbulk(coalesced_fop);
	curr_segs_nr = iosegset_tlist_length(&seg_set->iss_list);

	while (curr_segs_nr != 0) {
		curr_bufsize = 0;
		segs_nr = 0;
		/*
		 * Calculates the number of segments that can fit into max
		 * buffer size. These are needed to add a m0_rpc_bulk_buf
		 * structure into struct m0_rpc_bulk. Selected io segments
		 * are removed from io segments list, hence the loop always
		 * starts from the first element.
		 */
		m0_tl_for(iosegset, &seg_set->iss_list, ioseg) {
			if (curr_bufsize + ioseg->is_size <= max_bufsize &&
			    segs_nr <= max_segs_nr) {
				curr_bufsize += ioseg->is_size;
				++segs_nr;
			} else
				break;
		} m0_tl_endfor;

		rc = m0_rpc_bulk_buf_add(rbulk, segs_nr, curr_bufsize,
					 netdom, NULL, &buf);
		if (rc != 0)
			goto cleanup;

		nr = 0;
		m0_tl_for(iosegset, &seg_set->iss_list, ioseg) {
			/* NOTE(review): the opening of the data-buffer add
			 * call is elided in this extracted listing —
			 * presumably rc = m0_rpc_bulk_buf_databuf_add(buf,
			 * ioseg->is_buf, ...). */
					 ioseg->is_size,
					 ioseg->is_index,
					 netdom);

			/*
			 * Since size and fragment calculations are made before
			 * hand, this buffer addition should succeed.
			 */
			M0_ASSERT(rc == 0);

			/* NOTE(review): one statement elided here —
			 * presumably ioseg_unlink_free(ioseg). */
			if (++nr == segs_nr)
				break;
		} m0_tl_endfor;
		M0_POST(m0_vec_count(&buf->bb_zerovec.z_bvec.ov_vec) <=
			max_bufsize);
		M0_POST(buf->bb_zerovec.z_bvec.ov_vec.v_nr <= max_segs_nr);
		curr_segs_nr -= segs_nr;
	}
	return 0;
cleanup:
	M0_ASSERT(rc != 0);
	/* NOTE(review): bulk buffer list cleanup call elided here. */
	return M0_RC(rc);
}
1269 
1270 /* Deallocates memory claimed by index vector/s from io fop wire format. */
1271 M0_INTERNAL void io_fop_ivec_dealloc(struct m0_fop *fop)
1272 {
1273  struct m0_fop_cob_rw *rw;
1274  struct m0_io_indexvec *ivec;
1275 
1276  M0_PRE(fop != NULL);
1277 
1278  rw = io_rw_get(fop);
1279  ivec = &rw->crw_ivec;
1280 
1281  m0_free(ivec->ci_iosegs);
1282  ivec->ci_nr = 0;
1283  ivec->ci_iosegs = NULL;
1284 }
1285 
1286 #define ZNR(zvec) zvec->z_bvec.ov_vec.v_nr
1287 #define ZCOUNT(zvec, i) zvec->z_bvec.ov_vec.v_count[i]
1288 #define ZINDEX(zvec, i) zvec->z_index[i]
1289 
1290 #define INR(ivec) ivec->ci_nr
1291 #define IINDEX(ivec, i) ivec->ci_iosegs[i].ci_index
1292 #define ICOUNT(ivec, i) ivec->ci_iosegs[i].ci_count
1293 
/*
 * Returns the number of index-vector entries needed to describe all
 * zero-vector segments of @rbulk once contiguous neighbours are merged:
 * within each buffer, a segment whose start equals the previous
 * segment's end (index + count) is folded into it, so each such
 * adjacency reduces the entry count by one (cnt += ZNR(zvec) - nr).
 */
static uint32_t iosegs_nr(struct m0_rpc_bulk *rbulk)
{
	struct m0_rpc_bulk_buf *buf;
	uint32_t                cnt = 0;
	m0_bindex_t             index = 0;
	m0_bcount_t             count = 0;

	m0_tl_for(rpcbulk, &rbulk->rb_buflist, buf) {
		uint32_t        i = 0;
		uint32_t        nr = 0;
		struct m0_0vec *zvec = &buf->bb_zerovec;

		/* Seed the running (index, count) from the very first
		 * segment seen. */
		if (index == 0) {
			index = ZINDEX(zvec, 0);
			count = ZCOUNT(zvec, 0);
			i = 1;
		}
		for (; i < ZNR(zvec); ++i) {
			/* Contiguous with the previous segment: merged. */
			if (index + count == ZINDEX(zvec, i))
				++nr;
			index = ZINDEX(zvec, i);
			count = ZCOUNT(zvec, i);
		}
		cnt += ZNR(zvec) - nr;
	} m0_tl_endfor;

	return cnt;
}
1322 
/*
 * Fills @ivec by squeezing contiguous zero-vector segments of every
 * bulk buffer of @rbulk into single index-vector entries.  @ivec must
 * have iosegs_nr() entries, zero-initialised by the caller (the
 * {index == 0, count == 0} test on entry 0 relies on this to seed the
 * first entry).
 */
static void iosegs_squeeze(struct m0_rpc_bulk *rbulk,
			   struct m0_io_indexvec *ivec)
{
	m0_bindex_t             index = 0;
	struct m0_rpc_bulk_buf *buf;

	m0_tl_for(rpcbulk, &rbulk->rb_buflist, buf) {
		uint32_t        j = 0;
		struct m0_0vec *zvec = &buf->bb_zerovec;

		/* Entry 0 still untouched: seed it from the first segment. */
		if (IINDEX(ivec, 0) == 0 && ICOUNT(ivec, 0) == 0) {
			IINDEX(ivec, 0) = ZINDEX(zvec, 0);
			ICOUNT(ivec, 0) = ZCOUNT(zvec, 0);
			j = 1;
		}
		for (; j < ZNR(zvec); ++j) {
			/* Extend the current entry if contiguous, else
			 * start a new one. */
			if (IINDEX(ivec, index) + ICOUNT(ivec, index) ==
			    ZINDEX(zvec, j)) {
				ICOUNT(ivec, index) += ZCOUNT(zvec, j);
			} else {
				++index;
				IINDEX(ivec, index) = ZINDEX(zvec, j);
				ICOUNT(ivec, index) = ZCOUNT(zvec, j);
			}
		}
	} m0_tl_endfor;
}
1350 
1351 /* Populates index vector/s from io fop wire format. */
static int io_fop_ivec_prepare(struct m0_fop *res_fop,
			       struct m0_rpc_bulk *rbulk)
{
	struct m0_fop_cob_rw  *rw;
	struct m0_io_indexvec *ivec;

	M0_PRE(res_fop != NULL);
	M0_PRE(rbulk != NULL);
	/* NOTE(review): one precondition elided in this listing —
	 * presumably asserting rbulk->rb_mutex is held. */

	rw = io_rw_get(res_fop);
	ivec = &rw->crw_ivec;
	/* One entry per squeezed (merged-contiguous) bulk segment. */
	ivec->ci_nr = iosegs_nr(rbulk);

	/* M0_ALLOC_ARR zeroes the array — iosegs_squeeze() depends on it. */
	M0_ALLOC_ARR(ivec->ci_iosegs, ivec->ci_nr);
	if (ivec->ci_iosegs == NULL)
		return M0_ERR(-ENOMEM);

	iosegs_squeeze(rbulk, ivec);

	return 0;
}
1374 
/*
 * Computes data-integrity checksums for every bulk buffer of @fop and
 * stores them in the rw payload (rw->crw_di_data).  A no-op when the
 * "skip_di_for_ut" fault injection point is enabled, when DI is
 * compiled out, or when the file's DI scheme produces no output
 * (do_out_shift() == 0).
 */
static int io_fop_di_prepare(struct m0_fop *fop)
{
	uint64_t                size;
	struct m0_fop_cob_rw   *rw;
	struct m0_io_indexvec  *io_info;
	struct m0_bufvec        cksum_data;
	struct m0_rpc_bulk     *rbulk;
	struct m0_rpc_bulk_buf *rbuf;
	struct m0_file         *file;
	uint64_t                curr_size = 0;
	uint64_t                todo = 0;
	int                     rc = 0;
	struct m0_indexvec      io_vec;

	if (M0_FI_ENABLED("skip_di_for_ut"))
		return 0;
#ifndef ENABLE_DATA_INTEGRITY
	return M0_RC(rc);
#endif
	M0_PRE(fop != NULL);

	rbulk = m0_fop_to_rpcbulk(fop);
	M0_ASSERT(rbulk != NULL);
	/* NOTE(review): one assertion elided here in this listing. */
	rw = io_rw_get(fop);
	io_info = &rw->crw_ivec;
#ifndef __KERNEL__
	/* NOTE(review): the user-space assignment of `file' is elided in
	 * this extracted listing. */
#else
	file = m0_fop_to_file(fop);
#endif
	if (file->fi_di_ops->do_out_shift(file) == 0)
		return M0_RC(rc);
	rc = m0_indexvec_wire2mem(io_info, io_info->ci_nr, 0, &io_vec);
	if (rc != 0)
		return M0_RC(rc);
	size = m0_di_size_get(file, m0_io_count(io_info));
	rw->crw_di_data.b_nob = size;
	/* NOTE(review): allocation of rw->crw_di_data.b_addr is elided in
	 * this listing. */
	if (rw->crw_di_data.b_addr == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	m0_tl_for (rpcbulk, &rbulk->rb_buflist, rbuf) {
		struct m0_indexvec ivec;
		uint32_t           di_size;
		struct m0_buf      buf;
		uint32_t           curr_pos;

		/* Checksum output window for this buffer within di_data. */
		curr_pos = m0_di_size_get(file, curr_size);
		todo = m0_vec_count(&rbuf->bb_zerovec.z_bvec.ov_vec);
		di_size = m0_di_size_get(file, todo);
		buf = M0_BUF_INIT(di_size, rw->crw_di_data.b_addr + curr_pos);
		cksum_data = (struct m0_bufvec) M0_BUFVEC_INIT_BUF(&buf.b_addr,
								   &buf.b_nob);
		rc = m0_indexvec_split(&io_vec, curr_size, todo, 0, &ivec);
		if (rc != 0)
			goto out;
		file->fi_di_ops->do_sum(file, &ivec, &rbuf->bb_nbuf->nb_buffer,
					&cksum_data);
		curr_size += todo;
		m0_indexvec_free(&ivec);
	} m0_tl_endfor;

out:
	/* NOTE(review): a cleanup statement is elided here — presumably
	 * m0_indexvec_free(&io_vec). */
	return M0_RC(rc);
}
1443 
1444 static void io_fop_bulkbuf_move(struct m0_fop *src, struct m0_fop *dest)
1445 {
1446  struct m0_rpc_bulk *sbulk;
1447  struct m0_rpc_bulk *dbulk;
1448  struct m0_rpc_bulk_buf *rbuf;
1449  struct m0_fop_cob_rw *srw;
1450  struct m0_fop_cob_rw *drw;
1451 
1452  M0_PRE(src != NULL);
1453  M0_PRE(dest != NULL);
1454 
1455  sbulk = m0_fop_to_rpcbulk(src);
1456  dbulk = m0_fop_to_rpcbulk(dest);
1457  m0_mutex_lock(&sbulk->rb_mutex);
1458  m0_tl_teardown(rpcbulk, &sbulk->rb_buflist, rbuf) {
1459  rpcbulk_tlist_add(&dbulk->rb_buflist, rbuf);
1460  }
1461  dbulk->rb_bytes = sbulk->rb_bytes;
1462  dbulk->rb_rc = sbulk->rb_rc;
1463  m0_mutex_unlock(&sbulk->rb_mutex);
1464 
1465  srw = io_rw_get(src);
1466  drw = io_rw_get(dest);
1467  drw->crw_desc = srw->crw_desc;
1468  drw->crw_ivec = srw->crw_ivec;
1469 }
1470 
/*
 * Allocates the array of net buf descriptors in the io fop wire format,
 * one per rpc bulk buffer.  Returns 0 on success, -ENOMEM on failure.
 */
static int io_fop_desc_alloc(struct m0_fop *fop, struct m0_rpc_bulk *rbulk)
{
	struct m0_fop_cob_rw *rw;

	M0_PRE(fop != NULL);
	M0_PRE(rbulk != NULL);
	/* NOTE(review): one precondition elided in this listing —
	 * presumably asserting rbulk->rb_mutex is held. */

	/* The incoming parameter value is discarded and re-derived from
	 * @fop — NOTE(review): looks redundant; confirm against callers. */
	rbulk = m0_fop_to_rpcbulk(fop);
	rw = io_rw_get(fop);
	rw->crw_desc.id_nr = rpcbulk_tlist_length(&rbulk->rb_buflist);
	/* NOTE(review): the M0_ALLOC_ARR(rw->crw_desc.id_descs, ...) call
	 * is elided in this extracted listing. */
	return rw->crw_desc.id_descs == NULL ? M0_ERR(-ENOMEM) : 0;
}
1485 
/*
 * Releases the net buf descriptor array of @fop's wire format, freeing
 * each descriptor's payload first and then the array itself.
 */
static void io_fop_desc_dealloc(struct m0_fop *fop)
{
	uint32_t              i;
	struct m0_fop_cob_rw *rw;

	M0_PRE(fop != NULL);

	rw = io_rw_get(fop);

	/*
	 * These descriptors are allocated by m0_rpc_bulk_store()
	 * code during adding them as part of on-wire representation
	 * of io fop. They should not be deallocated by rpc code
	 * since it will unnecessarily pollute rpc layer code
	 * with io details.
	 */
	for (i = 0; i < rw->crw_desc.id_nr; ++i)
		/* NOTE(review): the loop body is elided in this listing —
		 * presumably freeing each descriptor's network buffer
		 * descriptor data. */

	m0_free0(&rw->crw_desc.id_descs);
	rw->crw_desc.id_nr = 0;
}
1508 
1509 /*
1510  * Allocates memory for net buf descriptors array and index vector array
1511  * and populates the array of index vectors in io fop wire format.
1512  */
M0_INTERNAL int m0_io_fop_prepare(struct m0_fop *fop)
{
	int                    rc;
	struct m0_rpc_bulk    *rbulk;
	enum m0_net_queue_type q;
	M0_ENTRY();

	M0_PRE(fop != NULL);
	/* NOTE(review): a second precondition is elided in this listing —
	 * presumably M0_PRE(m0_is_io_fop(fop)). */

	rbulk = m0_fop_to_rpcbulk(fop);
	m0_mutex_lock(&rbulk->rb_mutex);
	rc = io_fop_desc_alloc(fop, rbulk);
	if (rc != 0) {
		rc = -ENOMEM;
		goto err;
	}

	rc = io_fop_ivec_prepare(fop, rbulk);
	if (rc != 0) {
		/* NOTE(review): a descriptor cleanup call is elided here. */
		rc = -ENOMEM;
		goto err;
	}

	/* NOTE(review): the selection of queue type `q' (receive for
	 * reads, send for writes, presumably) is elided in this listing. */
	m0_rpc_bulk_qtype(rbulk, q);
	if (rc == 0 && m0_is_write_fop(fop))
	/* NOTE(review): the statement guarded by this `if' is elided —
	 * presumably rc = io_fop_di_prepare(fop). */
err:
	m0_mutex_unlock(&rbulk->rb_mutex);
	return M0_RC(rc);
}
1547 
1548 /*
1549  * Creates new net buffers from aggregate list and adds them to
1550  * associated m0_rpc_bulk object. Also calls m0_io_fop_prepare() to
1551  * allocate memory for net buf desc sequence and index vector
1552  * sequence in io fop wire format.
1553  */
1555  struct io_seg_set *aggr_set)
1556 {
1557  int rc;
1558  struct m0_rpc_bulk *rbulk;
1559 
1560  M0_PRE(fop != NULL);
1561  M0_PRE(aggr_set != NULL);
1562 
1563  rbulk = m0_fop_to_rpcbulk(fop);
1564 
1565  rc = io_netbufs_prepare(fop, aggr_set);
1566  if (rc != 0) {
1567  return M0_RC(rc);
1568  }
1569 
1571  if (rc != 0)
1573 
1574  return M0_RC(rc);
1575 }
1576 
1577 /*
1578  * Deallocates memory for sequence of net buf desc and sequence of index
1579  * vectors from io fop wire format.
1580  */
M0_INTERNAL void m0_io_fop_destroy(struct m0_fop *fop)
{
	M0_PRE(fop != NULL);

	/* NOTE(review): the deallocation calls are elided in this
	 * listing — presumably io_fop_desc_dealloc(fop) and
	 * io_fop_ivec_dealloc(fop), per the comment above. */
}
1588 
/*
 * Returns the on-wire (xcode-encoded) size of @fop in bytes.
 * NOTE(review): the m0_xcode_ctx_init() call that binds @fop's data to
 * `ctx' is elided in this extracted listing.
 */
M0_INTERNAL size_t m0_io_fop_size_get(struct m0_fop *fop)
{
	struct m0_xcode_ctx ctx;

	M0_PRE(fop != NULL);
	M0_PRE(fop->f_type != NULL);

	return m0_xcode_length(&ctx);
}
1599 
1614 static int io_fop_coalesce(struct m0_fop *res_fop, uint64_t size)
1615 {
1616  int rc;
1617  struct m0_fop *fop;
1618  struct m0_fop *bkp_fop;
1619  struct m0_tl *items_list;
1620  struct m0_0vec *iovec;
1621  struct ioseg *ioseg;
1622  struct m0_io_fop *cfop;
1623  struct io_seg_set aggr_set;
1624  struct m0_rpc_item *item;
1625  struct m0_rpc_bulk *rbulk;
1626  struct m0_rpc_bulk *bbulk;
1627  struct m0_fop_cob_rw *rw;
1628  struct m0_rpc_bulk_buf *rbuf;
1629  struct m0_net_transfer_mc *tm;
1630 
1631  M0_PRE(res_fop != NULL);
1632  M0_PRE(m0_is_io_fop(res_fop));
1633 
1634  M0_ALLOC_PTR(cfop);
1635  if (cfop == NULL)
1636  return M0_ERR(-ENOMEM);
1637 
1638  rw = io_rw_get(res_fop);
1639  rc = m0_io_fop_init(cfop, &rw->crw_gfid, res_fop->f_type, NULL);
1640  if (rc != 0) {
1641  m0_free(cfop);
1642  return M0_RC(rc);
1643  }
1644  tm = m0_fop_tm_get(res_fop);
1645  bkp_fop = &cfop->if_fop;
1647  iosegset_tlist_init(&aggr_set.iss_list);
1648 
1649  /*
1650  * Traverses the fop_list, get the IO vector from each fop,
1651  * pass it to a coalescing routine and get result back
1652  * in another list.
1653  */
1654  items_list = &res_fop->f_item.ri_compound_items;
1655  M0_ASSERT(!rpcitem_tlist_is_empty(items_list));
1656 
1657  m0_tl_for(rpcitem, items_list, item) {
1659  rbulk = m0_fop_to_rpcbulk(fop);
1660  m0_mutex_lock(&rbulk->rb_mutex);
1661  m0_tl_for(rpcbulk, &rbulk->rb_buflist, rbuf) {
1662  iovec = io_0vec_get(rbuf);
1663  io_fop_segments_coalesce(iovec, &aggr_set);
1664  } m0_tl_endfor;
1665  m0_mutex_unlock(&rbulk->rb_mutex);
1666  } m0_tl_endfor;
1667 
1668  /*
1669  * Removes m0_rpc_bulk_buf from the m0_rpc_bulk::rb_buflist and
1670  * add it to same list belonging to bkp_fop.
1671  */
1672  io_fop_bulkbuf_move(res_fop, bkp_fop);
1673 
1674  /*
1675  * Prepares net buffers from set of io segments, allocates memory
1676  * for net buf desriptors and index vectors and populates the index
1677  * vectors
1678  */
1679  rc = io_fop_desc_ivec_prepare(res_fop, &aggr_set);
1680  if (rc != 0)
1681  goto cleanup;
1682 
1683  /*
1684  * Adds the net buffers from res_fop to transfer machine and
1685  * populates res_fop with net buf descriptor/s got from network
1686  * buffer addition.
1687  */
1688  rw = io_rw_get(res_fop);
1689  rbulk = m0_fop_to_rpcbulk(res_fop);
1690  rc = m0_rpc_bulk_store(rbulk, res_fop->f_item.ri_session->s_conn,
1692  if (rc != 0) {
1693  m0_io_fop_destroy(res_fop);
1694  goto cleanup;
1695  }
1696 
1697  /*
1698  * Checks if current size of res_fop fits into the size
1699  * provided as input.
1700  */
1701  if (m0_io_fop_size_get(res_fop) > size) {
1702  m0_mutex_lock(&rbulk->rb_mutex);
1703  m0_tl_for(rpcbulk, &rbulk->rb_buflist, rbuf) {
1704  m0_net_buffer_del(rbuf->bb_nbuf, tm);
1705  } m0_tl_endfor;
1706  m0_mutex_unlock(&rbulk->rb_mutex);
1707  m0_io_fop_destroy(res_fop);
1708  goto cleanup;
1709  }
1710 
1711  /*
1712  * Removes the net buffers belonging to coalesced member fops
1713  * from transfer machine since these buffers are coalesced now
1714  * and are part of res_fop.
1715  */
1716  m0_tl_for(rpcitem, items_list, item) {
1718  if (fop == res_fop)
1719  continue;
1720  rbulk = m0_fop_to_rpcbulk(fop);
1721  m0_mutex_lock(&rbulk->rb_mutex);
1722  m0_tl_for(rpcbulk, &rbulk->rb_buflist, rbuf) {
1723  m0_net_buffer_del(rbuf->bb_nbuf, tm);
1724  } m0_tl_endfor;
1725  m0_mutex_unlock(&rbulk->rb_mutex);
1726  } m0_tl_endfor;
1727 
1728  /*
1729  * Removes the net buffers from transfer machine contained by rpc bulk
1730  * structure belonging to res_fop since they will be replaced by
1731  * new coalesced net buffers.
1732  */
1733  bbulk = m0_fop_to_rpcbulk(bkp_fop);
1734  rbulk = m0_fop_to_rpcbulk(res_fop);
1735  m0_mutex_lock(&bbulk->rb_mutex);
1736  m0_mutex_lock(&rbulk->rb_mutex);
1737  m0_tl_teardown(rpcbulk, &bbulk->rb_buflist, rbuf) {
1738  rpcbulk_tlist_add(&rbulk->rb_buflist, rbuf);
1739  m0_net_buffer_del(rbuf->bb_nbuf, tm);
1740  rbulk->rb_bytes -= m0_vec_count(&rbuf->bb_nbuf->
1741  nb_buffer.ov_vec);
1742  }
1743  m0_mutex_unlock(&rbulk->rb_mutex);
1744  m0_mutex_unlock(&bbulk->rb_mutex);
1745 
1746  M0_LOG(M0_DEBUG, "io fops coalesced successfully.");
1747  rpcitem_tlist_add(items_list, &bkp_fop->f_item);
1748  return M0_RC(rc);
1749 cleanup:
1750  M0_ASSERT(rc != 0);
1751  m0_tl_for(iosegset, &aggr_set.iss_list, ioseg) {
1753  } m0_tl_endfor;
1754  iosegset_tlist_fini(&aggr_set.iss_list);
1755  io_fop_bulkbuf_move(bkp_fop, res_fop);
1756  m0_io_fop_fini(cfop);
1757  m0_free(cfop);
1758  return M0_RC(rc);
1759 }
1760 
1761 __attribute__((unused))
1762 static struct m0_fid *io_fop_fid_get(struct m0_fop *fop)
1763 {
1764  return &(io_rw_get(fop))->crw_fid;
1765 }
1766 
1767 __attribute__((unused))
1768 static bool io_fop_fid_equal(struct m0_fop *fop1, struct m0_fop *fop2)
1769 {
1770  return m0_fid_eq(io_fop_fid_get(fop1), io_fop_fid_get(fop2));
1771 }
1772 
/*
 * fto_fop_replied callback for io fops: once a reply for the coalesced
 * fop "fop" arrives, restore its net-buf descriptors and index vectors
 * from the backup fop "bkpfop" (saved by io_fop_coalesce()) and release
 * the backup fop.
 */
static void io_fop_replied(struct m0_fop *fop, struct m0_fop *bkpfop)
{
	struct m0_io_fop     *cfop;
	struct m0_rpc_bulk   *rbulk;
	struct m0_fop_cob_rw *srw;
	struct m0_fop_cob_rw *drw;

	M0_PRE(fop != NULL);
	M0_PRE(bkpfop != NULL);
	M0_PRE(m0_is_io_fop(bkpfop));

	/* By reply time every bulk buffer of the coalesced fop must be gone. */
	rbulk = m0_fop_to_rpcbulk(fop);
	m0_mutex_lock(&rbulk->rb_mutex);
	M0_ASSERT(rpcbulk_tlist_is_empty(&rbulk->rb_buflist));
	m0_mutex_unlock(&rbulk->rb_mutex);

	/* Copy descriptor and index vector back: backup (src) -> fop (dst). */
	srw = io_rw_get(bkpfop);
	drw = io_rw_get(fop);
	drw->crw_desc = srw->crw_desc;
	drw->crw_ivec = srw->crw_ivec;
	/* The backup fop is embedded in an m0_io_fop allocated by
	 * io_fop_coalesce(); finalise and free that container. */
	cfop = container_of(bkpfop, struct m0_io_fop, if_fop);
	m0_io_fop_fini(cfop);
	m0_free(cfop);
}
1798 
1799 static void io_fop_desc_get(struct m0_fop *fop,
1800  struct m0_net_buf_desc_data **desc)
1801 {
1802  struct m0_fop_cob_rw *rw;
1803 
1804  M0_PRE(fop != NULL);
1805  M0_PRE(desc != NULL);
1806 
1807  rw = io_rw_get(fop);
1808  *desc = rw->crw_desc.id_descs;
1809 }
1810 
1811 /* Rpc item ops for IO operations. */
1812 static void io_item_replied(struct m0_rpc_item *item)
1813 {
1814  struct m0_fop *fop;
1815  struct m0_fop *rfop;
1816  struct m0_fop *bkpfop;
1817  struct m0_rpc_item *ritem;
1818  struct m0_rpc_bulk *rbulk;
1819  struct m0_fop_cob_rw_reply *reply;
1820 
1821  M0_PRE(item != NULL);
1822 
1823  if (m0_rpc_item_error(item) != 0)
1824  return;
1825 
1827  rbulk = m0_fop_to_rpcbulk(fop);
1828  rfop = m0_rpc_item_to_fop(item->ri_reply);
1829  reply = io_rw_rep_get(rfop);
1830  M0_ASSERT(ergo(reply->rwr_rc == 0,
1831  reply->rwr_count == rbulk->rb_bytes));
1832 
1833 if (0) /* if (0) is used instead of #if 0 to avoid code rot. */
1834 {
1838  /*
1839  * Restores the contents of main coalesced fop from the first
1840  * rpc item in m0_rpc_item::ri_compound_items list. This item
1841  * is inserted by io coalescing code.
1842  */
1843  if (!rpcitem_tlist_is_empty(&item->ri_compound_items)) {
1844  M0_LOG(M0_DEBUG, "Reply received for coalesced io fops.");
1845  ritem = rpcitem_tlist_pop(&item->ri_compound_items);
1846  bkpfop = m0_rpc_item_to_fop(ritem);
1847  if (fop->f_type->ft_ops->fto_fop_replied != NULL)
1848  fop->f_type->ft_ops->fto_fop_replied(fop, bkpfop);
1849  }
1850 
1851  /*
1852  * The rpc_item->ri_chan is signaled by sessions code
1853  * (rpc_item_replied()) which is why only member coalesced items
1854  * (items which were member of a parent coalesced item) are
1855  * signaled from here as they are not sent on wire but hang off
1856  * a list from parent coalesced item.
1857  */
1858  m0_tl_for(rpcitem, &item->ri_compound_items, ritem) {
1859  fop = m0_rpc_item_to_fop(ritem);
1860  rbulk = m0_fop_to_rpcbulk(fop);
1861  m0_mutex_lock(&rbulk->rb_mutex);
1862  M0_ASSERT(rbulk != NULL && m0_tlist_is_empty(&rpcbulk_tl,
1863  &rbulk->rb_buflist));
1864  /* Notifies all member coalesced items of completion status. */
1865  rbulk->rb_rc = item->ri_error;
1866  m0_mutex_unlock(&rbulk->rb_mutex);
1867  /* XXX Use rpc_item_replied()
1868  But we'll fix it later because this code path will need
1869  significant changes because of new formation code.
1870  */
1871  /* m0_chan_broadcast(&ritem->ri_chan); */
1872  } m0_tl_endfor;
1873 }
1874 }
1875 
/*
 * rit_io_coalesce hook: merges the IO vectors of rpc items targeting the
 * same file into "head", bounded by "size" bytes. The entire body is
 * wrapped in "if (0)" (deliberately, instead of "#if 0", so the compiler
 * keeps type-checking it) because item coalescing is not supported by the
 * current rpc formation code.
 */
static void item_io_coalesce(struct m0_rpc_item *head, struct m0_list *list,
			     uint64_t size)
{
	/* Coalescing RPC items is not yet supported */
if (0)
{
	int                 rc;
	struct m0_fop      *bfop;
	struct m0_rpc_item *item;

	M0_PRE(head != NULL);
	M0_PRE(list != NULL);
	M0_PRE(size > 0);

	if (m0_list_is_empty(list))
		return;

	/*
	 * Traverses through the list and finds out items that match with
	 * head on basis of fid and intent (read/write). Matching items
	 * are removed from session->s_unbound_items list and added to
	 * head->compound_items list.
	 */
	bfop = m0_rpc_item_to_fop(head);

	if (rpcitem_tlist_is_empty(&head->ri_compound_items))
		return;

	/*
	 * Add the bound item to list of compound items as this will
	 * include the bound item's io vector in io coalescing
	 */
	rpcitem_tlist_add(&head->ri_compound_items, head);

	rc = bfop->f_type->ft_ops->fto_io_coalesce(bfop, size);
	if (rc != 0) {
		/* Coalescing failed: empty the compound-items list again. */
		m0_tl_teardown(rpcitem, &head->ri_compound_items, item) {
			(void)item; /* remove the "unused variable" warning.*/
		}
	} else {
		/*
		 * Item at head is the backup item which is not present
		 * in sessions unbound list.
		 */
		rpcitem_tlist_del(head);
	}
}
}
1924 
1925 M0_INTERNAL m0_bcount_t m0_io_fop_byte_count(struct m0_io_fop *iofop)
1926 {
1927  m0_bcount_t count = 0;
1928  struct m0_rpc_bulk_buf *rbuf;
1929 
1930  M0_PRE(iofop != NULL);
1931 
1932  m0_tl_for (rpcbulk, &iofop->if_rbulk.rb_buflist, rbuf) {
1934  } m0_tl_endfor;
1935 
1936  return count;
1937 }
1938 
1939 M0_INTERNAL void m0_io_fop_release(struct m0_ref *ref)
1940 {
1941  struct m0_io_fop *iofop;
1942  struct m0_fop *fop;
1943 
1944  fop = container_of(ref, struct m0_fop, f_ref);
1945  iofop = container_of(fop, struct m0_io_fop, if_fop);
1946  M0_LOG(M0_DEBUG, "iofop %p", iofop);
1947  m0_io_fop_fini(iofop);
1948  m0_free(iofop);
1949 }
1950 
1951 #undef M0_TRACE_SUBSYSTEM
1952 
1953 /*
1954  * Local variables:
1955  * c-indentation-style: "K&R"
1956  * c-basic-offset: 8
1957  * tab-width: 8
1958  * fill-column: 80
1959  * scroll-step: 1
1960  * End:
1961  */
static int io_fol_cd_rec_frag_redo(struct m0_fop_fol_frag *frag, struct m0_fol *fol)
Definition: io_fops.c:223
M0_INTERNAL m0_bcount_t m0_net_domain_get_max_buffer_segment_size(struct m0_net_domain *dom)
#define M0_BUFVEC_INIT_BUF(addr_ptr, count_ptr)
Definition: vec.h:165
static struct ctx cc
struct m0_fop_type m0_fop_cob_readv_rep_fopt
Definition: io_fops.c:73
struct m0_fop_type m0_fop_cob_op_reply_fopt
Definition: io_fops.c:78
Definition: fol.h:140
uint32_t rit_opcode
Definition: item.h:474
static size_t nr
Definition: dump.c:1505
struct m0_tl iss_list
Definition: io_fops.c:836
#define M0_PRE(cond)
M0_INTERNAL void m0_sm_conf_init(struct m0_sm_conf *conf)
Definition: sm.c:340
#define M0_ALLOC_ARR(arr, nr)
Definition: memory.h:84
M0_TL_DEFINE(iosegset, static, struct ioseg)
struct m0_fop_type m0_fop_cob_setattr_reply_fopt
Definition: io_fops.c:84
M0_BASSERT(M0_IOSERVICE_COB_DELETE_OPCODE==M0_IOSERVICE_COB_CREATE_OPCODE+1)
M0_INTERNAL m0_bcount_t m0_io_fop_byte_count(struct m0_io_fop *iofop)
Definition: io_fops.c:1925
static m0_bindex_t seg_set(struct pargrp_iomap *map, uint32_t seg, struct m0_ivec_varr_cursor *cur, m0_bindex_t grpend)
Definition: file.c:2329
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
struct m0_fop_cob_common ct_common
Definition: io_fops.h:504
struct m0_fop_cob_common cd_common
Definition: io_fops.h:497
static struct m0_list list
Definition: list.c:144
const struct m0_sm_conf cob_ops_conf
Definition: cob_foms.c:99
M0_INTERNAL struct m0_fop_cob_common * m0_cobfop_common_get(struct m0_fop *fop)
Definition: io_fops.c:990
static struct m0_semaphore q
Definition: rwlock.c:55
#define NULL
Definition: misc.h:38
M0_INTERNAL int m0_rpc_bulk_store(struct m0_rpc_bulk *rbulk, const struct m0_rpc_conn *conn, struct m0_net_buf_desc_data *to_desc, const struct m0_net_buffer_callbacks *bulk_cb)
Definition: bulk.c:520
struct m0_bufvec nb_buffer
Definition: net.h:1322
m0_bindex_t * z_index
Definition: vec.h:516
#define ergo(a, b)
Definition: misc.h:293
struct m0_fop_cob_rw c_rwv
Definition: io_fops.h:432
const struct m0_fom_type_ops cob_fom_type_ops
Definition: cob_foms.c:109
#define M0_FOP_TYPE_INIT(ft,...)
Definition: fop.h:307
Definition: sm.h:350
M0_INTERNAL void io_fop_ivec_dealloc(struct m0_fop *fop)
Definition: io_fops.c:1271
struct m0_tlink is_linkage
Definition: io_fops.c:828
void * b_addr
Definition: buf.h:39
#define ICOUNT(ivec, i)
Definition: io_fops.c:1292
struct m0_file file
Definition: di.c:36
M0_INTERNAL void m0_fop_init(struct m0_fop *fop, struct m0_fop_type *fopt, void *data, void(*fop_release)(struct m0_ref *))
Definition: fop.c:79
M0_INTERNAL int m0_rpc_bulk_buf_databuf_add(struct m0_rpc_bulk_buf *rbuf, void *buf, m0_bcount_t count, m0_bindex_t index, struct m0_net_domain *netdom)
Definition: bulk.c:331
int(* fto_io_coalesce)(struct m0_fop *fop, uint64_t rpc_size)
Definition: fop.h:269
#define M0_LOG(level,...)
Definition: trace.h:167
static int io_fol_frag_undo_redo_op(struct m0_fop_fol_frag *frag, struct m0_fol *fol)
Definition: io_fops.c:116
static int io_fop_seg_add_cond(struct ioseg *cseg, const struct ioseg *nseg)
Definition: io_fops.c:1120
static void io_fop_desc_dealloc(struct m0_fop *fop)
Definition: io_fops.c:1486
void fop_release(struct m0_ref *ref)
Definition: stats_ut_svc.c:148
void m0_fop_type_addb2_deinstrument(struct m0_fop_type *type)
Definition: fop.c:493
M0_INTERNAL void m0_io_fop_release(struct m0_ref *ref)
Definition: io_fops.c:1939
uint64_t(* do_out_shift)(const struct m0_file *file)
Definition: di.h:109
struct m0_vec ov_vec
Definition: vec.h:147
static int io_netbufs_prepare(struct m0_fop *coalesced_fop, struct io_seg_set *seg_set)
Definition: io_fops.c:1192
M0_INTERNAL void m0_rpc_bulk_fini(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:263
struct m0_tl ri_compound_items
Definition: item.h:204
struct m0_rpc_bulk if_rbulk
Definition: io_fops.h:177
struct m0_fop_type m0_fop_fsync_ios_fopt
Definition: io_fops.c:82
uint64_t sd_allowed
Definition: sm.h:422
M0_INTERNAL bool m0_is_cob_truncate_fop(const struct m0_fop *fop)
Definition: io_fops.c:964
struct m0_fop_cob_common cs_common
Definition: io_fops.h:551
int32_t ri_error
Definition: item.h:161
struct m0_net_buf_desc_data * id_descs
Definition: io_fops.h:313
void * m0_fop_data(const struct m0_fop *fop)
Definition: fop.c:220
void m0_fop_type_fini(struct m0_fop_type *fopt)
Definition: fop.c:232
M0_INTERNAL void m0_indexvec_free(struct m0_indexvec *ivec)
Definition: vec.c:553
M0_INTERNAL void m0_sm_conf_trans_extend(const struct m0_sm_conf *base, struct m0_sm_conf *sub)
Definition: sm.c:726
static int io_fop_ivec_prepare(struct m0_fop *res_fop, struct m0_rpc_bulk *rbulk)
Definition: io_fops.c:1352
M0_INTERNAL void m0_sm_conf_extend(const struct m0_sm_state_descr *base, struct m0_sm_state_descr *sub, uint32_t nr)
Definition: sm.c:763
uint64_t m0_bindex_t
Definition: types.h:80
#define m0_exists(var, nr,...)
Definition: misc.h:134
#define M0_BITS(...)
Definition: misc.h:236
uint64_t m0_bcount_t
Definition: types.h:77
M0_INTERNAL bool m0_net_buffer_del(struct m0_net_buffer *buf, struct m0_net_transfer_mc *tm)
Definition: buf.c:261
struct m0_sm_conf io_conf
Definition: io_foms.c:822
static int void * buf
Definition: dir.c:1019
#define container_of(ptr, type, member)
Definition: misc.h:33
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
uint32_t ci_nr
Definition: vec.h:635
static struct m0_xcode_type ** xt[]
Definition: protocol.c:64
__attribute__((unused))
Definition: io_fops.c:1092
void(* do_sum)(const struct m0_file *file, const struct m0_indexvec *io_info, const struct m0_bufvec *in, struct m0_bufvec *out)
Definition: di.h:122
static void io_fop_desc_get(struct m0_fop *fop, struct m0_net_buf_desc_data **desc)
Definition: io_fops.c:1799
M0_INTERNAL bool m0_is_read_rep(const struct m0_fop *fop)
Definition: io_fops.c:933
const struct m0_sm_conf m0_generic_conf
Definition: fom_generic.c:838
static struct m0_rpc_item * item
Definition: item.c:56
M0_INTERNAL void m0_io_fop_destroy(struct m0_fop *fop)
Definition: io_fops.c:1581
void ** ov_buf
Definition: vec.h:149
Definition: sock.c:887
static m0_bcount_t count
Definition: xcode.c:167
M0_INTERNAL bool m0_tlist_is_empty(const struct m0_tl_descr *d, const struct m0_tl *list)
Definition: tlist.c:96
M0_INTERNAL void m0_rpc_bulk_buflist_empty(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:279
void * is_buf
Definition: io_fops.c:823
#define m0_tl_endfor
Definition: tlist.h:700
uint64_t is_magic
Definition: io_fops.c:817
static int io_fop_desc_ivec_prepare(struct m0_fop *fop, struct io_seg_set *aggr_set)
Definition: io_fops.c:1554
return M0_RC(rc)
M0_INTERNAL struct m0_sm_conf m0_fsync_fom_conf
Definition: fsync_foms.c:123
struct m0_fop_cob_common cg_common
Definition: io_fops.h:533
M0_INTERNAL int m0_xcode_length(struct m0_xcode_ctx *ctx)
Definition: xcode.c:390
static int head(struct m0_sm *mach)
Definition: sm.c:468
const struct m0_fop_type_ops io_fop_cd_ops
Definition: io_fops.c:237
M0_INTERNAL int m0_ioservice_fop_init(void)
Definition: io_fops.c:281
const struct m0_rpc_item_ops io_req_rpc_item_ops
Definition: io_fops.c:107
#define M0_ENTRY(...)
Definition: trace.h:170
const struct m0_fom_type_ops m0_fsync_fom_ops
Definition: fsync_fops.c:62
struct m0_fop_type m0_fop_cob_getattr_fopt
Definition: io_fops.c:80
Definition: buf.h:37
M0_TL_DESCR_DEFINE(iosegset, "list of coalesced io segments", static, struct ioseg, is_linkage, is_magic, M0_IOS_IO_SEGMENT_MAGIC, M0_IOS_IO_SEGMENT_SET_MAGIC)
static void io_fop_replied(struct m0_fop *fop, struct m0_fop *bkpfop)
Definition: io_fops.c:1773
M0_INTERNAL struct m0_file * m0_client_fop_to_file(struct m0_fop *fop)
Definition: io_req_fop.c:88
uint64_t if_magic
Definition: io_fops.h:179
int opcode
Definition: crate.c:301
int i
Definition: dir.c:1033
M0_INTERNAL struct m0_rpc_bulk * m0_fop_to_rpcbulk(const struct m0_fop *fop)
Definition: io_fops.c:904
struct m0_fop_type * f_type
Definition: fop.h:81
struct m0_fid crw_fid
Definition: io_fops.h:288
M0_INTERNAL bool m0_is_cob_setattr_fop(const struct m0_fop *fop)
Definition: io_fops.c:978
static const struct m0_reqh_service_ops rpc_ops
Definition: service.c:92
struct m0_fop_type m0_fop_cob_getattr_reply_fopt
Definition: io_fops.c:81
static const struct m0_rpc_item_type_ops io_item_type_ops
Definition: io_fops.c:111
static struct m0_fop_type * ioservice_fops[]
Definition: io_fops.c:89
M0_INTERNAL int m0_cob_fom_create(struct m0_fop *fop, struct m0_fom **out, struct m0_reqh *reqh)
Definition: cob_foms.c:183
static struct m0_fid * io_fop_fid_get(struct m0_fop *fop)
return M0_ERR(-EOPNOTSUPP)
M0_INTERNAL struct m0_fop_cob_rw_reply * io_rw_rep_get(struct m0_fop *fop)
Definition: io_fops.c:1056
struct m0_fop if_fop
Definition: io_fops.h:174
Definition: cnt.h:36
const char * name
Definition: trace.c:110
M0_INTERNAL bool m0_is_cob_create_delete_fop(const struct m0_fop *fop)
Definition: io_fops.c:985
M0_INTERNAL struct m0_file * m0_fop_to_file(struct m0_fop *fop)
Definition: file.c:5998
Definition: refs.h:34
static void attr(struct m0_addb2__context *ctx, const uint64_t *v, char *buf)
Definition: dump.c:949
#define m0_tl_teardown(name, head, obj)
Definition: tlist.h:708
const char * ft_name
Definition: fop.h:225
struct m0_net_buffer * bb_nbuf
Definition: bulk.h:177
struct m0_buf crw_di_data
Definition: io_fops.h:416
#define m0_free0(pptr)
Definition: memory.h:77
struct m0_fop_type m0_fop_cob_setattr_fopt
Definition: io_fops.c:83
M0_INTERNAL size_t m0_io_fop_size_get(struct m0_fop *fop)
Definition: io_fops.c:1589
m0_bcount_t b_nob
Definition: buf.h:38
int32_t rb_rc
Definition: bulk.h:266
struct m0_io_descs crw_desc
Definition: io_fops.h:400
#define M0_ASSERT(cond)
M0_INTERNAL bool m0_mutex_is_locked(const struct m0_mutex *mutex)
Definition: mutex.c:95
M0_INTERNAL void m0_rpc_bulk_init(struct m0_rpc_bulk *rbulk)
Definition: bulk.c:247
static int io_fol_cd_rec_frag_op(struct m0_fop_fol_frag *frag, struct m0_fol *fol, bool undo)
Definition: io_fops.c:178
M0_INTERNAL bool m0_is_io_fop_rep(const struct m0_fop *fop)
Definition: io_fops.c:945
Definition: tlist.h:251
uint32_t scf_nr_states
Definition: sm.h:354
struct m0_sm_state_descr cob_ops_phases[]
Definition: cob_foms.c:86
struct m0_reqh_service_type m0_ios_type
Definition: io_service.c:112
M0_INTERNAL int m0_indexvec_wire2mem(struct m0_io_indexvec *wire_ivec, int max_frags_nr, uint32_t bshift, struct m0_indexvec *mem_ivec)
Definition: vec.c:1058
struct m0_tl rh_rpc_machines
Definition: reqh.h:135
static int io_fop_coalesce(struct m0_fop *res_fop, uint64_t size)
Definition: io_fops.c:1614
#define ZINDEX(zvec, i)
Definition: io_fops.c:1288
static void io_item_replied(struct m0_rpc_item *item)
Definition: io_fops.c:1812
Definition: vec.h:512
const struct m0_rpc_item_type * ri_type
Definition: item.h:200
struct m0_rpc_item * ri_reply
Definition: item.h:163
void * m0_alloc(size_t size)
Definition: memory.c:126
struct m0_sm_state_descr io_phases[]
Definition: io_foms.c:719
#define IINDEX(ivec, i)
Definition: io_fops.c:1291
#define M0_POST(cond)
struct m0_0vec bb_zerovec
Definition: bulk.h:179
M0_INTERNAL void m0_net_desc_free(struct m0_net_buf_desc *desc)
Definition: net.c:87
Definition: reqh.h:94
uint32_t v_nr
Definition: vec.h:51
Definition: dump.c:103
M0_INTERNAL m0_bcount_t m0_net_domain_get_max_buffer_size(struct m0_net_domain *dom)
M0_INTERNAL int m0_fop_data_alloc(struct m0_fop *fop)
Definition: fop.c:71
static int io_fol_cd_rec_frag_undo(struct m0_fop_fol_frag *frag, struct m0_fol *fol)
Definition: io_fops.c:217
m0_bcount_t * v_count
Definition: vec.h:53
M0_INTERNAL void m0_fop_fini(struct m0_fop *fop)
Definition: fop.c:136
M0_INTERNAL bool m0_list_is_empty(const struct m0_list *head)
Definition: list.c:42
struct m0_indexvec io_vec
Definition: di.c:42
m0_bcount_t is_size
Definition: io_fops.c:821
#define FID_P(f)
Definition: fid.h:77
M0_INTERNAL m0_bcount_t m0_vec_count(const struct m0_vec *vec)
Definition: vec.c:53
struct m0_fop_cob_rw_reply c_rep
Definition: io_fops.h:363
M0_INTERNAL bool m0_is_cob_create_fop(const struct m0_fop *fop)
Definition: io_fops.c:950
m0_bindex_t is_index
Definition: io_fops.c:819
struct m0_bufvec z_bvec
Definition: vec.h:514
Definition: list.h:72
void(* rio_replied)(struct m0_rpc_item *item)
Definition: item.h:300
M0_INTERNAL bool m0_fid_eq(const struct m0_fid *fid0, const struct m0_fid *fid1)
Definition: fid.c:164
static void ioseg_unlink_free(struct ioseg *ioseg)
Definition: io_fops.c:1080
Definition: fom.h:481
struct m0_fop_type m0_fop_cob_readv_fopt
Definition: io_fops.c:71
int32_t m0_rpc_item_error(const struct m0_rpc_item *item)
Definition: item.c:973
static int io_fop_seg_init(struct ioseg **ns, const struct ioseg *cseg)
Definition: io_fops.c:1102
static int io_fop_desc_alloc(struct m0_fop *fop, struct m0_rpc_bulk *rbulk)
Definition: io_fops.c:1471
struct m0_reqh reqh
Definition: rm_foms.c:48
M0_INTERNAL int m0_indexvec_split(struct m0_indexvec *in, m0_bcount_t curr_pos, m0_bcount_t nb_len, uint32_t bshift, struct m0_indexvec *out)
Definition: vec.c:1039
M0_INTERNAL m0_bcount_t m0_di_size_get(const struct m0_file *file, const m0_bcount_t size)
Definition: di.c:384
M0_INTERNAL bool m0_is_io_fop(const struct m0_fop *fop)
Definition: io_fops.c:928
struct m0_sm_state_descr * scf_state
Definition: sm.h:356
M0_TL_DESCR_DECLARE(rpcbulk, M0_EXTERN)
#define M0_FI_ENABLED(tag)
Definition: finject.h:231
struct m0_ref f_ref
Definition: fop.h:80
Definition: fid.h:38
static int io_fop_di_prepare(struct m0_fop *fop)
Definition: io_fops.c:1375
m0_net_queue_type
Definition: net.h:591
static struct m0_0vec * io_0vec_get(struct m0_rpc_bulk_buf *rbuf)
Definition: io_fops.c:1073
static void io_fop_segments_coalesce(const struct m0_0vec *iovec, struct io_seg_set *aggr_set)
Definition: io_fops.c:1168
#define M0_ALLOC_PTR(ptr)
Definition: memory.h:86
const struct m0_rpc_item_ops * ri_ops
Definition: item.h:149
#define ZCOUNT(zvec, i)
Definition: io_fops.c:1287
struct m0_net_domain * m0_fop_domain_get(const struct m0_fop *fop)
Definition: fop.c:486
struct m0_fop_type m0_fop_cob_writev_rep_fopt
Definition: io_fops.c:74
M0_INTERNAL void m0_ioservice_fop_fini(void)
Definition: io_fops.c:251
int m0_fop_type_addb2_instrument(struct m0_fop_type *type)
Definition: fop.c:461
struct m0_fop_type m0_fop_fv_notification_fopt
Definition: io_fops.c:79
uint32_t id_nr
Definition: io_fops.h:312
struct m0_rpc_session * ri_session
Definition: item.h:147
const struct m0_fop_type_ops * ft_ops
Definition: fop.h:228
M0_INTERNAL m0_bcount_t m0_io_count(const struct m0_io_indexvec *io_info)
Definition: vec.c:999
struct m0_fop_type m0_fop_cob_create_fopt
Definition: io_fops.c:75
Definition: io_fops.c:815
static void ioseg_get(const struct m0_0vec *zvec, uint32_t seg_index, struct ioseg *seg)
Definition: io_fops.c:845
struct m0_fid crw_gfid
Definition: io_fops.h:382
m0_bcount_t size
Definition: di.c:39
#define _0C(exp)
Definition: assert.h:311
m0_bcount_t rb_bytes
Definition: bulk.h:260
static void iosegs_squeeze(struct m0_rpc_bulk *rbulk, struct m0_io_indexvec *ivec)
Definition: io_fops.c:1323
uint64_t iss_magic
Definition: io_fops.c:834
M0_INTERNAL bool m0_is_write_rep(const struct m0_fop *fop)
Definition: io_fops.c:939
static struct m0_fop * fop
Definition: item.c:57
struct m0_rpc_item_type ft_rpc_item_type
Definition: fop.h:235
struct m0_fol rh_fol
Definition: reqh.h:121
M0_INTERNAL int32_t m0_net_domain_get_max_buffer_segments(struct m0_net_domain *dom)
struct m0_fop * m0_rpc_item_to_fop(const struct m0_rpc_item *item)
Definition: fop.c:346
M0_INTERNAL void m0_fom_queue(struct m0_fom *fom)
Definition: fom.c:624
struct m0_net_buf_desc bdd_desc
Definition: net_otw_types.h:47
static struct m0_be_seg * seg
Definition: btree.c:40
M0_INTERNAL void m0_rpc_bulk_qtype(struct m0_rpc_bulk *rbulk, enum m0_net_queue_type q)
Definition: bulk.c:372
#define M0_ASSERT_INFO(cond, fmt,...)
static void item_io_coalesce(struct m0_rpc_item *head, struct m0_list *list, uint64_t size)
Definition: io_fops.c:1876
struct m0_tl rb_buflist
Definition: bulk.h:256
void(* fto_fop_replied)(struct m0_fop *fop, struct m0_fop *bfop)
Definition: fop.h:267
M0_INTERNAL bool m0_is_cob_getattr_fop(const struct m0_fop *fop)
Definition: io_fops.c:971
#define level
static bool io_fop_invariant(struct m0_io_fop *iofop)
Definition: io_fops.c:857
M0_INTERNAL void m0_io_fop_fini(struct m0_io_fop *iofop)
Definition: io_fops.c:897
M0_INTERNAL int m0_io_fop_init(struct m0_io_fop *iofop, const struct m0_fid *gfid, struct m0_fop_type *ftype, void(*fop_release)(struct m0_ref *))
Definition: io_fops.c:865
Definition: nucleus.c:42
M0_INTERNAL void m0_xcode_ctx_init(struct m0_xcode_ctx *ctx, const struct m0_xcode_obj *obj)
Definition: xcode.c:373
#define out(...)
Definition: gen.c:41
Definition: file.h:81
M0_INTERNAL bool m0_is_read_fop(const struct m0_fop *fop)
Definition: io_fops.c:916
const struct m0_fop_type_ops io_fop_rwv_ops
Definition: io_fops.c:229
M0_INTERNAL bool m0_is_cob_delete_fop(const struct m0_fop *fop)
Definition: io_fops.c:957
const struct m0_net_buffer_callbacks m0_rpc__buf_bulk_cb
Definition: bulk.c:238
struct m0_fid gfid
Definition: dir.c:626
const struct m0_fom_type_ops io_fom_type_ops
Definition: io_foms.c:633
M0_INTERNAL void m0_sm_conf_fini(struct m0_sm_conf *conf)
Definition: sm.c:376
M0_INTERNAL struct m0_fop_cob_rw * io_rw_get(struct m0_fop *fop)
Definition: io_fops.c:1037
M0_INTERNAL bool m0_is_write_fop(const struct m0_fop *fop)
Definition: io_fops.c:922
struct m0_fop_type m0_fop_cob_truncate_fopt
Definition: io_fops.c:77
M0_INTERNAL int m0_io_fop_prepare(struct m0_fop *fop)
Definition: io_fops.c:1513
M0_INTERNAL void m0_dump_cob_attr(const struct m0_cob_attr *attr)
Definition: io_fops.c:132
#define M0_FOP_XCODE_OBJ(f)
Definition: fop.h:334
static struct m0_dtm_oper_descr reply
Definition: transmit.c:94
M0_INTERNAL int m0_rpc_bulk_buf_add(struct m0_rpc_bulk *rbulk, uint32_t segs_nr, m0_bcount_t length, struct m0_net_domain *netdom, struct m0_net_buffer *nb, struct m0_rpc_bulk_buf **out)
Definition: bulk.c:291
struct m0_fop_type m0_fop_cob_writev_fopt
Definition: io_fops.c:72
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
void m0_free(void *data)
Definition: memory.c:146
struct m0_rpc_item f_item
Definition: fop.h:83
#define M0_BUF_INIT(size, data)
Definition: buf.h:64
struct m0_ioseg * ci_iosegs
Definition: vec.h:636
struct m0_io_indexvec crw_ivec
Definition: io_fops.h:411
struct m0_pdclust_src_addr src
Definition: fd.c:108
static void io_fop_bulkbuf_move(struct m0_fop *src, struct m0_fop *dest)
Definition: io_fops.c:1444
int32_t rc
Definition: trigger_fop.h:47
M0_INTERNAL uint32_t m0_io_fop_segs_nr(struct m0_fop *fop, uint32_t index)
Definition: io_fops.c:1020
static void io_fop_seg_coalesce(const struct ioseg *seg, struct io_seg_set *aggr_set)
Definition: io_fops.c:1140
#define ARRAY_SIZE(a)
Definition: misc.h:45
#define M0_FOP_DEFAULT_ITEM_TYPE_OPS
Definition: fop.h:184
struct m0_rpc_conn * s_conn
Definition: session.h:312
int(* fto_undo)(struct m0_fop_fol_frag *ffrag, struct m0_fol *fol)
Definition: fop.h:273
struct m0_net_transfer_mc * m0_fop_tm_get(const struct m0_fop *fop)
Definition: fop.c:479
static uint32_t iosegs_nr(struct m0_rpc_bulk *rbulk)
Definition: io_fops.c:1294
Definition: fop.h:79
struct m0_mutex rb_mutex
Definition: bulk.h:251
M0_TL_DECLARE(rpcbulk, M0_INTERNAL, struct m0_rpc_bulk_buf)
const struct m0_di_ops * fi_di_ops
Definition: file.h:92
uint64_t crw_flags
Definition: io_fops.h:413
#define ZNR(zvec)
Definition: io_fops.c:1286
#define FID_F
Definition: fid.h:75
Definition: vec.h:145
struct m0_fop * m0_fop_alloc(struct m0_fop_type *fopt, void *data, struct m0_rpc_machine *mach)
Definition: fop.c:96
struct m0_fop_type m0_fop_cob_delete_fopt
Definition: io_fops.c:76
#define M0_IMPOSSIBLE(fmt,...)
uint32_t ffrp_fop_code
Definition: fop.h:354