req.c
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2016-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
23 
30 #define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_DIX
31 #include "lib/trace.h"
32 #include "lib/memory.h"
33 #include "lib/buf.h"
34 #include "lib/vec.h"
35 #include "lib/finject.h"
36 #include "conf/schema.h" /* M0_CST_CAS */
37 #include "sm/sm.h"
38 #include "pool/pool.h" /* m0_pool_version_find */
39 #include "cas/client.h"
40 #include "cas/cas.h" /* m0_dix_fid_type */
41 #include "dix/layout.h"
42 #include "dix/meta.h"
43 #include "dix/req.h"
44 #include "dix/client.h"
45 #include "dix/client_internal.h" /* m0_dix_pver */
46 #include "dix/fid_convert.h"
47 #include "dix/dix_addb.h"
48 #include "dtm0/dtx.h" /* m0_dtx0_* API */
49 
50 static struct m0_sm_state_descr dix_req_states[] = {
51  [DIXREQ_INIT] = {
53   .sd_name    = "init",
54   .sd_allowed = M0_BITS(DIXREQ_DISCOVERY_DONE,
55                         DIXREQ_LAYOUT_DISCOVERY,
56                         DIXREQ_LID_DISCOVERY,
57                         DIXREQ_FAILURE)
58  },
59  [DIXREQ_LAYOUT_DISCOVERY] = {
60   .sd_name    = "layout-discovery",
61   .sd_allowed = M0_BITS(DIXREQ_DISCOVERY_DONE,
62                         DIXREQ_LID_DISCOVERY,
63                         DIXREQ_FINAL,
64                         DIXREQ_FAILURE)
65  },
66  [DIXREQ_LID_DISCOVERY] = {
67   .sd_name    = "lid-discovery",
68   .sd_allowed = M0_BITS(DIXREQ_DISCOVERY_DONE,
69                         DIXREQ_FINAL,
70                         DIXREQ_FAILURE)
71  },
72  [DIXREQ_DISCOVERY_DONE] = {
73   .sd_name    = "discovery-done",
74   .sd_allowed = M0_BITS(DIXREQ_INPROGRESS,
75                         DIXREQ_META_UPDATE,
76                         DIXREQ_FAILURE)
77  },
78  [DIXREQ_META_UPDATE] = {
79   .sd_name    = "idxop-meta-update",
80   .sd_allowed = M0_BITS(DIXREQ_INPROGRESS, DIXREQ_FINAL,
81                         DIXREQ_FAILURE)
82  },
83  [DIXREQ_INPROGRESS] = {
84   .sd_name    = "in-progress",
85   .sd_allowed = M0_BITS(DIXREQ_GET_RESEND, DIXREQ_FINAL,
86                         DIXREQ_DEL_PHASE2, DIXREQ_FAILURE)
87  },
88  [DIXREQ_GET_RESEND] = {
89   .sd_name    = "resend-get-req",
90   .sd_allowed = M0_BITS(DIXREQ_INPROGRESS, DIXREQ_FAILURE)
91  },
92  [DIXREQ_DEL_PHASE2] = {
93   .sd_name    = "delete-phase2",
94   .sd_allowed = M0_BITS(DIXREQ_FINAL, DIXREQ_FAILURE)
95  },
96  [DIXREQ_FINAL] = {
97   .sd_name    = "final",
98   .sd_flags   = M0_SDF_TERMINAL,
99  },
100  [DIXREQ_FAILURE] = {
101   .sd_name    = "failure",
102   .sd_flags   = M0_SDF_TERMINAL | M0_SDF_FAILURE
103  }
104 };
105 
107 static struct m0_sm_trans_descr dix_req_trans[] = {
108  { "layouts-known", DIXREQ_INIT, DIXREQ_DISCOVERY_DONE },
109  { "find-layouts", DIXREQ_INIT, DIXREQ_LAYOUT_DISCOVERY },
110  { "resolve-lids", DIXREQ_INIT, DIXREQ_LID_DISCOVERY },
111  { "copy-fail", DIXREQ_INIT, DIXREQ_FAILURE },
112  { "layouts-found", DIXREQ_LAYOUT_DISCOVERY, DIXREQ_DISCOVERY_DONE },
114  { "not-found", DIXREQ_LAYOUT_DISCOVERY, DIXREQ_FAILURE },
115  { "desc-final", DIXREQ_LAYOUT_DISCOVERY, DIXREQ_FINAL },
116  { "lids-resolved", DIXREQ_LID_DISCOVERY, DIXREQ_DISCOVERY_DONE },
117  { "not-resolved", DIXREQ_LID_DISCOVERY, DIXREQ_FAILURE },
118  { "lid-final", DIXREQ_LID_DISCOVERY, DIXREQ_FINAL },
119  { "create/delete", DIXREQ_DISCOVERY_DONE, DIXREQ_META_UPDATE },
121  { "failure", DIXREQ_DISCOVERY_DONE, DIXREQ_FAILURE },
122  { "meta-updated", DIXREQ_META_UPDATE, DIXREQ_INPROGRESS },
123  { "crow-or-fail", DIXREQ_META_UPDATE, DIXREQ_FINAL },
124  { "update-fail", DIXREQ_META_UPDATE, DIXREQ_FAILURE },
125  { "get-req-fail", DIXREQ_INPROGRESS, DIXREQ_GET_RESEND },
126  { "rpc-failure", DIXREQ_INPROGRESS, DIXREQ_FAILURE },
127  { "req-processed", DIXREQ_INPROGRESS, DIXREQ_FINAL },
128  { "inp-del-ph2", DIXREQ_INPROGRESS, DIXREQ_DEL_PHASE2 },
129  { "del-ph2-final", DIXREQ_DEL_PHASE2, DIXREQ_FINAL },
130  { "del-ph2-fail", DIXREQ_DEL_PHASE2, DIXREQ_FAILURE },
131  { "all-cctg-fail", DIXREQ_GET_RESEND, DIXREQ_FAILURE },
132  { "resend", DIXREQ_GET_RESEND, DIXREQ_INPROGRESS },
133 };
134 
135 M0_INTERNAL struct m0_sm_conf dix_req_sm_conf = {
136  .scf_name      = "dix_req",
137  .scf_nr_states = ARRAY_SIZE(dix_req_states),
138  .scf_state = dix_req_states,
139  .scf_trans_nr = ARRAY_SIZE(dix_req_trans),
140  .scf_trans = dix_req_trans
141 };
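Taken together, the two tables above define the request lifecycle: an index operation (create, delete, catalogue lookup) normally walks INIT -> DIXREQ_LAYOUT_DISCOVERY or DIXREQ_LID_DISCOVERY -> DIXREQ_DISCOVERY_DONE -> DIXREQ_META_UPDATE -> DIXREQ_INPROGRESS -> DIXREQ_FINAL, while a record operation (get, put, del, next) goes straight from DIXREQ_DISCOVERY_DONE to DIXREQ_INPROGRESS and may detour through DIXREQ_GET_RESEND (GET retries against parity units) or DIXREQ_DEL_PHASE2 (the second phase of delete in degraded mode) before ending in DIXREQ_FINAL or DIXREQ_FAILURE.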
142 
143 M0_TL_DESCR_DEFINE(cas_rop, "cas record operations",
144  M0_INTERNAL, struct m0_dix_cas_rop, crp_linkage, crp_magix,
146 M0_TL_DEFINE(cas_rop, M0_INTERNAL, struct m0_dix_cas_rop);
147 
148 static void dix_idxop(struct m0_dix_req *req);
149 static void dix_rop(struct m0_dix_req *req);
150 static void dix_rop_units_set(struct m0_dix_req *req);
151 static int dix_cas_rops_alloc(struct m0_dix_req *req);
152 static int dix_cas_rops_fill(struct m0_dix_req *req);
153 static int dix_cas_rops_send(struct m0_dix_req *req);
154 static void dix_ldescr_resolve(struct m0_dix_req *req);
155 static void dix_discovery_completed(struct m0_dix_req *req);
156 static int dix_idxop_reqs_send(struct m0_dix_req *req);
157 static void dix_discovery(struct m0_dix_req *req);
158 
159 static int dix_id_layouts_nr(struct m0_dix_req *req);
160 static int dix_unknown_layouts_nr(struct m0_dix_req *req);
161 static uint32_t dix_rop_tgt_iter_max(struct m0_dix_req *req,
162  struct m0_dix_rec_op *rec_op);
163 
164 
165 static bool dix_req_is_idxop(const struct m0_dix_req *req)
166 {
167  return M0_IN(req->dr_type, (DIX_CREATE, DIX_DELETE, DIX_CCTGS_LOOKUP));
168 }
169 
170 static struct m0_sm_group *dix_req_smgrp(const struct m0_dix_req *req)
171 {
172  return req->dr_sm.sm_grp;
173 }
174 
175 static void dix_to_cas_map(const struct m0_dix_req *dreq,
176  const struct m0_cas_req *creq)
177 {
178  uint64_t did = m0_sm_id_get(&dreq->dr_sm);
179  uint64_t cid = m0_sm_id_get(&creq->ccr_sm);
180  M0_ADDB2_ADD(M0_AVI_DIX_TO_CAS, did, cid);
181 }
182 
183 
184 M0_INTERNAL void m0_dix_req_lock(struct m0_dix_req *req)
185 {
186  M0_ENTRY();
187  m0_sm_group_lock(dix_req_smgrp(req));
188 }
189 
190 M0_INTERNAL void m0_dix_req_unlock(struct m0_dix_req *req)
191 {
192  M0_ENTRY();
193  m0_sm_group_unlock(dix_req_smgrp(req));
194 }
195 
196 M0_INTERNAL bool m0_dix_req_is_locked(const struct m0_dix_req *req)
197 {
198  return m0_sm_group_is_locked(dix_req_smgrp(req));
199 }
200 
201 M0_INTERNAL int m0_dix_req_wait(struct m0_dix_req *req, uint64_t states,
202  m0_time_t to)
203 {
204  M0_ENTRY();
206  return M0_RC(m0_sm_timedwait(&req->dr_sm, states, to));
207 }
208 
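The lock, unlock and wait helpers above form the synchronous driving pattern for a DIX request. The fragment below is an illustrative sketch only (it is not part of this file): it assumes cli, grp, indices and indices_nr are prepared by the caller, and it relies on M0_TIME_NEVER from lib/time and on the request's embedded state machine for the final result code.

	/* Illustrative sketch: run a catalogue lookup synchronously. */
	struct m0_dix_req req;
	int               rc;

	m0_dix_req_init(&req, cli, grp);
	m0_dix_req_lock(&req);
	rc = m0_dix_cctgs_lookup(&req, indices, indices_nr) ?:
	     m0_dix_req_wait(&req, M0_BITS(DIXREQ_FINAL, DIXREQ_FAILURE),
	                     M0_TIME_NEVER);
	if (rc == 0)
	        rc = req.dr_sm.sm_rc;   /* overall request result */
	m0_dix_req_unlock(&req);

Per-item results and request finalisation (m0_dix_req_fini(), mentioned in the comments below) are omitted here for brevity.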
209 static void dix_req_init(struct m0_dix_req *req,
210  struct m0_dix_cli *cli,
211  struct m0_sm_group *grp,
212  bool meta)
213 {
214  M0_SET0(req);
215  req->dr_cli = cli;
216  req->dr_is_meta = meta;
217  m0_sm_init(&req->dr_sm, &dix_req_sm_conf, DIXREQ_INIT, grp);
218  m0_sm_addb2_counter_init(&req->dr_sm);
219 }
220 
221 M0_INTERNAL void m0_dix_mreq_init(struct m0_dix_req *req,
222  struct m0_dix_cli *cli,
223  struct m0_sm_group *grp)
224 {
225  dix_req_init(req, cli, grp, true);
226 }
227 
228 M0_INTERNAL void m0_dix_req_init(struct m0_dix_req *req,
229  struct m0_dix_cli *cli,
230  struct m0_sm_group *grp)
231 {
232  dix_req_init(req, cli, grp, false);
233 }
234 
235 static enum m0_dix_req_state dix_req_state(const struct m0_dix_req *req)
236 {
237  return req->dr_sm.sm_state;
238 }
239 
240 static void dix_req_state_set(struct m0_dix_req *req,
241  enum m0_dix_req_state state)
242 {
243  M0_LOG(M0_DEBUG, "DIX req: %p, state change:[%s -> %s]\n",
244  req, m0_sm_state_name(&req->dr_sm, req->dr_sm.sm_state),
245  m0_sm_state_name(&req->dr_sm, state));
246  m0_sm_state_set(&req->dr_sm, state);
247 }
248 
249 static void dix_req_failure(struct m0_dix_req *req, int32_t rc)
250 {
251  M0_PRE(rc != 0);
252  m0_sm_fail(&req->dr_sm, DIXREQ_FAILURE, rc);
253 }
254 
255 static int dix_type_layouts_nr(struct m0_dix_req *req,
256  enum dix_layout_type type)
257 {
258  return m0_count(i, req->dr_indices_nr,
259  req->dr_indices[i].dd_layout.dl_type == type);
260 }
261 
262 static int dix_resolved_nr(struct m0_dix_req *req)
263 {
264  return dix_type_layouts_nr(req, DIX_LTYPE_DESCR);
265 }
266 
267 static int dix_id_layouts_nr(struct m0_dix_req *req)
268 {
269  return dix_type_layouts_nr(req, DIX_LTYPE_ID);
270 }
271 
272 static int dix_unknown_layouts_nr(struct m0_dix_req *req)
273 {
274  return dix_type_layouts_nr(req, DIX_LTYPE_UNKNOWN);
275 }
276 
277 static void dix_to_mdix_map(const struct m0_dix_req *req,
278  const struct m0_dix_meta_req *mreq)
279 {
280  uint64_t rid = m0_sm_id_get(&req->dr_sm);
281  uint64_t mid = m0_sm_id_get(&mreq->dmr_req.dr_sm);
282  M0_ADDB2_ADD(M0_AVI_DIX_TO_MDIX, rid, mid);
283 }
284 
285 static void dix_layout_find_ast_cb(struct m0_sm_group *grp,
286                                    struct m0_sm_ast   *ast)
287 {
288  struct m0_dix_req *req = ast->sa_datum;
289  struct m0_dix_meta_req *meta_req = req->dr_meta_req;
290  struct m0_dix_ldesc *ldesc;
291  enum m0_dix_req_state state = dix_req_state(req);
292  bool idx_op = dix_req_is_idxop(req);
293  uint32_t i;
294  uint32_t k;
295  int rc;
296  int rc2;
297 
298  M0_ENTRY("req %p", req);
300  rc = m0_dix_meta_generic_rc(meta_req);
301  if (rc == 0) {
302  M0_ASSERT(ergo(!idx_op, m0_dix_meta_req_nr(meta_req) == 1));
303  for (i = 0, k = 0; rc == 0 && i < req->dr_indices_nr; i++) {
304  switch(req->dr_indices[i].dd_layout.dl_type) {
305  case DIX_LTYPE_UNKNOWN:
307  rc2 = m0_dix_layout_rep_get(meta_req, k,
308  &req->dr_indices[k].dd_layout);
309  break;
310  case DIX_LTYPE_ID:
312  ldesc = &req->dr_indices[k].dd_layout.u.dl_desc;
313  rc2 = m0_dix_ldescr_rep_get(meta_req, k, ldesc);
314  break;
315  default:
316  /*
317  * Note that CAS requests are not sent for
318  * layouts with DIX_LTYPE_DESCR.
319  */
320  M0_IMPOSSIBLE("Impossible layout type %d",
321  req->dr_indices[i].dd_layout.dl_type);
322  break;
323  }
324  if (rc2 != 0) {
325  /*
326  * Treat a layout retrieval error as fatal for a record
327  * operation, since the request is executed against only
328  * one index.
329  */
330  if (!idx_op)
331  rc = rc2;
332  else
333  req->dr_items[k].dxi_rc = rc2;
334  }
335  k++;
336  }
337  /* All replies for the meta request should be used. */
338  M0_ASSERT(k == m0_dix_meta_req_nr(meta_req));
339  }
340  m0_dix_meta_req_fini(meta_req);
341  m0_free0(&req->dr_meta_req);
342 
343  if (rc == 0) {
344  /*
345  * Stop request processing if there are no items which can
346  * potentially succeed.
347  */
348  if (!m0_exists(i, req->dr_items_nr,
349  req->dr_items[i].dxi_rc == 0)) {
351  /*
352  * If there are layouts identified by id, then it's
353  * necessary to resolve the ids to layout descriptors. Check
354  * the state to avoid resolving layout ids in a loop.
355  */
356  } else if (dix_id_layouts_nr(req) > 0 &&
359  } else {
360  /*
361  * All (or at least some) layout descriptors are
362  * obtained successfully.
363  */
365  }
366  } else {
368  }
369 }
370 
371 static bool dix_layout_find_clink_cb(struct m0_clink *cl)
372 {
373  struct m0_dix_req *req = M0_AMB(req, cl, dr_clink);
374 
375  m0_clink_del(cl);
376  m0_clink_fini(cl);
377  req->dr_ast.sa_cb = dix_layout_find_ast_cb;
378  req->dr_ast.sa_datum = req;
379  m0_sm_ast_post(dix_req_smgrp(req), &req->dr_ast);
380  return true;
381 }
382 
383 static void dix_layout_find(struct m0_dix_req *req)
384 {
385  struct m0_dix_meta_req *meta_req;
386  struct m0_fid *fids;
387  struct m0_dix *indices;
388  uint32_t i;
389  uint32_t k;
390  uint32_t unknown_nr;
391  int rc;
392  M0_ENTRY();
393 
394  indices = req->dr_indices;
395  unknown_nr = dix_unknown_layouts_nr(req);
396 
397  M0_PRE(unknown_nr > 0);
398  M0_ALLOC_PTR(req->dr_meta_req);
399  M0_ALLOC_ARR(fids, unknown_nr);
400  if (fids == NULL || req->dr_meta_req == NULL) {
401  rc = M0_ERR(-ENOMEM);
402  goto err;
403  }
404  meta_req = req->dr_meta_req;
405  for (i = 0, k = 0; i < req->dr_indices_nr; i++)
406   if (req->dr_indices[i].dd_layout.dl_type == DIX_LTYPE_UNKNOWN)
407    fids[k++] = req->dr_indices[i].dd_fid;
408  m0_dix_meta_req_init(meta_req, req->dr_cli, dix_req_smgrp(req));
409  dix_to_mdix_map(req, meta_req);
410  m0_clink_init(&req->dr_clink, dix_layout_find_clink_cb);
411  m0_clink_add_lock(&meta_req->dmr_chan, &req->dr_clink);
412  /* Start loading layouts from CAS. */
413  rc = m0_dix_layout_get(meta_req, fids, unknown_nr);
414  if (rc != 0) {
415  m0_clink_del_lock(&req->dr_clink);
416  m0_clink_fini(&req->dr_clink);
417  m0_dix_meta_req_fini(meta_req);
418  }
419 
420 err:
421  if (rc != 0) {
422   m0_free0(&req->dr_meta_req);
423   dix_req_failure(req, rc);
424  } else {
425   dix_req_state_set(req, DIXREQ_LAYOUT_DISCOVERY);
426  }
427  m0_free(fids);
428  M0_LEAVE();
429 }
430 
431 static int dix_indices_copy(struct m0_dix **dst_indices,
432  const struct m0_dix *src_indices,
433  uint32_t indices_nr)
434 {
435  struct m0_dix *dst;
436  uint32_t i;
437  int rc = 0;
438 
439  M0_PRE(dst_indices != NULL);
440  M0_PRE(src_indices != NULL);
441  M0_PRE(indices_nr != 0);
442  M0_ALLOC_ARR(dst, indices_nr);
443  if (dst == NULL)
444  return M0_ERR(-ENOMEM);
445  for (i = 0; i < indices_nr; i++) {
446  rc = m0_dix_copy(&dst[i], &src_indices[i]);
447  if (rc != 0)
448  break;
449  }
450  if (rc != 0) {
451  for (i = 0; i < indices_nr; i++)
452  m0_dix_fini(&dst[i]);
453  m0_free(dst);
454  return M0_ERR(rc);
455  }
456  *dst_indices = dst;
457  return 0;
458 }
459 
461  const struct m0_dix *indices,
462  uint32_t indices_nr)
463 {
464  int rc;
465 
466  M0_PRE(indices != NULL);
467  M0_PRE(indices_nr != 0);
468  rc = dix_indices_copy(&req->dr_indices, indices, indices_nr);
469  if (rc == 0)
470  req->dr_indices_nr = indices_nr;
471  return rc;
472 }
473 
474 static struct m0_pool_version *dix_pver_find(const struct m0_dix_req *req,
475  const struct m0_fid *pver_fid)
476 {
477  return m0_pool_version_find(req->dr_cli->dx_pc, pver_fid);
478 }
479 
480 static void dix_idxop_ctx_free(struct m0_dix_idxop_ctx *idxop)
481 {
482  uint32_t i;
483 
484  for (i = 0; i < idxop->dcd_idxop_reqs_nr; i++)
485   m0_free(idxop->dcd_idxop_reqs[i].dcr_creqs);
486  m0_free0(&idxop->dcd_idxop_reqs);
487 }
488 
489 static void dix_idxop_item_rc_update(struct m0_dix_item *ditem,
490  struct m0_dix_req *req,
491  const struct m0_dix_cas_req *creq)
492 {
493  struct m0_cas_rec_reply crep;
494  const struct m0_cas_req *cas_req;
495  int rc;
496 
497  if (ditem->dxi_rc == 0) {
498  cas_req = &creq->ds_creq;
499  rc = creq->ds_rc ?:
500  m0_cas_req_generic_rc(cas_req);
501  if (rc == 0) {
502  switch (req->dr_type) {
503  case DIX_CREATE:
504  m0_cas_index_create_rep(cas_req, 0, &crep);
505  break;
506  case DIX_DELETE:
507  m0_cas_index_delete_rep(cas_req, 0, &crep);
508  break;
509  case DIX_CCTGS_LOOKUP:
510  m0_cas_index_lookup_rep(cas_req, 0, &crep);
511  break;
512  default:
513  M0_IMPOSSIBLE("Unknown type %u", req->dr_type);
514  }
515  rc = crep.crr_rc;
516  /*
517     * It is OK to get -ENOENT during the 2nd phase, because the
518     * catalogue may already have been deleted during the 1st phase.
519  */
521  rc == -ENOENT)
522  rc = 0;
523  }
524  ditem->dxi_rc = rc;
525  }
526 }
527 
528 static void dix_idxop_completed(struct m0_sm_group *grp, struct m0_sm_ast *ast)
529 {
530  struct m0_dix_req *req = ast->sa_datum;
531  struct m0_dix_idxop_ctx *idxop_ctx = &req->dr_idxop;
532  struct m0_dix_idxop_req *idxop_req;
533  struct m0_dix_item *ditem;
534  struct m0_dix_cas_req *creq;
535  bool del_phase2 = false;
536  uint32_t i;
537  uint64_t j;
538  int rc;
539 
540  (void)grp;
541  M0_ENTRY("req %p", req);
542  for (i = 0; i < idxop_ctx->dcd_idxop_reqs_nr; i++) {
543  idxop_req = &idxop_ctx->dcd_idxop_reqs[i];
544  M0_ASSERT(idxop_req->dcr_index_no < req->dr_items_nr);
545  ditem = &req->dr_items[idxop_req->dcr_index_no];
546  for (j = 0; j < idxop_req->dcr_creqs_nr; j++) {
547  creq = &idxop_req->dcr_creqs[j];
548  dix_idxop_item_rc_update(ditem, req, creq);
549  if (ditem->dxi_rc == 0 && idxop_req->dcr_del_phase2) {
550  ditem->dxi_del_phase2 = true;
551  del_phase2 = true;
552  }
553  m0_cas_req_fini(&creq->ds_creq);
554  }
555  }
556  dix_idxop_ctx_free(idxop_ctx);
557  if (del_phase2) {
560  if (rc != 0)
562  } else {
564  }
565  M0_LEAVE();
566 }
567 
568 static bool dix_idxop_clink_cb(struct m0_clink *cl)
569 {
570  struct m0_dix_cas_req *creq = container_of(cl, struct m0_dix_cas_req,
571  ds_clink);
572  uint32_t state = creq->ds_creq.ccr_sm.sm_state;
573  struct m0_dix_idxop_ctx *idxop;
574  struct m0_dix_req *dreq;
575 
576  if (M0_IN(state, (CASREQ_FINAL, CASREQ_FAILURE))) {
577  dreq = creq->ds_parent;
578 
579  /* Update txid records in Client. */
580  if (dreq->dr_cli->dx_sync_rec_update != NULL)
581  dreq->dr_cli->dx_sync_rec_update(
582  dreq, creq->ds_creq.ccr_sess,
583  &creq->ds_creq.ccr_remid);
584 
585  m0_clink_del(cl);
586  m0_clink_fini(cl);
587  idxop = &creq->ds_parent->dr_idxop;
588  idxop->dcd_completed_nr++;
589  M0_PRE(idxop->dcd_completed_nr <= idxop->dcd_cas_reqs_nr);
590  if (idxop->dcd_completed_nr == idxop->dcd_cas_reqs_nr) {
591   idxop->dcd_ast.sa_cb    = dix_idxop_completed;
592   idxop->dcd_ast.sa_datum = dreq;
593  m0_sm_ast_post(dix_req_smgrp(dreq), &idxop->dcd_ast);
594  }
595  }
596  return true;
597 }
598 
599 static int dix_idxop_pver_analyse(struct m0_dix_idxop_req *idxop_req,
600  struct m0_dix_req *dreq,
601  uint64_t *creqs_nr)
602 {
603  struct m0_pool_version *pver = idxop_req->dcr_pver;
604  struct m0_poolmach *pm = &pver->pv_mach;
605  struct m0_pools_common *pc = pver->pv_pc;
606  struct m0_pooldev *sdev;
607  enum m0_pool_nd_state state;
608  struct m0_reqh_service_ctx *cas_svc;
609  enum dix_req_type type = dreq->dr_type;
610  uint32_t i;
611  int rc = 0;
612  M0_ENTRY();
613 
615 
616  *creqs_nr = 0;
617  for (i = 0; i < pm->pm_state->pst_nr_devices; i++) {
618  sdev = &pm->pm_state->pst_devices_array[i];
619  cas_svc = pc->pc_dev2svc[sdev->pd_sdev_idx].pds_ctx;
620  if (cas_svc->sc_type != M0_CST_CAS) {
621  rc = M0_ERR_INFO(-EINVAL, "Incorrect service type %d",
622  cas_svc->sc_type);
623  break;
624  }
625 
626  state = sdev->pd_state;
627  /*
628  * It's impossible to create all component catalogues if some
629  * disk is not available (online or rebalancing). Also, it's
630  * impossible to check consistently that all component
631  * catalogues are present if some disk is not online.
632  */
633  if ((state != M0_PNDS_ONLINE && type == DIX_CCTGS_LOOKUP) ||
634  (!M0_IN(state, (M0_PNDS_ONLINE, M0_PNDS_SNS_REBALANCING)) &&
635  type == DIX_CREATE)) {
636  rc = M0_ERR(-EIO);
637  break;
638  }
639 
640  /*
641  * Two-phase component catalogue removal is necessary if
642  * repair/re-balance is in progress.
643  * See dix/client.h, "Operation in degraded mode".
644  */
645  if (type == DIX_DELETE &&
646  dix_req_state(dreq) != DIXREQ_DEL_PHASE2 &&
647  M0_IN(state, (M0_PNDS_SNS_REPAIRING,
649  idxop_req->dcr_del_phase2 = true;
650 
651  /*
652   * Send CAS requests only to online or rebalancing drives.
653   * Actually, only DIX_DELETE can be sent to devices
654   * selectively; DIX_CREATE and DIX_CCTGS_LOOKUP are sent either
655   * to all drives in a pool or to none of them.
656  */
657  if (M0_IN(state, (M0_PNDS_ONLINE, M0_PNDS_SNS_REBALANCING)))
658  (*creqs_nr)++;
659  }
660 
661  if (rc == 0 && *creqs_nr == 0)
662  rc = M0_ERR(-EIO);
663 
664  if (rc != 0)
665  *creqs_nr = 0;
666  M0_POST(rc == 0 ? *creqs_nr > 0 : *creqs_nr == 0);
667  return M0_RC(rc);
668 }
669 
670 static int dix_idxop_req_send(struct m0_dix_idxop_req *idxop_req,
671  struct m0_dix_req *dreq,
672  uint64_t *reqs_acc)
673 {
674  struct m0_pool_version *pver = idxop_req->dcr_pver;
675  struct m0_poolmach *pm = &pver->pv_mach;
676  struct m0_pools_common *pc = pver->pv_pc;
677  struct m0_dix_cas_req *creq;
678  struct m0_pooldev *sdev;
679  uint32_t sdev_idx;
680  struct m0_fid cctg_fid;
681  struct m0_reqh_service_ctx *cas_svc;
682  uint32_t i;
683  uint32_t k;
684  struct m0_dix *index;
685  struct m0_cas_id cid;
686  uint32_t flags = dreq->dr_flags;
687  uint64_t creqs_nr;
688  int rc;
689  M0_ENTRY();
690 
692  rc = dix_idxop_pver_analyse(idxop_req, dreq, &creqs_nr);
693  if (rc != 0)
694  goto pmach_unlock;
695  M0_ALLOC_ARR(idxop_req->dcr_creqs, creqs_nr);
696  if (idxop_req->dcr_creqs == NULL) {
697  rc = M0_ERR(-ENOMEM);
698  goto pmach_unlock;
699  }
700  idxop_req->dcr_creqs_nr = creqs_nr;
701  k = 0;
702  for (i = 0; i < pm->pm_state->pst_nr_devices; i++) {
703  sdev = &pver->pv_mach.pm_state->pst_devices_array[i];
704   if (!M0_IN(sdev->pd_state, (M0_PNDS_ONLINE,
705                               M0_PNDS_SNS_REBALANCING)))
706    continue;
707  sdev_idx = sdev->pd_sdev_idx;
708  creq = &idxop_req->dcr_creqs[k++];
709  M0_LOG(M0_DEBUG, "creqs_nr=%" PRIu64 " this is the %d th creq=%p",
710  creqs_nr, k-1, creq);
711  creq->ds_parent = dreq;
712  cas_svc = pc->pc_dev2svc[sdev_idx].pds_ctx;
713  M0_ASSERT(cas_svc->sc_type == M0_CST_CAS);
714  m0_cas_req_init(&creq->ds_creq, &cas_svc->sc_rlink.rlk_sess,
715  dix_req_smgrp(dreq));
716   dix_to_cas_map(dreq, &creq->ds_creq);
717   m0_clink_init(&creq->ds_clink, dix_idxop_clink_cb);
718   m0_clink_add(&creq->ds_creq.ccr_sm.sm_chan, &creq->ds_clink);
719  index = &dreq->dr_indices[idxop_req->dcr_index_no];
720  m0_dix_fid_convert_dix2cctg(&index->dd_fid, &cctg_fid,
721  sdev_idx);
722  cid.ci_fid = cctg_fid;
723  M0_ASSERT(index->dd_layout.dl_type == DIX_LTYPE_DESCR);
724  cid.ci_layout.dl_type = index->dd_layout.dl_type;
725  rc = m0_dix_ldesc_copy(&cid.ci_layout.u.dl_desc,
726  &index->dd_layout.u.dl_desc);
727  if (rc == 0) {
728  switch (dreq->dr_type) {
729  case DIX_CREATE:
730  rc = m0_cas_index_create(&creq->ds_creq, &cid,
731  1, dreq->dr_dtx);
732  break;
733  case DIX_DELETE:
734  if (idxop_req->dcr_del_phase2)
735  flags |= COF_DEL_LOCK;
736  rc = m0_cas_index_delete(&creq->ds_creq, &cid,
737  1, dreq->dr_dtx,
738  flags);
739  break;
740  case DIX_CCTGS_LOOKUP:
741  rc = m0_cas_index_lookup(&creq->ds_creq, &cid,
742  1);
743  break;
744  default:
745  M0_IMPOSSIBLE("Unknown type %u", dreq->dr_type);
746  }
747  }
748  m0_cas_id_fini(&cid);
749  if (rc != 0) {
750  creq->ds_rc = M0_ERR(rc);
751  m0_clink_del(&creq->ds_clink);
752  m0_clink_fini(&creq->ds_clink);
753  /*
754  * index->dd_layout.u.dl_desc will be finalised in
755  * m0_dix_req_fini().
756  */
757  } else {
758  (*reqs_acc)++;
759  }
760  }
761  M0_ASSERT(k == creqs_nr);
762 pmach_unlock:
764  return M0_RC(rc);
765 }
766 
768  struct m0_sm_ast *ast)
769 {
770  struct m0_dix_req *req = ast->sa_datum;
771  struct m0_dix_meta_req *meta_req = req->dr_meta_req;
772  struct m0_dix_item *item;
773  uint64_t fids_nr;
774  int i;
775  int k;
776  /*
777   * Indicates whether request processing should be continued.
778   * The request processing is stopped if all items in the user input
779   * vector have failed or CROW is requested for the CREATE operation.
780  */
781  bool cont = false;
782  bool crow = !!(req->dr_flags & COF_CROW);
783  int rc;
784 
785  fids_nr = m0_count(i, req->dr_items_nr, req->dr_items[i].dxi_rc == 0);
786  M0_ASSERT(fids_nr > 0);
787  rc = m0_dix_meta_generic_rc(meta_req);
788  if (rc == 0) {
789  k = 0;
790  for (i = 0; i < req->dr_items_nr; i++) {
791  item = &req->dr_items[i];
792  if (item->dxi_rc == 0) {
793  item->dxi_rc = m0_dix_meta_item_rc(meta_req, k);
794  cont = cont || item->dxi_rc == 0;
795  k++;
796  }
797  }
798  M0_ASSERT(k == fids_nr);
799  if (!cont)
800  M0_LOG(M0_ERROR, "All items are failed");
801  /*
802  * If CROW is requested for CREATE operation, then component
803  * catalogues shouldn't be created => no CAS create requests
804  * should be sent.
805  */
806  cont = cont && !(req->dr_type == DIX_CREATE && crow);
807   if (cont)
808    rc = dix_idxop_reqs_send(req);
809  }
810 
811  m0_dix_meta_req_fini(meta_req);
812  m0_free0(&req->dr_meta_req);
813  if (rc == 0)
814   dix_req_state_set(req, !cont ?
815                     DIXREQ_FINAL : DIXREQ_INPROGRESS);
816  else
817   dix_req_failure(req, rc);
818 }
819 
821 {
822  struct m0_dix_req *req = container_of(cl, struct m0_dix_req, dr_clink);
823 
824  /*
825  * Sining: no need to update SYNC records in Client from this callback
826  * as it is invoked in meta.c::dix_meta_op_done_cb() and reply fops
827  * have been processed to update SYNC records in dix_cas_rop_clink_cb()
828  * before it.
829  */
830 
831  m0_clink_del(cl);
832  m0_clink_fini(cl);
833  req->dr_ast.sa_cb = dix_idxop_meta_update_ast_cb;
834  req->dr_ast.sa_datum = req;
835  m0_sm_ast_post(dix_req_smgrp(req), &req->dr_ast);
836  return true;
837 }
838 
840 {
841  struct m0_dix_meta_req *meta_req;
842  struct m0_fid *fids;
843  struct m0_dix_layout *layouts = NULL;
844  uint32_t fids_nr;
845  bool create = req->dr_type == DIX_CREATE;
846  uint64_t i;
847  uint64_t k;
848  int rc;
849 
850  M0_ENTRY();
851  M0_PRE(M0_IN(req->dr_type, (DIX_CREATE, DIX_DELETE)));
852  M0_ASSERT(req->dr_indices_nr == req->dr_items_nr);
853  fids_nr = m0_count(i, req->dr_items_nr, req->dr_items[i].dxi_rc == 0);
854  M0_ASSERT(fids_nr > 0);
855  M0_ALLOC_PTR(req->dr_meta_req);
856  M0_ALLOC_ARR(fids, fids_nr);
857  if (create)
858  M0_ALLOC_ARR(layouts, fids_nr);
859  if (fids == NULL || (create && layouts == NULL) ||
860  req->dr_meta_req == NULL) {
861  m0_free(fids);
862  m0_free(layouts);
863  m0_free(req->dr_meta_req);
864  return M0_ERR(-ENOMEM);
865  }
866 
867  meta_req = req->dr_meta_req;
868  k = 0;
869  for (i = 0; i < req->dr_items_nr; i++) {
870  if (req->dr_items[i].dxi_rc == 0) {
871  if (create) {
872  fids[k] = req->dr_orig_indices[i].dd_fid;
873  layouts[k] = req->dr_orig_indices[i].dd_layout;
874  } else {
875  fids[k] = req->dr_indices[i].dd_fid;
876  }
877  k++;
878  }
879  }
880  M0_ASSERT(k == fids_nr);
881 
882  m0_dix_meta_req_init(meta_req, req->dr_cli, dix_req_smgrp(req));
883  dix_to_mdix_map(req, meta_req);
884  /* Pass down the SYNC datum. */
885  meta_req->dmr_req.dr_sync_datum = req->dr_sync_datum;
887  m0_clink_add_lock(&meta_req->dmr_chan, &req->dr_clink);
888  rc = create ?
889  m0_dix_layout_put(meta_req, fids, layouts, fids_nr, req->dr_flags) :
890  m0_dix_layout_del(meta_req, fids, fids_nr);
891  if (rc != 0) {
892  m0_clink_del_lock(&req->dr_clink);
893  m0_clink_fini(&req->dr_clink);
894  m0_dix_meta_req_fini(meta_req);
895  m0_free0(&req->dr_meta_req);
896  }
897  m0_free(layouts);
898  m0_free(fids);
899  return M0_RC(rc);
900 }
901 
909 static bool dix_item_should_be_sent(const struct m0_dix_req *req, uint32_t i)
910 {
911  return dix_req_state(req) == DIXREQ_DEL_PHASE2 ?
912  req->dr_items[i].dxi_del_phase2 :
913  req->dr_items[i].dxi_rc == 0;
914 }
915 
916 static int dix_idxop_reqs_send(struct m0_dix_req *req)
917 {
918  struct m0_dix *indices = req->dr_indices;
919  struct m0_dix_idxop_ctx *idxop = &req->dr_idxop;
920  struct m0_dix_idxop_req *idxop_req;
921  struct m0_dix_layout *layout;
922  uint32_t reqs_nr;
923  uint32_t i;
924  uint32_t k;
925  uint64_t cas_nr = 0;
926  int rc;
927 
928  M0_ENTRY();
932  reqs_nr = m0_count(i, req->dr_items_nr,
933                     dix_item_should_be_sent(req, i));
934  M0_PRE(reqs_nr > 0);
935  M0_SET0(idxop);
936  M0_ALLOC_ARR(idxop->dcd_idxop_reqs, reqs_nr);
937  if (idxop->dcd_idxop_reqs == NULL)
938  return M0_ERR(-ENOMEM);
939  idxop->dcd_idxop_reqs_nr = reqs_nr;
940  for (i = 0, k = 0; i < req->dr_items_nr; i++) {
941  if (dix_item_should_be_sent(req, i)) {
942  layout = &indices[i].dd_layout;
943  M0_PRE(layout->dl_type == DIX_LTYPE_DESCR);
944  idxop_req = &idxop->dcd_idxop_reqs[k++];
945  idxop_req->dcr_index_no = i;
946  idxop_req->dcr_pver = dix_pver_find(req,
947  &layout->u.dl_desc.ld_pver);
948  rc = dix_idxop_req_send(idxop_req, req, &cas_nr);
949  if (rc != 0)
950  req->dr_items[i].dxi_rc = M0_ERR(rc);
951  }
952  }
953  if (cas_nr == 0) {
954  dix_idxop_ctx_free(idxop);
955  return M0_ERR(-EIO);
956  } else {
957  idxop->dcd_completed_nr = 0;
958  idxop->dcd_cas_reqs_nr = cas_nr;
959  }
960  return M0_RC(0);
961 }
962 
963 static void dix_idxop(struct m0_dix_req *req)
964 {
965  enum m0_dix_req_state next_state = DIXREQ_INVALID;
966  int rc;
967  M0_ENTRY();
968 
969  M0_PRE(dix_resolved_nr(req) > 0);
970  /*
971  * Put/delete ordinary indices layouts in 'layout' meta-index.
972  */
973  if (!req->dr_is_meta &&
974  M0_IN(req->dr_type, (DIX_CREATE, DIX_DELETE)) &&
975  !(req->dr_flags & COF_SKIP_LAYOUT)) {
977  next_state = DIXREQ_META_UPDATE;
978  } else {
980  next_state = DIXREQ_INPROGRESS;
981  }
982 
983  if (rc == 0)
984  dix_req_state_set(req, next_state);
985  else
987  M0_LEAVE();
988 }
989 
990 M0_INTERNAL int m0_dix_create(struct m0_dix_req *req,
991  const struct m0_dix *indices,
992  uint32_t indices_nr,
993  struct m0_dtx *dtx,
994  uint32_t flags)
995 {
996  int rc;
997 
998  M0_ENTRY();
999  /*
1000  * The user should provide the layout of each index to be created,
1001  * in the form of a layout descriptor or a layout id.
1002  */
1003  M0_PRE(m0_forall(i, indices_nr,
1004  indices[i].dd_layout.dl_type != DIX_LTYPE_UNKNOWN));
1005  M0_PRE(ergo(req->dr_is_meta, dix_id_layouts_nr(req) == 0));
1006  M0_PRE((flags & ~(COF_CROW | COF_SKIP_LAYOUT)) == 0);
1007  req->dr_dtx = dtx;
1008  /*
1009  * Save index identifiers in two arrays. The identifiers in
1010  * req->dr_indices will be overwritten once layout ids (if any) are
1011  * resolved into layout descriptors. The record in the 'layout' index
1012  * should be inserted with the layout requested by the user, not the resolved one.
1013  */
1014  rc = dix_req_indices_copy(req, indices, indices_nr) ?:
1015  dix_indices_copy(&req->dr_orig_indices, indices, indices_nr);
1016  if (rc != 0)
1017  return M0_ERR(rc);
1018  M0_ALLOC_ARR(req->dr_items, indices_nr);
1019  if (req->dr_items == NULL)
1020  /*
1021  * Cleanup of req->dr_indices and req->dr_orig_indices will be
1022  * done in m0_dix_req_fini().
1023  */
1024  return M0_ERR(-ENOMEM);
1025  req->dr_items_nr = indices_nr;
1026  req->dr_type = DIX_CREATE;
1027  req->dr_flags = flags;
1028  dix_discovery(req);
1029  return M0_RC(0);
1030 }
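A caller of m0_dix_create() therefore has to fill struct m0_dix with both the index fid and a layout before launching the request. The snippet below is an illustrative sketch only: dix_fid and layout_id are placeholders supplied by the caller, and the DIX_LTYPE_ID layout is resolved to a full descriptor by the request itself (see dix_ldescr_resolve() below). With COF_CROW the component catalogues are not created up front; only the 'layout' meta-index record is inserted, unless COF_SKIP_LAYOUT is also given.

	struct m0_dix index = {
	        .dd_fid    = dix_fid,             /* placeholder index fid */
	        .dd_layout = {
	                .dl_type = DIX_LTYPE_ID,
	                .u.dl_id = layout_id      /* placeholder layout id */
	        }
	};

	rc = m0_dix_create(&req, &index, 1, NULL /* no dtx */, COF_CROW);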
1031 
1032 static void dix_ldescr_resolve(struct m0_dix_req *req)
1033 {
1034  struct m0_dix *indices = req->dr_indices;
1035  uint32_t id_nr = dix_id_layouts_nr(req);
1036  struct m0_dix_meta_req *meta_req;
1037  uint64_t *lids;
1038  int i;
1039  int k;
1040  int rc;
1041  M0_ENTRY();
1042 
1043  /*
1044  * If layout descriptors have DIX_LTYPE_ID, then they should be loaded
1045  * via m0_dix_ldescr_get() in order to find out actual layout.
1046  */
1047  M0_ALLOC_PTR(req->dr_meta_req);
1048  M0_ALLOC_ARR(lids, id_nr);
1049  if (lids == NULL || req->dr_meta_req == NULL) {
1050  rc = M0_ERR(-ENOMEM);
1051  goto err;
1052  }
1053  for (i = 0, k = 0; i < req->dr_indices_nr; i++)
1054  if (indices[i].dd_layout.dl_type == DIX_LTYPE_ID)
1055  lids[k++] = indices[i].dd_layout.u.dl_id;
1056  meta_req = req->dr_meta_req;
1057  m0_dix_meta_req_init(meta_req, req->dr_cli, dix_req_smgrp(req));
1058  dix_to_mdix_map(req, meta_req);
1059  m0_clink_init(&req->dr_clink, dix_layout_find_clink_cb);
1060  m0_clink_add_lock(&meta_req->dmr_chan, &req->dr_clink);
1061  /* Start loading layout descriptors from CAS. */
1062  rc = m0_dix_ldescr_get(meta_req, lids, id_nr);
1063  if (rc != 0) {
1064  m0_clink_del_lock(&req->dr_clink);
1065  m0_clink_fini(&req->dr_clink);
1066  m0_dix_meta_req_fini(meta_req);
1067  }
1068 
1069 err:
1070  if (rc != 0) {
1071   m0_free0(&req->dr_meta_req);
1072   dix_req_failure(req, rc);
1073  } else {
1074   dix_req_state_set(req, DIXREQ_LID_DISCOVERY);
1075  }
1076  m0_free(lids);
1077  M0_LEAVE();
1078 }
1079 
1080 static void addb2_add_dix_req_attrs(const struct m0_dix_req *req)
1081 {
1082  uint64_t sm_id = m0_sm_id_get(&req->dr_sm);
1083 
1084  M0_ADDB2_ADD(M0_AVI_ATTR, sm_id,
1085  M0_AVI_DIX_REQ_ATTR_IS_META, req->dr_is_meta);
1086  M0_ADDB2_ADD(M0_AVI_ATTR, sm_id,
1087  M0_AVI_DIX_REQ_ATTR_REQ_TYPE, req->dr_type);
1088  M0_ADDB2_ADD(M0_AVI_ATTR, sm_id,
1089  M0_AVI_DIX_REQ_ATTR_ITEMS_NR, req->dr_items_nr);
1090  M0_ADDB2_ADD(M0_AVI_ATTR, sm_id,
1091  M0_AVI_DIX_REQ_ATTR_INDICES_NR, req->dr_indices_nr);
1092  if (req->dr_keys != NULL)
1094  req->dr_keys->ov_vec.v_nr);
1095  if (req->dr_vals != NULL)
1097  req->dr_vals->ov_vec.v_nr);
1098 }
1099 
1100 static void dix_discovery_completed(struct m0_dix_req *req)
1101 {
1102  M0_ENTRY();
1105 
1106  /*
1107  * All layouts have been resolved, all types are DIX_LTYPE_DESCR,
1108  * perform dix operation.
1109  */
1110  switch (req->dr_type) {
1111  case DIX_CREATE:
1112  case DIX_DELETE:
1113  case DIX_CCTGS_LOOKUP:
1114  dix_idxop(req);
1115  break;
1116  case DIX_GET:
1117  case DIX_PUT:
1118  case DIX_DEL:
1119  case DIX_NEXT:
1120  dix_rop(req);
1121  break;
1122  default:
1123  M0_IMPOSSIBLE("Unknown request type %u", req->dr_type);
1124  }
1125  M0_LEAVE();
1126 }
1127 
1128 static void dix_discovery_ast(struct m0_sm_group *grp, struct m0_sm_ast *ast)
1129 {
1130  struct m0_dix_req *req = container_of(ast, struct m0_dix_req, dr_ast);
1131  M0_ENTRY();
1132 
1133  (void)grp;
1134  if (dix_unknown_layouts_nr(req) > 0)
1135   dix_layout_find(req);
1136  else if (dix_id_layouts_nr(req) > 0)
1137   dix_ldescr_resolve(req);
1138  else
1139   dix_discovery_completed(req);
1140  M0_LEAVE();
1141 }
1142 
1143 static void dix_discovery(struct m0_dix_req *req)
1144 {
1145  /* Entry point to start discovering and resolving layout descriptors. */
1146  req->dr_ast.sa_cb = dix_discovery_ast;
1147  req->dr_ast.sa_datum = req;
1148  m0_sm_ast_post(dix_req_smgrp(req), &req->dr_ast);
1149 }
1150 
1152 void m0_dix_req_cancel(struct m0_dix_req *dreq)
1153 {
1154  struct m0_fop *fop;
1155  struct m0_dix_rop_ctx *rop;
1156  struct m0_dix_cas_rop *cas_rop;
1157 
1158  M0_ENTRY();
1159  switch (dreq->dr_type) {
1160  case DIX_GET:
1161  case DIX_PUT:
1162  case DIX_DEL:
1163  case DIX_NEXT:
1164  rop = dreq->dr_rop;
1165  if (rop == NULL)
1166  return;
1167  M0_LOG(M0_DEBUG, "dg_completed_nr=%" PRIu64 " "
1168  "dg_cas_reqs_nr=%" PRIu64 " dr_type=%d",
1169  rop->dg_completed_nr, rop->dg_cas_reqs_nr,
1170  dreq->dr_type);
1171  if (rop->dg_completed_nr < rop->dg_cas_reqs_nr) {
1172  m0_tl_for(cas_rop, &rop->dg_cas_reqs, cas_rop) {
1173  fop = cas_rop->crp_creq.ccr_fop;
1174    if (fop != NULL)
1175     m0_rpc_item_cancel(&fop->f_item);
1176   } m0_tl_endfor;
1177  }
1178  break;
1179  case DIX_CREATE:
1180  case DIX_DELETE:
1181  case DIX_CCTGS_LOOKUP:
1182  /*
1183  * rpc item not available to cancel after launching index
1184  * create / delete / lookup operation so no-op
1185  */
1186  break;
1187  }
1188 }
1189 
1190 M0_INTERNAL int m0_dix_delete(struct m0_dix_req *req,
1191  const struct m0_dix *indices,
1192  uint64_t indices_nr,
1193  struct m0_dtx *dtx,
1194  uint32_t flags)
1195 {
1196  int rc;
1197 
1198  M0_ENTRY();
1199  M0_PRE((flags & ~(COF_CROW | COF_SKIP_LAYOUT)) == 0);
1200  req->dr_dtx = dtx;
1201  rc = dix_req_indices_copy(req, indices, indices_nr);
1202  if (rc != 0)
1203  return M0_ERR(rc);
1204  M0_ALLOC_ARR(req->dr_items, indices_nr);
1205  if (req->dr_items == NULL)
1206  return M0_ERR(-ENOMEM);
1207  req->dr_items_nr = indices_nr;
1208  req->dr_type = DIX_DELETE;
1209  req->dr_flags = flags;
1210  dix_discovery(req);
1211  return M0_RC(0);
1212 }
1213 
1214 M0_INTERNAL int m0_dix_cctgs_lookup(struct m0_dix_req *req,
1215  const struct m0_dix *indices,
1216  uint32_t indices_nr)
1217 {
1218  int rc;
1219 
1220  M0_ENTRY();
1221  rc = dix_req_indices_copy(req, indices, indices_nr);
1222  if (rc != 0)
1223  return M0_ERR(rc);
1224  M0_ALLOC_ARR(req->dr_items, indices_nr);
1225  if (req->dr_items == NULL)
1226  return M0_ERR(-ENOMEM);
1227  req->dr_items_nr = indices_nr;
1228  req->dr_type = DIX_CCTGS_LOOKUP;
1229  dix_discovery(req);
1230  return M0_RC(0);
1231 }
1232 
1233 static int dix_rec_op_init(struct m0_dix_rec_op *rec_op,
1234  struct m0_dix_req *req,
1235  struct m0_dix_cli *cli,
1236  struct m0_pool_version *pver,
1237  struct m0_dix *dix,
1238  struct m0_buf *key,
1239  uint64_t user_item)
1240 {
1241  int rc;
1242 
1244  rc = m0_dix_layout_iter_init(&rec_op->dgp_iter, &dix->dd_fid,
1245  cli->dx_ldom, pver,
1246  &dix->dd_layout.u.dl_desc, key);
1247  if (rc != 0)
1248  return M0_ERR(rc);
1249  rec_op->dgp_units_nr = dix_rop_tgt_iter_max(req, rec_op);
1250  M0_ALLOC_ARR(rec_op->dgp_units, rec_op->dgp_units_nr);
1251  if (rec_op->dgp_units == NULL) {
1253  return M0_ERR(rc);
1254  }
1255  rec_op->dgp_item = user_item;
1256  rec_op->dgp_key = *key;
1257  return 0;
1258 }
1259 
1260 static void dix_rec_op_fini(struct m0_dix_rec_op *rec_op)
1261 {
1262  m0_dix_layout_iter_fini(&rec_op->dgp_iter);
1263  m0_free(rec_op->dgp_units);
1264 }
1265 
1266 static int dix_cas_rop_alloc(struct m0_dix_req *req, uint32_t sdev,
1267  struct m0_dix_cas_rop **cas_rop)
1268 {
1269  struct m0_dix_rop_ctx *rop = req->dr_rop;
1270  M0_ENTRY();
1271 
1272  M0_ALLOC_PTR(*cas_rop);
1273  if (*cas_rop == NULL)
1274  return M0_ERR(-ENOMEM);
1275  (*cas_rop)->crp_parent = req;
1276  (*cas_rop)->crp_sdev_idx = sdev;
1277  (*cas_rop)->crp_flags = req->dr_flags;
1278  cas_rop_tlink_init_at(*cas_rop, &rop->dg_cas_reqs);
1279 
1280  return M0_RC_INFO(0, "dix req=%p rop=%p cas_rop=%p sdev=%u",
1281  req, rop, *cas_rop, sdev);
1282 }
1283 
1284 static void dix_cas_rop_fini(struct m0_dix_cas_rop *cas_rop)
1285 {
1286  m0_free(cas_rop->crp_attrs);
1287  m0_bufvec_free2(&cas_rop->crp_keys);
1288  m0_bufvec_free2(&cas_rop->crp_vals);
1289  cas_rop_tlink_fini(cas_rop);
1290 }
1291 
1292 static void dix_cas_rops_fini(struct m0_tl *cas_rops)
1293 {
1294  struct m0_dix_cas_rop *cas_rop;
1295 
1296  m0_tl_teardown(cas_rop, cas_rops, cas_rop) {
1297  dix_cas_rop_fini(cas_rop);
1298  m0_free(cas_rop);
1299  }
1300 }
1301 
1302 static int dix_rop_ctx_init(struct m0_dix_req *req,
1303  struct m0_dix_rop_ctx *rop,
1304  const struct m0_bufvec *keys,
1305  uint64_t *indices)
1306 {
1307  struct m0_dix *dix = &req->dr_indices[0];
1308  struct m0_dix_ldesc *ldesc;
1309  uint32_t keys_nr;
1310  struct m0_buf key;
1311  uint32_t i;
1312  int rc = 0;
1313 
1314  M0_ENTRY();
1315  M0_PRE(M0_IS0(rop));
1316  M0_PRE(req->dr_indices_nr == 1);
1318  M0_PRE(keys != NULL);
1319  keys_nr = keys->ov_vec.v_nr;
1320  M0_PRE(keys_nr != 0);
1321  ldesc = &dix->dd_layout.u.dl_desc;
1322  rop->dg_pver = dix_pver_find(req, &ldesc->ld_pver);
1323  M0_ALLOC_ARR(rop->dg_rec_ops, keys_nr);
1325  if (rop->dg_rec_ops == NULL || rop->dg_target_rop == NULL)
1326  return M0_ERR(-ENOMEM);
1327  for (i = 0; i < keys_nr; i++) {
1328  key = M0_BUF_INIT(keys->ov_vec.v_count[i], keys->ov_buf[i]);
1329  rc = dix_rec_op_init(&rop->dg_rec_ops[i], req, req->dr_cli,
1330  rop->dg_pver, &req->dr_indices[0], &key,
1331  indices == NULL ? i : indices[i]);
1332  if (rc != 0) {
1333  for (i = 0; i < rop->dg_rec_ops_nr; i++)
1334  dix_rec_op_fini(&rop->dg_rec_ops[i]);
1335  break;
1336  }
1337  rop->dg_rec_ops_nr++;
1338  }
1339  cas_rop_tlist_init(&rop->dg_cas_reqs);
1340  return M0_RC(rc);
1341 }
1342 
1343 static void dix_rop_ctx_fini(struct m0_dix_rop_ctx *rop)
1344 {
1345  uint32_t i;
1346 
1347  for (i = 0; i < rop->dg_rec_ops_nr; i++)
1348  dix_rec_op_fini(&rop->dg_rec_ops[i]);
1349  m0_free(rop->dg_rec_ops);
1350  m0_free(rop->dg_target_rop);
1352  cas_rop_tlist_fini(&rop->dg_cas_reqs);
1353  M0_SET0(rop);
1354 }
1355 
1356 static void dix__rop(struct m0_dix_req *req, const struct m0_bufvec *keys,
1357  uint64_t *indices)
1358 {
1359  struct m0_dix_rop_ctx *rop;
1360  uint32_t keys_nr;
1361  int rc;
1362  M0_ENTRY();
1363 
1364  M0_PRE(keys != NULL);
1365  M0_PRE(req->dr_indices_nr == 1);
1366 
1367  keys_nr = keys->ov_vec.v_nr;
1368  M0_PRE(keys_nr != 0);
1369  /* We support only one KV pair per request in DTM0. */
1370  M0_PRE(ergo(req->dr_dtx != NULL, keys_nr == 1));
1371  M0_ALLOC_PTR(rop);
1372  if (rop == NULL) {
1373  dix_req_failure(req, M0_ERR(-ENOMEM));
1374  return;
1375  }
1376  rc = dix_rop_ctx_init(req, rop, keys, indices);
1377  if (rc != 0) {
1379  return;
1380  }
1381  req->dr_rop = rop;
1383  rc = dix_cas_rops_alloc(req) ?:
1384       dix_cas_rops_fill(req) ?:
1385       dix_cas_rops_send(req);
1386  if (rc != 0) {
1387   dix_rop_ctx_fini(rop);
1388   dix_req_failure(req, rc);
1389  } else if (dix_req_state(req) != DIXREQ_DEL_PHASE2)
1390   dix_req_state_set(req, DIXREQ_INPROGRESS);
1391  M0_LEAVE();
1392 }
1393 
1394 static void dix_rop(struct m0_dix_req *req)
1395 {
1396  M0_PRE(req->dr_indices_nr == 1);
1398  M0_PRE(req->dr_keys != NULL);
1399  M0_ENTRY();
1400 
1401  dix__rop(req, req->dr_keys, NULL);
1402  M0_LEAVE();
1403 }
1404 
1405 static void dix_item_rc_update(struct m0_dix_req *req,
1406  struct m0_cas_req *creq,
1407  uint64_t key_idx,
1408  struct m0_dix_item *ditem)
1409 {
1410  struct m0_cas_rec_reply rep;
1411  struct m0_cas_get_reply get_rep;
1412  int rc;
1413  enum dix_req_type rtype = req->dr_type;
1414 
1415  rc = m0_cas_req_generic_rc(creq);
1416  if (rc == 0) {
1417  switch (rtype) {
1418  case DIX_GET:
1419  m0_cas_get_rep(creq, key_idx, &get_rep);
1420  rc = get_rep.cge_rc;
1421  if (rc == 0) {
1422  ditem->dxi_val = get_rep.cge_val;
1423  /* Value will be freed at m0_dix_req_fini(). */
1424  m0_cas_rep_mlock(creq, key_idx);
1425  }
1426  break;
1427  case DIX_PUT:
1428  m0_cas_put_rep(creq, key_idx, &rep);
1429  rc = rep.crr_rc;
1430  break;
1431  case DIX_DEL:
1432  m0_cas_del_rep(creq, key_idx, &rep);
1433  /*
1434     * It is possible that the repair process hasn't copied the
1435     * replica to the spare disk yet. Ignore such an error.
1436  */
1438  rep.crr_rc == -ENOENT)
1439  rep.crr_rc = 0;
1440  rc = rep.crr_rc;
1441  break;
1442  default:
1443  M0_IMPOSSIBLE("Incorrect type %u", rtype);
1444  }
1445  }
1446  ditem->dxi_rc = rc;
1447 }
1448 
1450 {
1451  return item->dxi_rc != 0 && item->dxi_rc != -ENOENT;
1452 }
1453 
1454 static bool dix_item_parity_unit_is_last(const struct m0_dix_req *req,
1455  const struct m0_dix_item *item)
1456 {
1457  struct m0_pool_version *pver;
1458 
1459  pver = m0_dix_pver(req->dr_cli, &req->dr_indices[0]);
1460  return item->dxi_pg_unit == pver->pv_attr.pa_N + pver->pv_attr.pa_K - 1;
1461 }
1462 
1463 static void dix_get_req_resend(struct m0_dix_req *req)
1464 {
1465  struct m0_bufvec keys;
1466  uint64_t *indices;
1467  uint32_t keys_nr;
1468  uint32_t i;
1469  uint32_t k = 0;
1470  int rc;
1471  M0_ENTRY();
1472 
1473  keys_nr = m0_count(i, req->dr_items_nr,
1474  dix_item_get_has_failed(&req->dr_items[i]) &&
1475  !dix_item_parity_unit_is_last(req, &req->dr_items[i]));
1476  if (keys_nr == 0) {
1477  /*
1478   * All failed records have already been tried against both
1479   * their data and parity unit locations; nothing can be resent.
1480  */
1481  rc = M0_ERR(-EHOSTUNREACH);
1482  goto end;
1483  }
1484  rc = m0_bufvec_empty_alloc(&keys, keys_nr);
1485  M0_ALLOC_ARR(indices, keys_nr);
1486  if (rc != 0 || indices == NULL) {
1487  rc = rc ?: M0_ERR(-ENOMEM);
1488  goto free;
1489  }
1490  for (i = 0; i < req->dr_items_nr; i++) {
1491  if (!dix_item_get_has_failed(&req->dr_items[i]))
1492  continue;
1493  /*
1494  * Clear error code in order to update it successfully on
1495  * request completion. Otherwise, it wouldn't be overwritten
1496  * because it is possible that several CAS requests are sent
1497  * for one item and it holds error code for the first failed
1498  * request.
1499  */
1500  req->dr_items[i].dxi_rc = 0;
1501  req->dr_items[i].dxi_pg_unit++;
1502  keys.ov_vec.v_count[k] = req->dr_keys->ov_vec.v_count[i];
1503  keys.ov_buf[k] = req->dr_keys->ov_buf[i];
1504  indices[k] = i;
1505  k++;
1506  }
1507 
1508  dix__rop(req, &keys, indices);
1509 free:
1510  m0_bufvec_free2(&keys);
1511  m0_free(indices);
1512 end:
1513  if (rc != 0)
1515  M0_LEAVE();
1516 }
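As a worked example of the retry walk above: with N = 1 data unit and K = 2 parity units, a record's units are numbered 0 (data), 1 and 2 (parity). A failed GET that started at unit 0 has dxi_pg_unit bumped to 1 and is resent; if that fails too, it moves on to unit 2. Once unit N + K - 1 = 2 has also failed, dix_item_parity_unit_is_last() reports true, the item no longer counts towards keys_nr, and when no items remain the request fails with -EHOSTUNREACH.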
1517 
1518 static bool dix_del_phase2_is_needed(const struct m0_dix_rec_op *rec_op)
1519 {
1520  return m0_exists(i, rec_op->dgp_units_nr,
1521  rec_op->dgp_units[i].dpu_del_phase2);
1522 }
1523 
1525  struct m0_dix_rop_ctx **out)
1526 {
1527  struct m0_dix_rop_ctx *cur_rop = req->dr_rop;
1528  struct m0_dix_rop_ctx *out_rop;
1529  struct m0_dix_rec_op *src_rec_op;
1530  struct m0_dix_rec_op *dst_rec_op;
1531  struct m0_bufvec keys;
1532  uint64_t *indices;
1533  uint32_t keys_nr;
1534  uint32_t i;
1535  uint32_t j;
1536  uint32_t k = 0;
1537  int rc;
1538 
1539  keys_nr = m0_count(i, cur_rop->dg_rec_ops_nr,
1540  dix_del_phase2_is_needed(&cur_rop->dg_rec_ops[i]));
1541 
1542  if (keys_nr == 0)
1543  return 0;
1544 
1545  rc = m0_bufvec_empty_alloc(&keys, keys_nr);
1546  M0_ALLOC_ARR(indices, keys_nr);
1547  M0_ALLOC_PTR(out_rop);
1548  if (rc != 0 || indices == NULL || out_rop == NULL) {
1549  rc = M0_ERR(rc ?: -ENOMEM);
1550  goto free;
1551  }
1552  for (i = 0; i < cur_rop->dg_rec_ops_nr; i++) {
1553  if (!dix_del_phase2_is_needed(&cur_rop->dg_rec_ops[i]))
1554  continue;
1555  keys.ov_vec.v_count[k] = req->dr_keys->ov_vec.v_count[i];
1556  keys.ov_buf[k] = req->dr_keys->ov_buf[i];
1557  indices[k] = i;
1558  k++;
1559  }
1560 
1561  rc = dix_rop_ctx_init(req, out_rop, &keys, indices);
1562  if (rc != 0)
1563  goto free;
1564  k = 0;
1565  for (i = 0; i < cur_rop->dg_rec_ops_nr; i++) {
1566  src_rec_op = &cur_rop->dg_rec_ops[i];
1567  if (!dix_del_phase2_is_needed(src_rec_op))
1568  continue;
1569  dst_rec_op = &out_rop->dg_rec_ops[k++];
1570  M0_ASSERT(src_rec_op->dgp_units_nr == dst_rec_op->dgp_units_nr);
1571  for (j = 0; j < src_rec_op->dgp_units_nr; j++)
1572  dst_rec_op->dgp_units[j] = src_rec_op->dgp_units[j];
1573  }
1574 
1575 free:
1576  m0_bufvec_free2(&keys);
1577  m0_free(indices);
1578  if (rc == 0) {
1579  rc = keys_nr;
1580  *out = out_rop;
1581  } else
1582  m0_free(out_rop);
1583  return M0_RC(rc);
1584 }
1585 
1586 static void dix_rop_del_phase2(struct m0_dix_req *req)
1587 {
1588  int rc;
1589 
1591 
1592  rc = dix_cas_rops_alloc(req) ?:
1593       dix_cas_rops_fill(req) ?:
1594       dix_cas_rops_send(req);
1595 
1596  if (rc != 0) {
1597  dix_rop_ctx_fini(req->dr_rop);
1599  }
1600 }
1601 
1602 static void dix_cas_rop_rc_update(struct m0_dix_cas_rop *cas_rop, int rc)
1603 {
1604  struct m0_dix_req *req = cas_rop->crp_parent;
1605  struct m0_dix_item *ditem;
1606  uint64_t item_idx;
1607  uint32_t i;
1608 
1609  for (i = 0; i < cas_rop->crp_keys_nr; i++) {
1610  item_idx = cas_rop->crp_attrs[i].cra_item;
1611  ditem = &req->dr_items[item_idx];
1612  if (ditem->dxi_rc != 0)
1613  continue;
1614  if (rc == 0)
1615  dix_item_rc_update(req, &cas_rop->crp_creq, i, ditem);
1616  else
1617  ditem->dxi_rc = M0_ERR(rc);
1618  }
1619 }
1620 
1621 static void dix_rop_completed(struct m0_sm_group *grp, struct m0_sm_ast *ast)
1622 {
1623  struct m0_dix_req *req = ast->sa_datum;
1624  struct m0_dix_rop_ctx *rop = req->dr_rop;
1625  struct m0_dix_rop_ctx *rop_del_phase2 = NULL;
1626  bool del_phase2 = false;
1627  struct m0_dix_cas_rop *cas_rop;
1628 
1629  (void)grp;
1630  if (req->dr_type == DIX_NEXT)
1632  else {
1633  /*
1634  * Consider DIX request to be successful if there is at least
1635  * one successful CAS request.
1636  */
1637  if (m0_tl_forall(cas_rop, cas_rop,
1638  &rop->dg_cas_reqs,
1639  cas_rop->crp_creq.ccr_sm.sm_rc != 0))
1640  dix_cas_rop_rc_update(cas_rop_tlist_tail(
1641  &rop->dg_cas_reqs), 0);
1642 
1643  m0_tl_for (cas_rop, &rop->dg_cas_reqs, cas_rop) {
1644  if (cas_rop->crp_creq.ccr_sm.sm_rc == 0)
1645  dix_cas_rop_rc_update(cas_rop, 0);
1646  m0_cas_req_fini(&cas_rop->crp_creq);
1647  } m0_tl_endfor;
1648  }
1649 
1650  if (req->dr_type == DIX_DEL &&
1651      dix_req_state(req) != DIXREQ_DEL_PHASE2)
1652  del_phase2 = dix_rop_del_phase2_rop(req, &rop_del_phase2) > 0;
1653 
1654  dix_rop_ctx_fini(rop);
1655  if (req->dr_type == DIX_GET &&
1656  m0_exists(i, req->dr_items_nr,
1657      dix_item_get_has_failed(&req->dr_items[i]))) {
1658   dix_req_state_set(req, DIXREQ_GET_RESEND);
1659   dix_get_req_resend(req);
1660  } else if (req->dr_type == DIX_DEL && del_phase2) {
1661  m0_free(rop);
1662   req->dr_rop = rop_del_phase2;
1663   dix_rop_del_phase2(req);
1664  } else {
1666  }
1667 }
1668 
1669 static void dix_rop_one_completed(struct m0_dix_cas_rop *crop)
1670 {
1671  struct m0_dix_req *dreq = crop->crp_parent;
1672  struct m0_dix_rop_ctx *rop;
1673 
1674  M0_ENTRY();
1675  M0_PRE(!dreq->dr_is_meta);
1676  M0_PRE(M0_IN(dreq->dr_type, (DIX_PUT, DIX_DEL)));
1677  M0_PRE(dreq->dr_dtx != NULL);
1678  M0_PRE(dix_req_smgrp(dreq) == dreq->dr_dtx->tx_dtx->dd_sm.sm_grp);
1679 
1680  rop = crop->crp_parent->dr_rop;
1681  dix_cas_rop_rc_update(crop, 0);
1682 
1683  m0_dtx0_executed(dreq->dr_dtx, crop->crp_pa_idx);
1684 
1685  if (rop->dg_completed_nr == rop->dg_cas_reqs_nr) {
1686  rop->dg_ast = (struct m0_sm_ast) {
1688  .sa_datum = dreq,
1689  };
1690  m0_sm_ast_post(dix_req_smgrp(dreq), &rop->dg_ast);
1691  }
1692 
1693  M0_LEAVE();
1694 }
1695 
1696 static bool dix_cas_rop_clink_cb(struct m0_clink *cl)
1697 {
1698  struct m0_dix_cas_rop *crop = container_of(cl, struct m0_dix_cas_rop,
1699  crp_clink);
1700  uint32_t state = crop->crp_creq.ccr_sm.sm_state;
1701  struct m0_dix_rop_ctx *rop;
1702  struct m0_dix_req *dreq;
1703 
1704  if (M0_IN(state, (CASREQ_FINAL, CASREQ_FAILURE))) {
1705  dreq = crop->crp_parent;
1706 
1707  /*
1708  * Update pending transaction number. Note: as
1709  * m0_cas_req::ccr_fop is set to NULL in cas_req_reply_handle()
1710  * we must get the returned remid before that.
1711  */
1712  if (dreq->dr_cli->dx_sync_rec_update != NULL)
1713  dreq->dr_cli->dx_sync_rec_update(
1714  dreq, crop->crp_creq.ccr_sess,
1715  &crop->crp_creq.ccr_remid);
1716 
1717 
1718  m0_clink_del(cl);
1719  m0_clink_fini(cl);
1720  rop = crop->crp_parent->dr_rop;
1721  rop->dg_completed_nr++;
1722  M0_PRE(rop->dg_completed_nr <= rop->dg_cas_reqs_nr);
1723 
1724  if (dreq->dr_dtx != NULL) {
1725  M0_ASSERT(dix_req_smgrp(dreq) ==
1726  dreq->dr_dtx->tx_dtx->dd_sm.sm_grp);
1727  dix_rop_one_completed(crop);
1728  } else {
1729  if (rop->dg_completed_nr == rop->dg_cas_reqs_nr) {
1730  rop->dg_ast = (struct m0_sm_ast) {
1732  .sa_datum = dreq,
1733  };
1735  &rop->dg_ast);
1736  }
1737  }
1738 
1739  }
1740  return true;
1741 }
1742 
1743 static int dix_cas_rops_send(struct m0_dix_req *req)
1744 {
1745  struct m0_pools_common *pc = req->dr_cli->dx_pc;
1746  struct m0_dix_rop_ctx *rop = req->dr_rop;
1747  struct m0_dix_cas_rop *cas_rop;
1748  struct m0_cas_req *creq;
1749  uint32_t sdev_idx;
1750  struct m0_cas_id cctg_id;
1751  struct m0_reqh_service_ctx *cas_svc;
1752  struct m0_dix_layout *layout = &req->dr_indices[0].dd_layout;
1753  int rc;
1754  M0_ENTRY("req=%p", req);
1755 
1756  M0_PRE(rop->dg_cas_reqs_nr == 0);
1757  m0_tl_for(cas_rop, &rop->dg_cas_reqs, cas_rop) {
1758  sdev_idx = cas_rop->crp_sdev_idx;
1759  creq = &cas_rop->crp_creq;
1760  cas_svc = pc->pc_dev2svc[sdev_idx].pds_ctx;
1761  M0_ASSERT(cas_svc->sc_type == M0_CST_CAS);
1762  m0_cas_req_init(creq, &cas_svc->sc_rlink.rlk_sess,
1763  dix_req_smgrp(req));
1764   dix_to_cas_map(req, creq);
1765   m0_clink_init(&cas_rop->crp_clink, dix_cas_rop_clink_cb);
1766   m0_clink_add(&creq->ccr_sm.sm_chan, &cas_rop->crp_clink);
1767  M0_ASSERT(req->dr_indices_nr == 1);
1768  m0_dix_fid_convert_dix2cctg(&req->dr_indices[0].dd_fid,
1769  &cctg_id.ci_fid, sdev_idx);
1770  M0_ASSERT(layout->dl_type == DIX_LTYPE_DESCR);
1771  cctg_id.ci_layout.dl_type = layout->dl_type;
1773  rc = m0_dix_ldesc_copy(&cctg_id.ci_layout.u.dl_desc,
1774  &layout->u.dl_desc);
1775  M0_LOG(M0_DEBUG, "Processing dix_req %p[%u] "FID_F
1776  " creq=%p "FID_F,
1777  req, req->dr_type,
1778  FID_P(&req->dr_indices[0].dd_fid),
1779  creq, FID_P(&cctg_id.ci_fid));
1780 
1781  switch (req->dr_type) {
1782  case DIX_GET:
1783  rc = m0_cas_get(creq, &cctg_id, &cas_rop->crp_keys);
1784  break;
1785  case DIX_PUT:
1786  rc = m0_cas_put(creq, &cctg_id, &cas_rop->crp_keys,
1787  &cas_rop->crp_vals, req->dr_dtx,
1788  cas_rop->crp_flags);
1789  break;
1790  case DIX_DEL:
1791  rc = m0_cas_del(creq, &cctg_id, &cas_rop->crp_keys,
1792  req->dr_dtx, cas_rop->crp_flags);
1793  break;
1794  case DIX_NEXT:
1795  rc = m0_cas_next(creq, &cctg_id, &cas_rop->crp_keys,
1796  req->dr_recs_nr,
1797  cas_rop->crp_flags | COF_SLANT);
1798  break;
1799  default:
1800  M0_IMPOSSIBLE("Unknown req type %u", req->dr_type);
1801  }
1802  m0_cas_id_fini(&cctg_id);
1803  if (rc != 0) {
1804  m0_clink_del(&cas_rop->crp_clink);
1805  m0_clink_fini(&cas_rop->crp_clink);
1806  m0_cas_req_fini(&cas_rop->crp_creq);
1807  dix_cas_rop_rc_update(cas_rop, rc);
1808  cas_rop_tlink_del_fini(cas_rop);
1809  dix_cas_rop_fini(cas_rop);
1810  m0_free(cas_rop);
1811  } else {
1812  if (req->dr_dtx != NULL) {
1813  m0_dtx0_fop_assign(req->dr_dtx,
1814  cas_rop->crp_pa_idx,
1815  creq->ccr_fop);
1816  }
1817  rop->dg_cas_reqs_nr++;
1818  }
1819  } m0_tl_endfor;
1820 
1821  M0_LOG(M0_DEBUG, "Processing dix_req %p rop=%p: dg_cas_reqs_nr=%"PRIu64,
1822  req, rop, rop->dg_cas_reqs_nr);
1823  if (rop->dg_cas_reqs_nr == 0)
1824  return M0_ERR(-EFAULT);
1825 
1826  if (req->dr_dtx != NULL) {
1827  rc = m0_dtx0_close(req->dr_dtx);
1828  if (rc != 0)
1829  return M0_ERR(rc);
1830  }
1831 
1832  return M0_RC(0);
1833 }
1834 
1835 static void dix_rop_tgt_iter_begin(const struct m0_dix_req *req,
1836  struct m0_dix_rec_op *rec_op)
1837 {
1838  if (req->dr_type == DIX_NEXT)
1839  rec_op->dgp_next_tgt = 0;
1840  else
1842 }
1843 
1844 static uint32_t dix_rop_tgt_iter_max(struct m0_dix_req *req,
1845  struct m0_dix_rec_op *rec_op)
1846 {
1847  struct m0_dix_layout_iter *iter = &rec_op->dgp_iter;
1848  enum dix_req_type type = req->dr_type;
1849 
1850  M0_ASSERT(M0_IN(type, (DIX_GET, DIX_PUT, DIX_DEL, DIX_NEXT)));
1851  if (type == DIX_NEXT)
1852  /*
1853  * NEXT operation should be sent to all devices, because the
1854  * distribution of keys over devices is unknown. Therefore, all
1855  * component catalogues should be queried and returned records
1856  * should be merge-sorted.
1857  */
1858  return m0_dix_liter_P(iter);
1859  else
1860  /* Skip spares when DTM0 is enabled */
1861  return ENABLE_DTM0 ?
1862  m0_dix_liter_N(iter) + m0_dix_liter_K(iter) :
1863  m0_dix_liter_N(iter) +
1864  m0_dix_liter_K(iter) +
1865  m0_dix_liter_S(iter);
1866 }
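As a numeric illustration of the rule above: for a layout with N = 1 data unit, K = 2 parity units, S = 2 spare units and pool width P = 10, a NEXT request fans out to all P = 10 devices, whereas GET/PUT/DEL address N + K + S = 5 units per record, or only N + K = 3 when DTM0 is enabled and spare units are skipped.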
1867 
1868 static void dix_rop_tgt_iter_next(const struct m0_dix_req *req,
1869  struct m0_dix_rec_op *rec_op,
1870  uint64_t *target,
1871  bool *is_spare)
1872 {
1873  if (req->dr_type != DIX_NEXT) {
1875  rec_op->dgp_iter.dit_unit) == M0_PUT_SPARE;
1876  m0_dix_layout_iter_next(&rec_op->dgp_iter, target);
1877  } else {
1878  *target = rec_op->dgp_next_tgt++;
1879  *is_spare = false;
1880  }
1881 }
1882 
1883 static int dix_spare_slot_find(struct m0_poolmach_state *pm_state,
1884  uint64_t failed_tgt,
1885  uint32_t *spare_slot)
1886 {
1887  struct m0_pool_spare_usage *spare_usage_array;
1888  uint32_t i;
1889 
1890  spare_usage_array = pm_state->pst_spare_usage_array;
1891  for (i = 0; i < pm_state->pst_nr_spares; i++) {
1892  if (spare_usage_array[i].psu_device_index == failed_tgt) {
1893  *spare_slot = i;
1894  return 0;
1895  }
1896  }
1897  return M0_ERR_INFO(-ENOENT, "No spare slot found for target %"PRIu64,
1898  failed_tgt);
1899 }
1900 
1901 static struct m0_pool_version *dix_rec_op_pver(struct m0_dix_rec_op *rec_op)
1902 {
1903  return m0_pdl_to_layout(rec_op->dgp_iter.dit_linst.li_pl)->l_pver;
1904 }
1905 
1906 static uint32_t dix_rop_max_failures(struct m0_dix_rop_ctx *rop)
1907 {
1908  struct m0_pool_version *pver = rop->dg_pver;
1909 
1910  M0_ASSERT(pver != NULL);
1911  return pver->pv_mach.pm_state->pst_max_device_failures;
1912 }
1913 
1914 static uint32_t dix_rec_op_spare_offset(struct m0_dix_rec_op *rec_op)
1915 {
1916  return m0_dix_liter_spare_offset(&rec_op->dgp_iter);
1917 }
1918 
1919 static int dix__spare_target(struct m0_dix_rec_op *rec_op,
1920  const struct m0_dix_pg_unit *failed_unit,
1921  uint32_t *spare_slot,
1922  struct m0_dix_pg_unit **spare_unit,
1923  bool with_data)
1924 {
1925  struct m0_pool_version *pver;
1926  struct m0_poolmach_state *pm_state;
1927  struct m0_dix_pg_unit *spare;
1928  uint32_t slot;
1929  uint64_t spare_offset;
1930  uint64_t tgt;
1931  int rc;
1932 
1933  /*
1934  * Pool machine should be locked here. It is done in
1935  * dix_rop_units_set().
1936  */
1937  pver = dix_rec_op_pver(rec_op);
1938  pm_state = pver->pv_mach.pm_state;
1939  spare_offset = dix_rec_op_spare_offset(rec_op);
1940  M0_PRE(ergo(with_data, M0_IN(failed_unit->dpu_pd_state,
1942  tgt = failed_unit->dpu_tgt;
1943  do {
1944  rc = dix_spare_slot_find(pm_state, tgt, &slot);
1945  if (rc != 0)
1946  return M0_ERR(rc);
1947  spare = &rec_op->dgp_units[spare_offset + slot];
1948  if (!spare->dpu_failed) {
1949  /* Found non-failed spare unit, exit the loop. */
1950  *spare_unit = spare;
1951  *spare_slot = slot;
1952  return M0_RC(0);
1953  }
1954  if (with_data && M0_IN(spare->dpu_pd_state,
1956  /*
1957  * Spare unit with repaired data is requested, but some
1958  * spare unit in a chain is not repaired yet.
1959  */
1960  return M0_ERR(-ENODEV);
1961  }
1962  tgt = spare->dpu_tgt;
1963  } while (1);
1964 }
1965 
1966 static int dix_spare_target(struct m0_dix_rec_op *rec_op,
1967  const struct m0_dix_pg_unit *failed_unit,
1968  uint32_t *spare_slot,
1969  struct m0_dix_pg_unit **spare_unit)
1970 {
1971  return dix__spare_target(rec_op, failed_unit, spare_slot, spare_unit,
1972  false);
1973 }
1974 
1975 static int dix_spare_target_with_data(struct m0_dix_rec_op *rec_op,
1976  const struct m0_dix_pg_unit *failed_unit,
1977  uint32_t *spare_slot,
1978  struct m0_dix_pg_unit **spare_unit)
1979 {
1980  return dix__spare_target(rec_op, failed_unit, spare_slot, spare_unit,
1981  true);
1982 }
1983 
1985  struct m0_dix_rec_op *rec_op)
1986 {
1987  struct m0_dix_pg_unit *pgu;
1988  uint64_t start_unit;
1989  uint64_t i;
1990  uint64_t j;
1991 
1992  M0_ENTRY();
1993  M0_PRE(req->dr_type == DIX_GET);
1994  start_unit = req->dr_items[rec_op->dgp_item].dxi_pg_unit;
1995  M0_ASSERT(start_unit < dix_rec_op_spare_offset(rec_op));
1996  for (i = 0; i < start_unit; i++)
1997  rec_op->dgp_units[i].dpu_failed = true;
1998  for (i = start_unit; i < rec_op->dgp_units_nr; i++) {
1999  pgu = &rec_op->dgp_units[i];
2000  if (!pgu->dpu_is_spare && !pgu->dpu_failed)
2001  break;
2002  }
2003  for (j = i + 1; j < rec_op->dgp_units_nr; j++)
2004  rec_op->dgp_units[j].dpu_failed = true;
2005 }
2006 
2007 static void dix_pg_unit_pd_assign(struct m0_dix_pg_unit *pgu,
2008  struct m0_pooldev *pd)
2009 {
2010  pgu->dpu_tgt = pd->pd_index;
2011  pgu->dpu_sdev_idx = pd->pd_sdev_idx;
2012  pgu->dpu_pd_state = pd->pd_state;
2013  pgu->dpu_failed = pool_failed_devs_tlink_is_in(pd);
2014 }
2015 
2022 static void dix_rop_failed_unit_tgt(struct m0_dix_req    *req,
2023                                     struct m0_dix_rec_op *rec_op,
2024  uint64_t unit)
2025 {
2026  struct m0_dix_pg_unit *pgu = &rec_op->dgp_units[unit];
2027  struct m0_dix_pg_unit *spare;
2028  uint32_t spare_offset;
2029  uint32_t spare_slot;
2030  int rc;
2031 
2032  M0_ENTRY();
2034  M0_PRE(pgu->dpu_failed);
2035  M0_PRE(M0_IN(pgu->dpu_pd_state, (M0_PNDS_FAILED,
2039  switch (req->dr_type) {
2040  case DIX_NEXT:
2041  /* Do nothing. */
2042  break;
2043  case DIX_GET:
2044  if (M0_IN(pgu->dpu_pd_state, (M0_PNDS_SNS_REPAIRED,
2045  M0_PNDS_SNS_REBALANCING))) {
2046  rc = dix_spare_target_with_data(rec_op, pgu,
2047  &spare_slot, &spare);
2048  if (rc == 0) {
2049  spare->dpu_is_spare = false;
2050  break;
2051  }
2052  }
2053  break;
2054  case DIX_PUT:
2055  if (pgu->dpu_pd_state == M0_PNDS_SNS_REBALANCING)
2056  pgu->dpu_failed = false;
2057  rc = dix_spare_target(rec_op, pgu, &spare_slot, &spare);
2058  if (rc == 0) {
2059  spare_offset = dix_rec_op_spare_offset(rec_op);
2060  unit = spare_offset + spare_slot;
2061  rec_op->dgp_units[unit].dpu_is_spare = false;
2062  }
2063  break;
2064  case DIX_DEL:
2065  if (pgu->dpu_pd_state == M0_PNDS_FAILED)
2066  break;
2067  rc = dix_spare_target(rec_op, pgu, &spare_slot, &spare);
2068  if (rc != 0)
2069  break;
2070  spare_offset = dix_rec_op_spare_offset(rec_op);
2071  unit = spare_offset + spare_slot;
2072  if (pgu->dpu_pd_state == M0_PNDS_SNS_REPAIRED) {
2073  rec_op->dgp_units[unit].dpu_is_spare = false;
2074  } else if (pgu->dpu_pd_state == M0_PNDS_SNS_REPAIRING) {
2075  rec_op->dgp_units[unit].dpu_del_phase2 = true;
2076  } else if (pgu->dpu_pd_state == M0_PNDS_SNS_REBALANCING) {
2077  rec_op->dgp_units[unit].dpu_is_spare = false;
2078  pgu->dpu_del_phase2 = true;
2079  }
2080  break;
2081  default:
2082  M0_IMPOSSIBLE("Invalid request type %d", req->dr_type);
2083  }
2084  M0_LEAVE();
2085 }
2086 
2095 static void dix_rop_failures_analyse(struct m0_dix_req *req)
2096 {
2097  struct m0_dix_rop_ctx *rop = req->dr_rop;
2098  struct m0_dix_rec_op *rec_op;
2099  struct m0_dix_pg_unit *unit;
2100  uint32_t i;
2101  uint32_t j;
2102 
2103  for (i = 0; i < rop->dg_rec_ops_nr; i++) {
2104  rec_op = &rop->dg_rec_ops[i];
2105  for (j = 0; j < rec_op->dgp_units_nr; j++) {
2106  unit = &rec_op->dgp_units[j];
2107  if (!unit->dpu_is_spare && unit->dpu_failed) {
2108  rec_op->dgp_failed_devs_nr++;
2109  dix_rop_failed_unit_tgt(req, rec_op, j);
2110  }
2111  }
2112  }
2113 }
2114 
2115 static void dix_rop_units_set(struct m0_dix_req *req)
2116 {
2117  struct m0_dix_rop_ctx *rop = req->dr_rop;
2118  struct m0_dix_rec_op *rec_op;
2119  struct m0_dix_pg_unit *unit;
2120  struct m0_pooldev *pd;
2121  struct m0_poolmach *pm = &rop->dg_pver->pv_mach;
2122  struct m0_pool *pool = rop->dg_pver->pv_pool;
2123  uint64_t tgt;
2124  uint32_t i;
2125  uint32_t j;
2126 
2127  m0_rwlock_read_lock(&pm->pm_lock);
2128 
2129  /*
2130  * Determine the destination device for every unit of every record,
2131  * as it would be if there were no failures in the pool.
2132  */
2133  for (i = 0; i < rop->dg_rec_ops_nr; i++) {
2134  rec_op = &rop->dg_rec_ops[i];
2135  dix_rop_tgt_iter_begin(req, rec_op);
2136  for (j = 0; j < rec_op->dgp_units_nr; j++) {
2137  unit = &rec_op->dgp_units[j];
2138  dix_rop_tgt_iter_next(req, rec_op, &tgt,
2139  &unit->dpu_is_spare);
2140  M0_ASSERT_INFO(ergo(ENABLE_DTM0, !unit->dpu_is_spare),
2141  "We do not operate with spares in DTM0");
2142  pd = m0_dix_tgt2sdev(&rec_op->dgp_iter.dit_linst, tgt);
2143  dix_pg_unit_pd_assign(unit, pd);
2144  }
2145  }
2146 
2147  /*
2148  * Analyse failures in the pool and adjust the state of individual
2149  * units so that CAS requests are sent to the proper destinations.
2150  * Hold the pool machine lock to get consistent results.
2151  */
2152  if (pm->pm_pver->pv_is_dirty &&
2153  !pool_failed_devs_tlist_is_empty(&pool->po_failed_devices)) {
2154  if (ENABLE_DTM0)
2155  M0_IMPOSSIBLE("DTM0 can not operate when permanently"
2156  " failed devices exist.");
2157 
2158  dix_rop_failures_analyse(req);
2159  }
2160 
2161  m0_rwlock_read_unlock(&pm->pm_lock);
2162 
2163  /*
2164  * Only one CAS GET request should be sent for every record.
2165  * Choose the best destination for every record.
2166  */
2167  if (req->dr_type == DIX_GET) {
2168  for (i = 0; i < rop->dg_rec_ops_nr; i++)
2169  dix_online_unit_choose(req, &rop->dg_rec_ops[i]);
2170  }
2171 }
2172 
2173 static bool dix_pg_unit_skip(struct m0_dix_req *req,
2174  struct m0_dix_pg_unit *unit)
2175 {
2176  if (dix_req_state(req) != DIXREQ_DEL_PHASE2)
2177  return unit->dpu_failed || unit->dpu_is_spare;
2178  else
2179  return !unit->dpu_del_phase2;
2180 }
2181 
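/*
 * dix_cas_rops_alloc() groups the surviving units of all record operations
 * by target device: one m0_dix_cas_rop is allocated per target (tracked in
 * rop->dg_target_rop), the number of keys destined for that target is
 * counted, and empty key (and, for PUT, value) buffer vectors are allocated
 * to be populated later by dix_cas_rops_fill().  When a distributed
 * transaction is attached, it is also opened here with one participant per
 * CAS request and the corresponding CAS service fids are assigned.
 */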
2182 static int dix_cas_rops_alloc(struct m0_dix_req *req)
2183 {
2184  struct m0_dix_rop_ctx *rop = req->dr_rop;
2185  struct m0_dtx *dtx = req->dr_dtx;
2186  struct m0_pools_common *pc = req->dr_cli->dx_pc;
2187  struct m0_reqh_service_ctx *cas_svc;
2188  struct m0_dix_rec_op *rec_op;
2189  uint32_t i;
2190  uint32_t j;
2191  uint32_t max_failures;
2192  struct m0_dix_cas_rop **map = rop->dg_target_rop;
2193  struct m0_dix_cas_rop *cas_rop;
2194  struct m0_dix_pg_unit *unit;
2195  bool del_lock;
2196  int rc = 0;
2197 
2198  M0_ENTRY("req %p %u", req, rop->dg_rec_ops_nr);
2199  M0_ASSERT(rop->dg_rec_ops_nr > 0);
2200 
2201  max_failures = dix_rop_max_failures(rop);
2202  for (i = 0; i < rop->dg_rec_ops_nr; i++) {
2203  rec_op = &rop->dg_rec_ops[i];
2204  /*
2205  * If a 2-phase delete is necessary, then the CAS request should be
2206  * sent with the COF_DEL_LOCK flag in order to prevent possible
2207  * concurrency issues with the repair/re-balance process.
2208  */
2209  del_lock = (req->dr_type == DIX_DEL &&
2210  dix_del_phase2_is_needed(rec_op));
2211  if (rec_op->dgp_failed_devs_nr > max_failures) {
2212  req->dr_items[rec_op->dgp_item].dxi_rc = M0_ERR(-EIO);
2213  /* Skip this record operation. */
2214  continue;
2215  }
2216  for (j = 0; j < rec_op->dgp_units_nr; j++) {
2217  unit = &rec_op->dgp_units[j];
2218  if (dix_pg_unit_skip(req, unit))
2219  continue;
2220  if (map[unit->dpu_tgt] == NULL) {
2221  rc = dix_cas_rop_alloc(req, unit->dpu_sdev_idx,
2222  &cas_rop);
2223  if (rc != 0)
2224  goto end;
2225  map[unit->dpu_tgt] = cas_rop;
2226  }
2227  if (del_lock)
2228  map[unit->dpu_tgt]->crp_flags |= COF_DEL_LOCK;
2229  map[unit->dpu_tgt]->crp_keys_nr++;
2230  }
2231  }
2232 
2233  /* It is possible that none of the data units is available. */
2234  if (cas_rop_tlist_is_empty(&rop->dg_cas_reqs))
2235  return M0_ERR(-EIO);
2236 
2237  if (dtx != NULL) {
2238  M0_ASSERT(!req->dr_is_meta);
2239  M0_ASSERT(M0_IN(req->dr_type, (DIX_PUT, DIX_DEL)));
2240  rc = m0_dtx0_open(dtx, cas_rop_tlist_length(&rop->dg_cas_reqs));
2241  if (rc != 0)
2242  goto end;
2243  }
2244 
2245  i = 0;
2246  m0_tl_for(cas_rop, &rop->dg_cas_reqs, cas_rop) {
2247  if (dtx != NULL) {
2248  cas_rop->crp_pa_idx = i++;
2249  cas_svc = pc->pc_dev2svc[cas_rop->crp_sdev_idx].pds_ctx;
2250  M0_ASSERT(cas_svc->sc_type == M0_CST_CAS);
2251  rc = m0_dtx0_fid_assign(dtx, cas_rop->crp_pa_idx,
2252  &cas_svc->sc_fid);
2253  if (rc != 0)
2254  goto end;
2255  }
2256  M0_ALLOC_ARR(cas_rop->crp_attrs, cas_rop->crp_keys_nr);
2257  if (cas_rop->crp_attrs == NULL) {
2258  rc = M0_ERR(-ENOMEM);
2259  goto end;
2260  }
2261  rc = m0_bufvec_empty_alloc(&cas_rop->crp_keys,
2262  cas_rop->crp_keys_nr);
2263  if (rc != 0)
2264  goto end;
2265  if (req->dr_type == DIX_PUT) {
2266  rc = m0_bufvec_empty_alloc(&cas_rop->crp_vals,
2267  cas_rop->crp_keys_nr);
2268  if (rc != 0)
2269  goto end;
2270  }
2271  cas_rop->crp_cur_key = 0;
2272  } m0_tl_endfor;
2273 
2274 end:
2275  if (rc != 0) {
2276  dix_cas_rops_fini(&rop->dg_cas_reqs);
2277  return M0_ERR(rc);
2278  }
2279  return M0_RC(0);
2280 }
2281 
2282 static int dix_cas_rops_fill(struct m0_dix_req *req)
2283 {
2284  struct m0_dix_rop_ctx *rop = req->dr_rop;
2285  struct m0_dix_cas_rop **map = rop->dg_target_rop;
2286  struct m0_dix_rec_op *rec_op;
2287  uint32_t j;
2288  uint32_t i;
2289  uint64_t tgt;
2290  uint64_t item;
2291  struct m0_bufvec *keys;
2292  struct m0_bufvec *vals;
2293  struct m0_buf *key;
2294  uint32_t idx;
2295  struct m0_dix_pg_unit *unit;
2296 
2297  M0_ENTRY("req %p", req);
2298  for (i = 0; i < rop->dg_rec_ops_nr; i++) {
2299  rec_op = &rop->dg_rec_ops[i];
2300  item = rec_op->dgp_item;
2301  for (j = 0; j < rec_op->dgp_units_nr; j++) {
2302  unit = &rec_op->dgp_units[j];
2303  tgt = unit->dpu_tgt;
2304  if (dix_pg_unit_skip(req, unit))
2305  continue;
2306  M0_ASSERT(map[tgt] != NULL);
2307  keys = &map[tgt]->crp_keys;
2308  vals = &map[tgt]->crp_vals;
2309  key = &rec_op->dgp_key;
2310  idx = map[tgt]->crp_cur_key;
2311  keys->ov_vec.v_count[idx] = key->b_nob;
2312  keys->ov_buf[idx] = key->b_addr;
2313  if (req->dr_type == DIX_PUT) {
2314  vals->ov_vec.v_count[idx] =
2315  req->dr_vals->ov_vec.v_count[item];
2316  vals->ov_buf[idx] =
2317  req->dr_vals->ov_buf[item];
2318  }
2319  map[tgt]->crp_attrs[idx].cra_item = item;
2320  map[tgt]->crp_cur_key++;
2321  }
2322  }
2323  return M0_RC(0);
2324 }
2325 
2326 M0_INTERNAL int m0_dix_put(struct m0_dix_req *req,
2327  const struct m0_dix *index,
2328  const struct m0_bufvec *keys,
2329  const struct m0_bufvec *vals,
2330  struct m0_dtx *dtx,
2331  uint32_t flags)
2332 {
2333  uint32_t keys_nr = keys->ov_vec.v_nr;
2334  int rc;
2335 
2336  M0_PRE(keys->ov_vec.v_nr == vals->ov_vec.v_nr);
2337  M0_PRE(keys_nr != 0);
2338  /* Only overwrite, crow, sync_wait and skip_layout flags are allowed. */
2339  M0_PRE((flags & ~(COF_OVERWRITE | COF_CROW | COF_SYNC_WAIT |
2340  COF_SKIP_LAYOUT)) == 0);
2341  rc = dix_req_indices_copy(req, index, 1);
2342  if (rc != 0)
2343  return M0_ERR(rc);
2344  M0_ALLOC_ARR(req->dr_items, keys_nr);
2345  if (req->dr_items == NULL)
2346  return M0_ERR(-ENOMEM);
2347  req->dr_items_nr = keys_nr;
2348  req->dr_keys = keys;
2349  req->dr_vals = vals;
2350  req->dr_dtx = dtx;
2351  req->dr_type = DIX_PUT;
2352  req->dr_flags = flags;
2353  dix_discovery(req);
2354  return M0_RC(0);
2355 }
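/*
 * Illustrative usage sketch (not part of req.c): a typical synchronous PUT
 * call sequence.  It assumes an already initialised DIX client 'cli', its
 * state machine group 'grp', an index descriptor 'ix' (struct m0_dix) whose
 * layout is known, and caller-filled bufvecs 'keys'/'vals'; error handling
 * is abbreviated.
 *
 * @code
 * struct m0_dix_req req;
 * int               rc;
 *
 * m0_dix_req_init(&req, cli, grp);
 * m0_dix_req_lock(&req);
 * rc = m0_dix_put(&req, &ix, &keys, &vals, NULL, 0);
 * if (rc == 0) {
 *         rc = m0_dix_req_wait(&req,
 *                              M0_BITS(DIXREQ_FINAL, DIXREQ_FAILURE),
 *                              M0_TIME_NEVER);
 *         if (rc == 0)
 *                 rc = m0_dix_req_rc(&req);
 * }
 * m0_dix_req_unlock(&req);
 * m0_dix_req_fini_lock(&req);
 * @endcode
 */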
2356 
2357 M0_INTERNAL int m0_dix_get(struct m0_dix_req *req,
2358  const struct m0_dix *index,
2359  const struct m0_bufvec *keys)
2360 {
2361  uint32_t keys_nr = keys->ov_vec.v_nr;
2362  int rc;
2363 
2364  M0_PRE(keys_nr != 0);
2365  rc = dix_req_indices_copy(req, index, 1);
2366  if (rc != 0)
2367  return M0_ERR(rc);
2368  M0_ALLOC_ARR(req->dr_items, keys_nr);
2369  if (req->dr_items == NULL)
2370  return M0_ERR(-ENOMEM);
2371  req->dr_items_nr = keys_nr;
2372  req->dr_keys = keys;
2373  req->dr_type = DIX_GET;
2374  dix_discovery(req);
2375  return M0_RC(0);
2376 }
2377 
2378 M0_INTERNAL void m0_dix_get_rep(const struct m0_dix_req *req,
2379  uint64_t idx,
2380  struct m0_dix_get_reply *rep)
2381 {
2382  M0_PRE(m0_dix_generic_rc(req) == 0);
2383  M0_PRE(idx < req->dr_items_nr);
2384  rep->dgr_rc = req->dr_items[idx].dxi_rc;
2385  rep->dgr_val = req->dr_items[idx].dxi_val;
2386 }
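/*
 * Illustrative usage sketch (not part of req.c): consuming GET replies.
 * It assumes 'req' was driven as in the PUT sketch above, with
 * m0_dix_get(&req, &ix, &keys) issued instead of m0_dix_put(), and that the
 * request has completed; use_value() stands for arbitrary caller code.
 *
 * @code
 * struct m0_dix_get_reply rep;
 * uint64_t                i;
 *
 * if (m0_dix_generic_rc(&req) == 0) {
 *         for (i = 0; i < m0_dix_req_nr(&req); i++) {
 *                 m0_dix_get_rep(&req, i, &rep);
 *                 if (rep.dgr_rc == 0)
 *                         use_value(&rep.dgr_val);
 *         }
 * }
 * @endcode
 *
 * The value buffers stay owned by the request and are freed by
 * m0_dix_req_fini() unless m0_dix_get_rep_mlock() is called for the item.
 */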
2387 
2388 M0_INTERNAL int m0_dix_del(struct m0_dix_req *req,
2389  const struct m0_dix *index,
2390  const struct m0_bufvec *keys,
2391  struct m0_dtx *dtx,
2392  uint32_t flags)
2393 {
2394  uint32_t keys_nr = keys->ov_vec.v_nr;
2395  int rc;
2396 
2397  M0_PRE(keys_nr != 0);
2398  /* Only sync_wait flag is allowed. */
2399  M0_PRE((flags & ~(COF_SYNC_WAIT)) == 0);
2400  rc = dix_req_indices_copy(req, index, 1);
2401  if (rc != 0)
2402  return M0_ERR(rc);
2403  M0_ALLOC_ARR(req->dr_items, keys_nr);
2404  if (req->dr_items == NULL)
2405  return M0_ERR(-ENOMEM);
2406  req->dr_items_nr = keys_nr;
2407  req->dr_keys = keys;
2408  req->dr_dtx = dtx;
2409  req->dr_type = DIX_DEL;
2410  req->dr_flags = flags;
2411  dix_discovery(req);
2412  return M0_RC(0);
2413 }
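/*
 * Note on deletion: when some of the record's units live on devices that
 * are being repaired or re-balanced, the request marks those units with
 * dpu_del_phase2 and the deletion is completed in a second phase, with the
 * CAS requests carrying COF_DEL_LOCK to serialise against the repair or
 * re-balance process (see dix_rop_failed_unit_tgt() and
 * dix_cas_rops_alloc() above).
 */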
2414 
2415 M0_INTERNAL int m0_dix_next(struct m0_dix_req *req,
2416  const struct m0_dix *index,
2417  const struct m0_bufvec *start_keys,
2418  const uint32_t *recs_nr,
2419  uint32_t flags)
2420 {
2421  uint32_t keys_nr = start_keys->ov_vec.v_nr;
2422  uint32_t i;
2423  int rc;
2424 
2425  /* Only slant and exclude start key flags are allowed. */
2426  M0_PRE((flags & ~(COF_SLANT | COF_EXCLUDE_START_KEY)) == 0);
2427  M0_PRE(keys_nr != 0);
2428 
2429  rc = dix_req_indices_copy(req, index, 1);
2430  if (rc != 0)
2431  return M0_ERR(rc);
2432  M0_ALLOC_ARR(req->dr_items, keys_nr);
2433  M0_ALLOC_ARR(req->dr_recs_nr, keys_nr);
2434  if (req->dr_items == NULL || req->dr_recs_nr == NULL)
2435  /*
2436  * Memory will be deallocated in m0_dix_req_fini() if necessary.
2437  */
2438  return M0_ERR(-ENOMEM);
2439  req->dr_items_nr = keys_nr;
2440  req->dr_keys = start_keys;
2441  req->dr_type = DIX_NEXT;
2442  req->dr_flags = flags;
2443  for (i = 0; i < keys_nr; i++)
2444  req->dr_recs_nr[i] = recs_nr[i];
2445  dix_discovery(req);
2446  return 0;
2447 }
2448 
2449 M0_INTERNAL void m0_dix_next_rep(const struct m0_dix_req *req,
2450  uint64_t key_idx,
2451  uint64_t val_idx,
2452  struct m0_dix_next_reply *rep)
2453 {
2454  const struct m0_dix_next_resultset *rs = &req->dr_rs;
2455  struct m0_dix_next_results *res;
2456  struct m0_cas_next_reply **reps;
2457 
2458  M0_ASSERT(rs != NULL);
2459  M0_ASSERT(key_idx < rs->nrs_res_nr);
2460  res = &rs->nrs_res[key_idx];
2461  reps = res->drs_reps;
2462  M0_ASSERT(val_idx < res->drs_pos);
2463  M0_ASSERT(reps[val_idx]->cnp_rc == 0);
2464  rep->dnr_key = reps[val_idx]->cnp_key;
2465  rep->dnr_val = reps[val_idx]->cnp_val;
2466 }
2467 
2468 M0_INTERNAL uint32_t m0_dix_next_rep_nr(const struct m0_dix_req *req,
2469  uint64_t key_idx)
2470 {
2471  M0_ASSERT(key_idx < req->dr_rs.nrs_res_nr);
2472  return req->dr_rs.nrs_res[key_idx].drs_pos;
2473 }
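/*
 * Illustrative usage sketch (not part of req.c): iterating over NEXT
 * results.  It assumes 'req' was driven as in the PUT sketch above, with
 * m0_dix_next(&req, &ix, &start_keys, recs_nr, 0) issued instead, and that
 * the request has completed; consume() stands for arbitrary caller code.
 *
 * @code
 * struct m0_dix_next_reply rep;
 * uint64_t                 k;
 * uint64_t                 v;
 *
 * if (m0_dix_generic_rc(&req) == 0) {
 *         for (k = 0; k < m0_dix_req_nr(&req); k++) {
 *                 if (m0_dix_item_rc(&req, k) != 0)
 *                         continue;
 *                 for (v = 0; v < m0_dix_next_rep_nr(&req, k); v++) {
 *                         m0_dix_next_rep(&req, k, v, &rep);
 *                         consume(&rep.dnr_key, &rep.dnr_val);
 *                 }
 *         }
 * }
 * @endcode
 */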
2474 
2475 M0_INTERNAL int m0_dix_item_rc(const struct m0_dix_req *req,
2476  uint64_t idx)
2477 {
2478  M0_PRE(m0_dix_generic_rc(req) == 0);
2479  M0_PRE(idx < m0_dix_req_nr(req));
2480  return req->dr_items[idx].dxi_rc;
2481 }
2482 
2483 M0_INTERNAL int m0_dix_generic_rc(const struct m0_dix_req *req)
2484 {
2486  return M0_RC(req->dr_sm.sm_rc);
2487 }
2488 
2489 M0_INTERNAL int m0_dix_req_rc(const struct m0_dix_req *req)
2490 {
2491  int rc;
2492  int i;
2493 
2494  rc = m0_dix_generic_rc(req);
2495  if (rc == 0)
2496  for (i = 0; i < m0_dix_req_nr(req); i++) {
2497  rc = m0_dix_item_rc(req, i);
2498  if (rc != 0)
2499  break;
2500  }
2501  return M0_RC(rc);
2502 }
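/*
 * Return code summary: m0_dix_generic_rc() reports the request-wide result
 * taken from the request state machine, m0_dix_item_rc() reports the
 * per-key result (meaningful only when the generic rc is 0), and
 * m0_dix_req_rc() combines the two, returning the generic rc or, if that
 * is 0, the first non-zero per-item rc.
 */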
2503 
2504 M0_INTERNAL uint64_t m0_dix_req_nr(const struct m0_dix_req *req)
2505 {
2506  return req->dr_items_nr;
2507 }
2508 
2509 M0_INTERNAL void m0_dix_get_rep_mlock(struct m0_dix_req *req, uint64_t idx)
2510 {
2512  M0_PRE(req->dr_type == DIX_GET);
2513  M0_PRE(idx < req->dr_items_nr);
2514 
2515  req->dr_items[idx].dxi_key = M0_BUF_INIT0;
2516  req->dr_items[idx].dxi_val = M0_BUF_INIT0;
2517 }
2518 
2519 M0_INTERNAL void m0_dix_next_rep_mlock(struct m0_dix_req *req,
2520  uint32_t key_idx,
2521  uint32_t val_idx)
2522 {
2523  struct m0_dix_next_resultset *rs = &req->dr_rs;
2524  struct m0_dix_next_results *res;
2525  struct m0_cas_next_reply **reps;
2526 
2528  M0_PRE(req->dr_type == DIX_NEXT);
2529  M0_PRE(rs != NULL);
2530  M0_PRE(key_idx < rs->nrs_res_nr);
2531  res = &rs->nrs_res[key_idx];
2532  reps = res->drs_reps;
2533  M0_PRE(val_idx < res->drs_pos);
2534  reps[val_idx]->cnp_val = M0_BUF_INIT0;
2535  reps[val_idx]->cnp_key = M0_BUF_INIT0;
2536 }
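/*
 * The two *_mlock() helpers above detach the reply buffers from the request
 * by resetting the internal buffer descriptors to M0_BUF_INIT0, so that the
 * buffers are not released when the request is finalised; the caller then
 * owns the memory and is expected to free it (e.g. with m0_buf_free()) when
 * it is no longer needed.
 */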
2537 
2538 static void dix_item_fini(const struct m0_dix_req *req,
2539  struct m0_dix_item *item)
2540 {
2541  switch (req->dr_type) {
2542  case DIX_NEXT:
2543  m0_buf_free(&item->dxi_key);
2544  /* Fall through. */
2545  case DIX_GET:
2546  m0_buf_free(&item->dxi_val);
2547  break;
2548  default:
2549  break;
2550  }
2551 }
2552 
2553 M0_INTERNAL void m0_dix_req_fini(struct m0_dix_req *req)
2554 {
2555  uint32_t i;
2556 
2558  for (i = 0; i < req->dr_indices_nr; i++)
2559  m0_dix_fini(&req->dr_indices[i]);
2560  m0_free(req->dr_indices);
2561  M0_ASSERT((req->dr_orig_indices != NULL) ==
2562  (req->dr_type == DIX_CREATE));
2563  if (req->dr_orig_indices != NULL) {
2564  for (i = 0; i < req->dr_indices_nr; i++)
2565  m0_dix_fini(&req->dr_orig_indices[i]);
2566  m0_free(req->dr_orig_indices);
2567  }
2568  for (i = 0; i < req->dr_items_nr; i++)
2569  dix_item_fini(req, &req->dr_items[i]);
2570  m0_free(req->dr_items);
2571  m0_free(req->dr_recs_nr);
2572  m0_free(req->dr_rop);
2573  m0_dix_rs_fini(&req->dr_rs);
2574  m0_sm_fini(&req->dr_sm);
2575 }
2576 
2577 M0_INTERNAL void m0_dix_req_fini_lock(struct m0_dix_req *req)
2578 {
2579  struct m0_sm_group *grp = dix_req_smgrp(req);
2580 
2581  m0_sm_group_lock(grp);
2582  m0_dix_req_fini(req);
2583  m0_sm_group_unlock(grp);
2585 }
2586 
2587 M0_INTERNAL int m0_dix_copy(struct m0_dix *dst, const struct m0_dix *src)
2588 {
2589  *dst = *src;
2590  if (src->dd_layout.dl_type == DIX_LTYPE_DESCR)
2591  return m0_dix_ldesc_copy(&dst->dd_layout.u.dl_desc,
2592  &src->dd_layout.u.dl_desc);
2593  return 0;
2594 }
2595 
2596 M0_INTERNAL int m0_dix_desc_set(struct m0_dix *dix,
2597  const struct m0_dix_ldesc *desc)
2598 {
2599  dix->dd_layout.dl_type = DIX_LTYPE_DESCR;
2600  return m0_dix_ldesc_copy(&dix->dd_layout.u.dl_desc, desc);
2601 }
2602 
2603 M0_INTERNAL void m0_dix_fini(struct m0_dix *dix)
2604 {
2605  if (dix->dd_layout.dl_type == DIX_LTYPE_DESCR)
2606  m0_dix_ldesc_fini(&dix->dd_layout.u.dl_desc);
2607 }
2608 
2609 M0_INTERNAL int m0_dix_sm_conf_init(void)
2610 {
2615 }
2616 
2617 M0_INTERNAL void m0_dix_sm_conf_fini(void)
2618 {
2619  m0_sm_addb2_fini(&dix_req_sm_conf);
2620  m0_sm_conf_fini(&dix_req_sm_conf);
2621 }
2622 
2623 
2624 #undef M0_TRACE_SUBSYSTEM
2625 
2628 /*
2629  * Local variables:
2630  * c-indentation-style: "K&R"
2631  * c-basic-offset: 8
2632  * tab-width: 8
2633  * fill-column: 80
2634  * scroll-step: 1
2635  * End:
2636  */
2637 /*
2638  * vim: tabstop=8 shiftwidth=8 noexpandtab textwidth=80 nowrap
2639  */