pool.c
1 /*
2  * Copyright (c) 2011-2020 Seagate Technology LLC and/or its Affiliates
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *
16  * For any questions about this software or licensing,
17  * please email opensource@seagate.com or cortx-questions@seagate.com.
18  *
19  */
20 
21 
22 #define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_POOL
23 #include "lib/trace.h"
24 
25 #include "lib/errno.h"
26 #include "lib/memory.h"
27 #include "lib/misc.h"
28 #include "lib/assert.h"
29 #include "lib/hash.h" /* m0_hash */
30 #include "lib/string.h" /* m0_streq */
31 #include "conf/confc.h" /* m0_confc_from_obj */
32 #include "conf/schema.h" /* M0_CST_IOS, M0_CST_MDS */
33 #include "conf/diter.h" /* m0_conf_diter_next_sync */
34 #include "conf/obj_ops.h" /* M0_CONF_DIRNEXT */
35 #include "conf/pvers.h" /* m0_conf_pver_find_by_fid */
36 #include "conf/cache.h" /* m0_conf_cache_contains */
37 #include "conf/helpers.h" /* m0_confc_root_open() */
38 #include "reqh/reqh_service.h" /* m0_reqh_service_ctx */
39 #include "reqh/reqh.h"
40 #include "rpc/rpc_machine.h" /* m0_rpc_machine_ep */
41 #include "ha/entrypoint.h" /* m0_ha_entrypoint_client */
42 #include "ha/ha.h" /* m0_ha */
43 #include "module/instance.h" /* m0 */
44 #include "pool/pool.h"
45 #include "pool/pool_fops.h"
46 #include "fd/fd.h" /* m0_fd_tile_build, m0_fd_tree_build */
47 #ifndef __KERNEL__
48 # include "motr/setup.h"
49 #else
50 # include "m0t1fs/linux_kernel/m0t1fs.h" /* m0t1fs_sb */
51 #endif
52 #include "lib/finject.h" /* M0_FI_ENABLED */
53 
186 enum {
187  /* Returned by pool_device_index() when the device is not found. */
188  POOL_DEVICE_INDEX_INVALID = -1
191 };
192 
197 M0_TL_DESCR_DEFINE(pools_common_svc_ctx, "Service contexts", M0_INTERNAL,
198  struct m0_reqh_service_ctx, sc_link, sc_magic,
200 
201 M0_TL_DEFINE(pools_common_svc_ctx, M0_INTERNAL, struct m0_reqh_service_ctx);
202 
207 M0_TL_DESCR_DEFINE(pools, "pools", M0_INTERNAL,
208  struct m0_pool, po_linkage, po_magic,
210 
211 M0_TL_DEFINE(pools, M0_INTERNAL, struct m0_pool);
212 
213 M0_TL_DESCR_DEFINE(pool_version, "pool versions", M0_INTERNAL,
214  struct m0_pool_version, pv_linkage, pv_magic,
216 
217 M0_TL_DEFINE(pool_version, M0_INTERNAL, struct m0_pool_version);
218 
219 M0_TL_DESCR_DEFINE(pool_failed_devs, "pool failed devices", M0_INTERNAL,
220  struct m0_pooldev, pd_fail_linkage, pd_magic,
222 M0_TL_DEFINE(pool_failed_devs, M0_INTERNAL, struct m0_pooldev);
223 
224 static const struct m0_bob_type pver_bob = {
225  .bt_name = "m0_pool_version",
226  .bt_magix_offset = M0_MAGIX_OFFSET(struct m0_pool_version, pv_magic),
227  .bt_magix = M0_POOL_VERSION_MAGIC,
228  .bt_check = NULL
229 };
231 
234 static int pool_version_get_locked(struct m0_pools_common *pc,
235  const struct m0_fid *pool,
236  struct m0_pool_version **pv);
237 static void pool_version__layouts_evict(struct m0_pool_version *pv,
238  struct m0_layout_domain *ldom);
239 
240 static struct m0_pool *pool_find(struct m0_pools_common *pc,
241  const struct m0_fid *pool)
242 {
243  struct m0_pool *ret;
244 
245  M0_ENTRY("pool="FID_F, FID_P(pool));
246 
247  /*
248  * XXX TODO:
249  * Lock pools common before accessing/updating, at other places.
250  * Merge m0_pool_find() and pool_find().
251  */
252  ret = m0_tl_find(pools, p, &pc->pc_pools, m0_fid_eq(&p->po_id, pool));
253  M0_LEAVE("%sfound", ret == NULL ? "not " : "");
254  return ret;
255 }
256 
257 M0_INTERNAL struct m0_pool *m0_pool_find(struct m0_pools_common *pc,
258  const struct m0_fid *pool)
259 {
260  /*
261  * XXX TODO:
262  * Enable this assert once the pools common locking is fixed in the
263  * pools common update paths.
264  */
265  /*M0_PRE(m0_pools_is_locked(pc));*/
266 
267  return pool_find(pc, pool);
268 }
269 
270 static void pool__layouts_evict(struct m0_pool *pool,
271  struct m0_layout_domain *ldom);
272 
273 M0_INTERNAL int m0_pools_init(void)
274 {
275 
276 #ifndef __KERNEL__
277  return m0_poolmach_fop_init();
278 #else
279  return 0;
280 #endif
281 }
282 
283 M0_INTERNAL void m0_pools_fini(void)
284 {
285 #ifndef __KERNEL__
286  m0_poolmach_fop_fini();
287 #endif
288 }
289 
290 M0_INTERNAL const char *m0_pool_dev_state_to_str(enum m0_pool_nd_state state)
291 {
292  static const char *names[M0_PNDS_NR] = {
293  [M0_PNDS_UNKNOWN] = "unknown",
294  [M0_PNDS_ONLINE] = "online",
295  [M0_PNDS_FAILED] = "failed",
296  [M0_PNDS_OFFLINE] = "offline",
297  [M0_PNDS_SNS_REPAIRING] = "repairing",
298  [M0_PNDS_SNS_REPAIRED] = "repaired",
299  [M0_PNDS_SNS_REBALANCING] = "rebalancing"
300  };
301 
302  M0_PRE(IS_IN_ARRAY(state, names));
303  M0_ASSERT(m0_forall(i, ARRAY_SIZE(names), names[i] != NULL));
304  return names[state];
305 }
306 
307 M0_INTERNAL int m0_pool_init(struct m0_pool *pool, const struct m0_fid *id,
308  enum m0_pver_policy_code pver_policy)
309 {
310  struct m0_pver_policy_type *ppt;
311 
312  M0_ENTRY();
313  pool->po_id = *id;
314  pools_tlink_init(pool);
315  pool_version_tlist_init(&pool->po_vers);
316  pool_failed_devs_tlist_init(&pool->po_failed_devices);
317  ppt = m0_pver_policy_type_find(pver_policy);
318  M0_ASSERT(ppt != NULL);
319  return M0_RC(ppt->ppt_ops->ppto_create(&pool->po_pver_policy));
320 }
321 
322 M0_INTERNAL void m0_pool_fini(struct m0_pool *pool)
323 {
324  pools_tlink_fini(pool);
325  pool_version_tlist_fini(&pool->po_vers);
326  pool_failed_devs_tlist_fini(&pool->po_failed_devices);
327  pool->po_pver_policy->pp_ops->ppo_fini(pool->po_pver_policy);
328 }
329 
330 static bool pools_common_invariant(const struct m0_pools_common *pc)
331 {
332  return _0C(pc != NULL) && _0C(pc->pc_confc != NULL);
333 }
334 
335 static bool pool_version_invariant(const struct m0_pool_version *pv)
336 {
337  return _0C(pv != NULL) && _0C(m0_pool_version_bob_check(pv)) &&
338  _0C(m0_fid_is_set(&pv->pv_id)) && _0C(pv->pv_pool != NULL);
339 }
340 
341 static bool obj_is_service(const struct m0_conf_obj *obj)
342 {
343  return m0_conf_obj_type(obj) == &M0_CONF_SERVICE_TYPE;
344 }
345 
346 static bool is_mds(const struct m0_conf_obj *obj)
347 {
348  return obj_is_service(obj) &&
349  M0_CONF_CAST(obj, m0_conf_service)->cs_type == M0_CST_MDS;
350 }
351 
356 static int mds_map_fill(struct m0_pools_common *pc,
357  const struct m0_conf_enclosure *encl)
358 {
359  struct m0_conf_diter it;
360  struct m0_reqh_service_ctx *ctx;
361  struct m0_conf_service *svc;
362  uint64_t idx = 0;
363  int rc;
364 
365  M0_ENTRY("encl="FID_F" node="FID_F, FID_P(&encl->ce_obj.co_id),
366  FID_P(&encl->ce_node->cn_obj.co_id));
367 
368  rc = m0_conf_diter_init(&it, pc->pc_confc, &encl->ce_node->cn_obj,
369  M0_CONF_NODE_PROCESSES_FID,
370  M0_CONF_PROCESS_SERVICES_FID);
371  if (rc != 0)
372  return M0_ERR(rc);
373 
374  while ((rc = m0_conf_diter_next_sync(&it, is_mds)) == M0_CONF_DIRNEXT) {
375  svc = M0_CONF_CAST(m0_conf_diter_result(&it), m0_conf_service);
376  ctx = m0_tl_find(pools_common_svc_ctx, ctx, &pc->pc_svc_ctxs,
377  m0_fid_eq(&svc->cs_obj.co_id, &ctx->sc_fid));
378  pc->pc_mds_map[idx++] = ctx;
379  M0_LOG(M0_DEBUG, "mds index:%" PRIu64 ", no. of mds:%"PRIu64,
380  idx, pc->pc_nr_svcs[M0_CST_MDS]);
381  M0_ASSERT(idx <= pc->pc_nr_svcs[M0_CST_MDS]);
382  }
383  m0_conf_diter_fini(&it);
384  return M0_RC(rc);
385 }
386 
387 static bool obj_is_enclosurev(const struct m0_conf_obj *obj)
388 {
389  return m0_conf_obj_type(obj) == &M0_CONF_OBJV_TYPE &&
390  m0_conf_obj_type(M0_CONF_CAST(obj, m0_conf_objv)->cv_real) ==
391  &M0_CONF_ENCLOSURE_TYPE;
392 }
393 
394 static int pool_mds_map_init(struct m0_pools_common *pc)
395 {
396  struct m0_conf_obj *mdpool;
397  struct m0_conf_objv *objv;
398  struct m0_conf_root *root;
399  struct m0_conf_diter it;
400  int rc;
401 
402  M0_ENTRY();
403  M0_PRE(!pools_common_svc_ctx_tlist_is_empty(&pc->pc_svc_ctxs));
404  M0_PRE(pc->pc_mds_map != NULL);
405 
406  root = M0_CONF_CAST(pc->pc_confc->cc_root, m0_conf_root);
407  rc = m0_confc_open_sync(&mdpool, &root->rt_obj,
408  M0_CONF_ROOT_POOLS_FID, root->rt_mdpool);
409  if (rc != 0)
410  return M0_ERR(rc);
411 
412  rc = m0_conf_diter_init(&it, pc->pc_confc, mdpool,
413  M0_CONF_POOL_PVERS_FID,
414  M0_CONF_PVER_SITEVS_FID,
415  M0_CONF_SITEV_RACKVS_FID,
416  M0_CONF_RACKV_ENCLVS_FID);
417  if (rc != 0)
418  goto end;
419 
420  while ((rc = m0_conf_diter_next_sync(&it, obj_is_enclosurev)) ==
421  M0_CONF_DIRNEXT) {
422  objv = M0_CONF_CAST(m0_conf_diter_result(&it), m0_conf_objv);
423  /*
424  * XXX BUG: mds_map_fill() overwrites pc->pc_mds_map[].
425  *
426  * The bug will bite on cluster configurations that have
427  * several enclosure-v objects in the MD pool subtree.
428  */
429  rc = mds_map_fill(pc, M0_CONF_CAST(objv->cv_real,
430  m0_conf_enclosure));
431  if (rc != 0)
432  break;
433  }
434  m0_conf_diter_fini(&it);
435 end:
436  m0_confc_close(mdpool);
437  return M0_RC(rc);
438 }
439 
440 static bool obj_is_ios_cas_diskv(const struct m0_conf_obj *obj)
441 {
442  return m0_conf_obj_type(obj) == &M0_CONF_OBJV_TYPE &&
444  M0_BITS(M0_CST_IOS, M0_CST_CAS));
445 }
446 
447 M0_INTERNAL int m0_pool_version_device_map_init(struct m0_pool_version *pv,
448  struct m0_conf_pver *pver,
449  struct m0_pools_common *pc)
450 {
451  struct m0_conf_diter it;
452  struct m0_conf_drive *disk;
453  struct m0_conf_sdev *sdev;
454  struct m0_conf_service *svc;
455  struct m0_reqh_service_ctx *ctx;
456  struct m0_pool_device_to_service *dev;
457  uint32_t nr_sdevs = 0;
458  int rc;
459 
460  M0_ENTRY();
461  M0_PRE(pc != NULL && pc->pc_dev2svc != NULL);
462  M0_PRE(!pools_common_svc_ctx_tlist_is_empty(&pc->pc_svc_ctxs));
463 
464  rc = m0_conf_diter_init(&it, pc->pc_confc, &pver->pv_obj,
465  M0_CONF_PVER_SITEVS_FID,
466  M0_CONF_SITEV_RACKVS_FID,
467  M0_CONF_RACKV_ENCLVS_FID,
468  M0_CONF_ENCLV_CTRLVS_FID,
469  M0_CONF_CTRLV_DRIVEVS_FID);
470  if (rc != 0)
471  return M0_ERR(rc);
472  /*
473  * XXX TODO: Replace m0_conf_diter_next_sync() with
474  * m0_conf_diter_next().
475  */
476  while ((rc = m0_conf_diter_next_sync(&it, obj_is_ios_cas_diskv)) ==
477  M0_CONF_DIRNEXT) {
478  /*
479  * Assign helper pointers.
480  */
481  disk = M0_CONF_CAST(M0_CONF_CAST(m0_conf_diter_result(&it),
482  m0_conf_objv)->cv_real,
483  m0_conf_drive);
484  sdev = disk->ck_sdev;
485  svc = M0_CONF_CAST(m0_conf_obj_grandparent(&sdev->sd_obj),
486  m0_conf_service);
487  /*
488  * Find a m0_reqh_service_ctx that corresponds to `svc'.
489  */
490  ctx = m0_tl_find(pools_common_svc_ctx, ctx, &pc->pc_svc_ctxs,
491  m0_fid_eq(&ctx->sc_fid, &svc->cs_obj.co_id) &&
492  ctx->sc_type == svc->cs_type);
493  M0_ASSERT(ctx != NULL);
494 
495  M0_LOG(M0_DEBUG, "dev_idx=%d service="FID_F" disk="FID_F
496  " sdev_fid="FID_F, sdev->sd_dev_idx,
497  FID_P(&svc->cs_obj.co_id), FID_P(&disk->ck_obj.co_id),
498  FID_P(&sdev->sd_obj.co_id));
500  /*
501  * Set "(m0_reqh_service_ctx, sdev_fid)" tuple, associated
502  * with this dev_idx, or make sure it is set to correct
503  * values.
504  */
505  dev = &pc->pc_dev2svc[sdev->sd_dev_idx];
506  if (dev->pds_ctx == NULL) {
507  dev->pds_sdev_fid = sdev->sd_obj.co_id;
508  dev->pds_ctx = ctx;
509  } else {
510  M0_ASSERT(m0_fid_eq(&dev->pds_sdev_fid,
511  &sdev->sd_obj.co_id));
512  M0_ASSERT(dev->pds_ctx == ctx);
513  }
514  M0_CNT_INC(nr_sdevs);
515  }
516 
517  m0_conf_diter_fini(&it);
518  M0_POST(nr_sdevs <= pc->pc_nr_devices && nr_sdevs == pv->pv_attr.pa_P);
519  return M0_RC(rc);
520 }
521 
522 M0_INTERNAL int m0_pool_version_init(struct m0_pool_version *pv,
523  const struct m0_fid *id,
524  struct m0_pool *pool,
525  uint32_t pool_width,
526  uint32_t nr_nodes,
527  uint32_t nr_data,
528  uint32_t nr_failures,
529  uint32_t nr_spare)
530 {
531  int rc;
532 
533  M0_ENTRY("pver id:"FID_F"N:%d K:%d S:%d P:%d", FID_P(id), nr_data,
534  nr_failures, nr_spare, pool_width);
535  pv->pv_id = *id;
536  pv->pv_attr.pa_N = nr_data;
537  pv->pv_attr.pa_K = nr_failures;
538  pv->pv_attr.pa_S = nr_spare;
539  pv->pv_attr.pa_P = pool_width;
540  pv->pv_pool = pool;
541  pv->pv_nr_nodes = nr_nodes;
542 
546  m0_pool_version_bob_init(pv);
547  pool_version_tlink_init(pv);
548  pv->pv_is_dirty = false;
549  pv->pv_is_stale = false;
550 
552  return M0_RC(rc);
553 }
554 
555 M0_INTERNAL struct m0_pool_version *
556 m0_pool_clean_pver_find(struct m0_pool *pool)
557 {
558  struct m0_pool_version *pver;
559 
560  m0_tl_for (pool_version, &pool->po_vers, pver) {
561  if (!pver->pv_is_dirty)
562  return pver;
563  } m0_tl_endfor;
564  return NULL;
565 }
566 
567 M0_INTERNAL struct m0_pool_version *
568 m0_pool_version_lookup(struct m0_pools_common *pc,
569  const struct m0_fid *id)
570 {
571  struct m0_pool *pool;
572  struct m0_pool_version *pver;
573 
574  M0_ENTRY(FID_F, FID_P(id));
575 
576  m0_tl_for (pools, &pc->pc_pools, pool) {
577  pver = m0_tl_find(pool_version, pv, &pool->po_vers,
578  m0_fid_eq(&pv->pv_id, id));
579  if (pver != NULL)
580  return pver;
581  } m0_tl_endfor;
582  return NULL;
583 }
584 
585 M0_INTERNAL struct m0_pool_version *
586 m0_pool_version_find(struct m0_pools_common *pc, const struct m0_fid *id)
587 {
588  struct m0_pool_version *pv;
589  struct m0_conf_root *root;
590  struct m0_conf_pver *pver = NULL;
591  int rc = 0;
592 
593  M0_ENTRY(FID_F, FID_P(id));
594  M0_PRE(pc != NULL);
595 
597  pv = m0_pool_version_lookup(pc, id);
598  if (pv != NULL)
599  goto end;
600 
601  rc = m0_confc_root_open(pc->pc_confc, &root);
602  if (rc != 0) {
603  M0_LOG(M0_ERROR, "Failed to open conf-root object: rc=%d", rc);
604  goto end;
605  }
609  end:
611  if (rc != 0)
612  pv = NULL;
613  M0_LEAVE("pv=%p", pv);
614  return pv;
615 }
616 
617 static bool is_md_pool(const struct m0_pools_common *pc,
618  const struct m0_pool *pool)
619 {
620  return (pc->pc_md_pool != NULL &&
622 }
623 
624 static bool is_dix_pool(const struct m0_pools_common *pc,
625  const struct m0_pool *pool)
626 {
627  return (pc->pc_dix_pool != NULL &&
629 }
630 
631 static int pool_version_get_locked(struct m0_pools_common *pc,
632  const struct m0_fid *hint,
633  struct m0_pool_version **pv)
634 {
635  int rc = -ENOENT;
636  struct m0_pool *pool;
637 
638  M0_ENTRY();
640 
641  if (pv == NULL)
642  return M0_RC(-EINVAL);
643 
644  m0_tl_for(pools, &pc->pc_pools, pool) {
645  if (is_md_pool(pc, pool) || is_dix_pool(pc, pool))
646  continue;
647  if (hint != NULL && !m0_fid_eq(&pool->po_id, hint))
648  continue;
649  rc = pool->po_pver_policy->pp_ops->ppo_get(pc, pool, pv);
650  if (rc == 0 || hint != NULL)
651  break;
652  /*
653  * Couldn't find pver in this pool.
654  * No worries, let's try another one.
655  */
656  } m0_tl_endfor;
657 
658  return M0_RC(rc);
659 }
660 
661 M0_INTERNAL int
662 m0_pool_version_get(struct m0_pools_common *pc,
663  const struct m0_fid *pool,
664  struct m0_pool_version **pv)
665 {
666  int rc;
667 
668  M0_ENTRY();
669  m0_pools_lock(pc);
670  rc = pool_version_get_locked(pc, pool, pv);
671  m0_pools_unlock(pc);
672  return M0_RC(rc);
673 }
674 
676  struct m0_pool_version **pv)
677 {
678  M0_ENTRY();
680 
681  if (pv == NULL)
682  return M0_ERR(-EINVAL);
683 
685  if (*pv != NULL)
686  return M0_RC(0);
687  else
688  return M0_ERR(-ENOENT);
689 
690  /*
691  * @todo: Enable this logic once multiple DIX pvers available.
692  *
693  * m0_tl_for(pools, &pc->pc_pools, pool) {
694  * if (is_dix_pool(pc, pool)) {
695  * *pv = m0_pool_clean_pver_find(pool);
696  * if (*pv != NULL)
697  * return M0_RC(0);
698  * }
699  *} m0_tl_endfor;
700  */
701 }
702 
703 M0_INTERNAL int
704 m0_dix_pool_version_get(struct m0_pools_common *pc,
705  struct m0_pool_version **pv)
706 {
707  int rc;
708 
709  M0_ENTRY();
713  return M0_RC(rc);
714 }
715 
716 static int _nodes_count(struct m0_conf_pver *pver, uint32_t *nodes)
717 {
718  struct m0_conf_diter it;
719  struct m0_confc *confc;
720  uint32_t nr_nodes = 0;
721  int rc;
722 
723  confc = m0_confc_from_obj(&pver->pv_obj);
724  rc = m0_conf_diter_init(&it, confc, &pver->pv_obj,
725  M0_CONF_PVER_SITEVS_FID,
726  M0_CONF_SITEV_RACKVS_FID,
727  M0_CONF_RACKV_ENCLVS_FID,
728  M0_CONF_ENCLV_CTRLVS_FID);
729  if (rc != 0)
730  return M0_ERR(rc);
731 
732  /*
733  * XXX TODO: Replace m0_conf_diter_next_sync() with
734  * m0_conf_diter_next().
735  */
737  M0_CONF_DIRNEXT) {
738  /* We filter only enclosurev objects. */
739  M0_CNT_INC(nr_nodes);
740  }
741 
742  *nodes = nr_nodes;
744 
745  return M0_RC(rc);
746 }
747 
748 M0_INTERNAL int m0_pool_version_init_by_conf(struct m0_pool_version *pv,
749  struct m0_conf_pver *pver,
750  struct m0_pool *pool,
751  struct m0_pools_common *pc)
752 {
753  uint32_t nodes = 0;
754  uint32_t failure_level;
755  int rc;
756 
757  M0_ENTRY();
758  M0_PRE(pv != NULL && pver != NULL && pool != NULL && pc != NULL);
759 
760  rc = _nodes_count(pver, &nodes);
761  if (rc != 0)
762  return M0_ERR(rc);
763  rc = m0_pool_version_init(pv, &pver->pv_obj.co_id, pool,
764  pver->pv_u.subtree.pvs_attr.pa_P, nodes,
765  pver->pv_u.subtree.pvs_attr.pa_N,
766  pver->pv_u.subtree.pvs_attr.pa_K,
767  pver->pv_u.subtree.pvs_attr.pa_S) ?:
771  if (rc == 0) {
772  pv->pv_pc = pc;
773  pv->pv_mach.pm_pver = pv;
774  memcpy(pv->pv_fd_tol_vec, pver->pv_u.subtree.pvs_tolerance,
775  sizeof(pver->pv_u.subtree.pvs_tolerance));
776  rc = m0_fd_tile_build(pver, pv, &failure_level) ?:
778  if (rc == 0)
779  pool_version_tlist_add_tail(&pool->po_vers, pv);
780  }
781 
783  return M0_RC(rc);
784 }
785 
786 M0_INTERNAL void m0_pool_version_fini(struct m0_pool_version *pv)
787 {
788  M0_ENTRY();
790 
791  pool_version_tlink_fini(pv);
792  m0_pool_version_bob_fini(pv);
794  pv->pv_mach.pm_pver = NULL;
797 
798  M0_LEAVE();
799 }
800 
801 M0_INTERNAL void m0_pool_versions_fini(struct m0_pool *pool)
802 {
803  struct m0_pool_version *pv;
804 
805  m0_tl_teardown(pool_version, &pool->po_vers, pv) {
806  m0_pool_version_fini(pv);
807  m0_free(pv);
808  }
809 }
810 
811 M0_INTERNAL void m0_pool_versions_stale_mark(struct m0_pools_common *pc,
812  struct m0_confc_update_state *s)
813 {
814  struct m0_pool *pool;
815  struct m0_pool_version *pver;
816  struct m0_conf_cache *cache = &pc->pc_confc->cc_cache;
817 
818  M0_PRE(m0_mutex_is_locked(&s->cus_lock));
819 
820  if (pc->pc_confc == NULL)
821  return;
823  m0_tl_for(pools, &pc->pc_pools, pool) {
824  m0_tl_for(pool_version, &pool->po_vers, pver) {
825  if (!m0_conf_cache_contains(cache, &pver->pv_id))
826  pver->pv_is_stale = true;
827  } m0_tl_endfor;
828  } m0_tl_endfor;
830 }
831 
832 M0_INTERNAL struct m0_pool_version *
833 m0_pool_version_dix_get(const struct m0_pools_common *pc)
834 {
835  M0_PRE(pc != NULL);
836 
837  return pool_version_tlist_head(&pc->pc_dix_pool->po_vers);
838 }
839 
840 M0_INTERNAL struct m0_pool_version *
841 m0_pool_version_md_get(const struct m0_pools_common *pc)
842 {
843  M0_PRE(pc != NULL);
844 
845  return pool_version_tlist_head(&pc->pc_md_pool->po_vers);
846 }
847 
848 static void service_ctxs_destroy(struct m0_pools_common *pc)
849 {
850  struct m0_reqh_service_ctx *ctx;
851  int rc;
852  int i;
853 
854  M0_ENTRY();
855 
856  /* Disconnect from all services asynchronously. */
857  m0_tl_for(pools_common_svc_ctx, &pc->pc_svc_ctxs, ctx) {
858  if (m0_reqh_service_ctx_is_connected(ctx))
859  m0_reqh_service_disconnect(ctx);
860  } m0_tl_endfor;
861 
862  m0_tl_teardown(pools_common_svc_ctx, &pc->pc_svc_ctxs, ctx) {
863  if (m0_reqh_service_ctx_is_connected(ctx)) {
864  rc = m0_reqh_service_disconnect_wait(ctx);
865  M0_ASSERT_INFO(M0_IN(rc, (0, -ECANCELED, -ETIMEDOUT,
866  -EINVAL, -EHOSTUNREACH,
867  -ECONNREFUSED, -EIO)),
868  "rc=%d", rc);
869  }
870  m0_reqh_service_ctx_destroy(ctx);
871  }
872  M0_POST(pools_common_svc_ctx_tlist_is_empty(&pc->pc_svc_ctxs));
873 
874  for (i = 0; i < pc->pc_nr_devices; ++i)
875  pc->pc_dev2svc[i].pds_ctx = NULL;
876 
877  M0_LEAVE();
878 }
879 
880 static bool reqh_svc_ctx_is_in_pools(struct m0_pools_common *pc,
881  struct m0_conf_service *cs)
882 {
883  return m0_tl_find(pools_common_svc_ctx, ctx, &pc->pc_svc_ctxs,
884  m0_fid_eq(&cs->cs_obj.co_id, &ctx->sc_fid)) != NULL;
885 }
886 
891 static int __service_ctx_create(struct m0_pools_common *pc,
892  struct m0_conf_service *cs,
893  bool services_connect)
894 {
895  struct m0_reqh_service_ctx *ctx;
896  const char **endpoint;
897  bool already_in;
898  int rc = 0;
899 
901  M0_PRE((pc->pc_rmach != NULL) == services_connect);
902 
903  for (endpoint = cs->cs_endpoints; *endpoint != NULL; ++endpoint) {
904  M0_ASSERT_INFO(endpoint == cs->cs_endpoints,
905  "Only single endpoint per service is supported for now");
906  already_in = reqh_svc_ctx_is_in_pools(pc, cs);
907  M0_LOG(M0_DEBUG, "%s svc:"FID_F" type:%d ep:%s",
908  already_in ? "unchanged" : "new",
909  FID_P(&cs->cs_obj.co_id),
910  (int)cs->cs_type, *endpoint);
911  if (already_in)
912  continue;
913  rc = m0_reqh_service_ctx_create(&cs->cs_obj, cs->cs_type,
914  pc->pc_rmach, *endpoint,
916  &ctx);
917  if (rc != 0)
918  return M0_ERR(rc);
919  ctx->sc_pc = pc;
920  pools_common_svc_ctx_tlink_init_at_tail(ctx, &pc->pc_svc_ctxs);
921  if (services_connect) {
922  /*
923  * m0_reqh_service_ctx handles current HA state and
924  * further state changes.
925  */
929  }
930  }
932  return M0_RC(rc);
933 }
934 
935 static bool is_local_svc(const struct m0_conf_service *svc,
936  enum m0_conf_service_type stype)
937 {
938  const struct m0_conf_process *proc;
939  struct m0_rpc_machine *mach;
940  const char *local_ep;
941 
942  if (svc->cs_type != stype)
943  return false;
944  proc = M0_CONF_CAST(m0_conf_obj_grandparent(&svc->cs_obj),
945  m0_conf_process);
946  mach = m0_reqh_rpc_mach_tlist_head(
947  &m0_conf_obj2reqh(&svc->cs_obj)->rh_rpc_machines);
948  local_ep = m0_rpc_machine_ep(mach);
949  M0_LOG(M0_DEBUG, "local_ep=%s proc_ep=%s svc_type=%d process="FID_F
950  "service="FID_F, local_ep, proc->pc_endpoint, svc->cs_type,
951  FID_P(&proc->pc_obj.co_id), FID_P(&svc->cs_obj.co_id));
952  return m0_streq(local_ep, proc->pc_endpoint);
953 }
954 
955 static int active_rm_ctx_create(struct m0_pools_common *pc,
956  bool service_connect)
957 {
958  struct m0_conf_service *svc;
959  struct m0_fid active_rm;
960  int rc = 0;
961 
962  active_rm = pc->pc_ha_ecl->ecl_rep.hae_active_rm_fid;
963  if (m0_fid_is_set(&active_rm)) {
964  rc = m0_conf_service_get(pc->pc_confc, &active_rm, &svc);
965  if (rc != 0)
966  return M0_ERR(rc);
967  rc = __service_ctx_create(pc, svc, service_connect);
968  m0_confc_close(&svc->cs_obj);
969  }
970  return M0_RC(rc);
971 }
972 
973 static struct m0_reqh_service_ctx *
974 active_rm_ctx_find(struct m0_pools_common *pc)
975 {
976  return m0_pools_common_service_ctx_find(pc,
977  &pc->pc_ha_ecl->ecl_rep.hae_active_rm_fid,
978  M0_CST_RMS);
979 }
980 
985 static int service_ctxs_create(struct m0_pools_common *pc,
986  bool service_connect)
987 {
988  struct m0_conf_diter it;
989  struct m0_conf_service *svc;
990  bool rm_is_set;
991  int rc;
992 
993  M0_ENTRY();
994 
995  rc = active_rm_ctx_create(pc, service_connect);
996  if (rc != 0)
997  return M0_ERR(rc);
998  rm_is_set = active_rm_ctx_find(pc) != NULL;
999 
1000  rc = M0_FI_ENABLED("diter_fail") ? -ENOMEM :
1001  m0_conf_diter_init(&it, pc->pc_confc, pc->pc_confc->cc_root,
1002  M0_CONF_ROOT_NODES_FID,
1003  M0_CONF_NODE_PROCESSES_FID,
1004  M0_CONF_PROCESS_SERVICES_FID);
1005  if (rc != 0) {
1006  if (rm_is_set)
1008  return M0_ERR(rc);
1009  }
1010  /*
1011  * XXX TODO: Replace m0_conf_diter_next_sync() with
1012  * m0_conf_diter_next().
1013  */
1014  while ((rc = m0_conf_diter_next_sync(&it, obj_is_service)) ==
1015  M0_CONF_DIRNEXT) {
1016  svc = M0_CONF_CAST(m0_conf_diter_result(&it), m0_conf_service);
1017  /*
1018  * Connection to confd is managed by configuration client.
1019  * confd is already connected in m0_confc_init() to the
1020  * endpoint provided by the command line argument -C.
1021  *
1022  * Connection to RMS is skipped here, except for the local RMS
1023  * when the local configuration is preloaded. Nodes will only
1024  * use RM services returned by the HA entrypoint.
1025  *
1026  * FI services need no service context either.
1027  *
1028  * DTM0 service has its own transport.
1029  */
1030  if (((!rm_is_set && is_local_svc(svc, M0_CST_RMS)) ||
1031  !M0_IN(svc->cs_type, (M0_CST_CONFD, M0_CST_RMS, M0_CST_HA,
1032  M0_CST_FIS, M0_CST_DTM0)))) {
1033  rc = __service_ctx_create(pc, svc, service_connect);
1034  if (rc != 0)
1035  break;
1036  }
1037  }
1038  m0_conf_diter_fini(&it);
1039  if (rc != 0)
1041  return M0_RC(rc);
1042 }
1043 
1044 static bool service_ctx_ha_entrypoint_cb(struct m0_clink *clink)
1045 {
1047  struct m0_ha_entrypoint_client *ecl = pc->pc_ha_ecl;
1048  struct m0_ha_entrypoint_rep *rep = &ecl->ecl_rep;
1049  enum m0_ha_entrypoint_client_state state;
1050 
1051  state = m0_ha_entrypoint_client_state_get(ecl);
1052  if (state == M0_HEC_AVAILABLE &&
1053  rep->hae_control != M0_HA_ENTRYPOINT_QUERY &&
1054  m0_fid_is_set(&rep->hae_active_rm_fid) &&
1056  &rep->hae_active_rm_fid))) {
1059  M0_LOG(M0_DEBUG, "new RM="FID_F" pc_rm_ctx=%p",
1060  FID_P(&rep->hae_active_rm_fid),
1061  pc->pc_rm_ctx);
1062  /*
1063  * If pc_rm_ctx == NULL - it will be set at
1064  * m0_pools_common_conf_ready_async_cb() when
1065  * the configuration is ready. The latter is
1066  * required for active_rm_ctx_create().
1067  */
1069  }
1070  return true;
1071 }
1072 
1073 M0_INTERNAL struct m0_rpc_session *
1075 {
1076  struct m0_rpc_session *sess = NULL;
1077 
1079  if (pc->pc_rm_ctx != NULL)
1080  sess = &pc->pc_rm_ctx->sc_rlink.rlk_sess;
1082 
1083  return sess;
1084 }
1085 
1086 static struct m0_reqh_service_ctx *
1087 service_ctx_find_by_type(const struct m0_pools_common *pc,
1088  enum m0_conf_service_type type)
1089 {
1090  return m0_tl_find(pools_common_svc_ctx, ctx, &pc->pc_svc_ctxs,
1091  ctx->sc_type == type);
1092 }
1093 
1094 M0_INTERNAL struct m0_reqh_service_ctx *
1095 m0_pools_common_service_ctx_find(const struct m0_pools_common *pc,
1096  const struct m0_fid *id,
1097  enum m0_conf_service_type type)
1098 {
1099  return m0_tl_find(pools_common_svc_ctx, ctx, &pc->pc_svc_ctxs,
1100  m0_fid_eq(id, &ctx->sc_fid) && ctx->sc_type == type);
1101 }
1102 
1110 static bool pools_common_conf_expired_cb(struct m0_clink *clink)
1111 {
1113  struct m0_reqh_service_ctx *ctx;
1114 
1115  M0_ENTRY("pc %p", pc);
1116 
1117  m0_tl_for(pools_common_svc_ctx, &pc->pc_svc_ctxs, ctx) {
1118  m0_reqh_service_ctx_unsubscribe(ctx);
1119  } m0_tl_endfor;
1120 #ifndef __KERNEL__
1121  /*
1122  * The async callback for pools_common shall be called after
1123  * configuration expired and new configuration ready events. For this
1124  * reason the clink isn't added to m0_reqh::rh_conf_cache_ready_async
1125  * chan in m0_pools_common_init(), because it might be triggered when
1126  * rconfc starts for the first time.
1127  */
1129  /*
1130  * Note: m0_reqh::rh_conf_cache_ready_async and
1131  * m0_reqh::rh_conf_cache_exp are protected with different mutexes, but
1132  * deadlock is impossible due to the fact that broadcast on
1133  * rh_conf_cache_ready_async happens only after broadcasting on
1134  * the rh_conf_cache_ready chan, which is protected with the same mutex as
1135  * rh_conf_cache_exp. Thus, here the sequence of locks is constant:
1136  * 1. When conf expired:
1137  * m0_mutex_lock(m0_reqh::rh_guard
1138  * m0_mutex_lock(m0_reqh::rh_guard_async
1139  * m0_mutex_unlock(m0_reqh::rh_guard_async
1140  * m0_mutex_unlock(m0_reqh::rh_guard
1141  * 2. When conf ready
1142  * m0_mutex_lock(m0_reqh::rh_guard
1143  * m0_mutex_unlock(m0_reqh::rh_guard
1144  * m0_mutex_lock(m0_reqh::rh_guard_async
1145  * m0_mutex_unlock(m0_reqh::rh_guard_async
1146  */
1150 #endif
1151  M0_LEAVE();
1152  return true;
1153 }
1154 
1162  const struct m0_conf_obj *svc)
1163 {
1164  struct m0_conf_obj *proc;
1165  const char *ep = m0_rpc_link_end_point(&ctx->sc_rlink);
1166 
1167  M0_PRE(svc != NULL); /* service found in conf */
1168  proc = m0_conf_obj_grandparent(svc);
1169  return m0_fid_eq(&proc->co_id, &ctx->sc_fid_process) &&
1170  m0_streq(ep, M0_CONF_CAST(proc, m0_conf_process)->pc_endpoint);
1171 }
1172 
1179 static void reqh_service_ctx_abandon(struct m0_reqh_service_ctx *ctx)
1180 {
1181  /*
1182  * The context is unsubscribed by now. When configuration updates,
1183  * pools_common_conf_ready_cb() is always called after
1184  * pools_common_conf_expired_cb():
1185  *
1186  * read lock conflict
1187  * \_ pools_common_conf_expired_cb()
1188  * \_ m0_reqh_service_ctx_unsubscribe() <-- context clinks cleanup
1189  *
1190  * ... new conf version distribution ...
1191  *
1192  * read lock acquisition && full conf load
1193  * \_ pools_common_conf_ready_cb()
1194  * \_ pools_common__ctx_subscribe_or_abandon()
1195  * \_ reqh_service_ctx_abandon() <-- YOU ARE HERE
1196  */
1197  M0_PRE(ctx->sc_svc_event.cl_chan == NULL);
1198  M0_PRE(ctx->sc_process_event.cl_chan == NULL);
1199 
1200  /* Move the context to the list of abandoned ones. */
1201  M0_PRE(ctx->sc_pc != NULL);
1202  pools_common_svc_ctx_tlink_del_fini(ctx);
1203  pools_common_svc_ctx_tlink_init_at_tail(ctx, &ctx->sc_pc->
1204  pc_abandoned_svc_ctxs);
1205  /*
1206  * The service this context was created for has disappeared from the
1207  * conf, so start disconnecting it asynchronously if needed.
1208  */
1209  if (m0_reqh_service_ctx_is_connected(ctx))
1210  m0_reqh_service_disconnect(ctx);
1211  /*
1212  * The context is to be physically destroyed later when rpc link
1213  * disconnection is confirmed in reqh_service_ctx_ast_cb().
1214  */
1215 }
1216 
1223 static void pools_common__ctx_subscribe_or_abandon(struct m0_pools_common *pc)
1224 {
1225  struct m0_conf_cache *cache = &pc->pc_confc->cc_cache;
1226  struct m0_conf_obj *obj;
1227  struct m0_reqh_service_ctx *ctx;
1228 
1229  M0_ENTRY("pc %p", pc);
1230 
1231  m0_tl_for(pools_common_svc_ctx, &pc->pc_svc_ctxs, ctx) {
1233  obj = m0_conf_cache_lookup(cache, &ctx->sc_fid);
1235  ctx->sc_service = obj;
1236  ctx->sc_process = m0_conf_obj_grandparent(obj);
1237  if (!m0_clink_is_armed(&ctx->sc_svc_event) &&
1238  !m0_clink_is_armed(&ctx->sc_process_event))
1239  m0_reqh_service_ctx_subscribe(ctx);
1240  } else {
1241  reqh_service_ctx_abandon(ctx);
1242  }
1244  } m0_tl_endfor;
1245 
1246  M0_LEAVE();
1247 }
1248 
1250 {
1251  if (pc->pc_md_pool_linst != NULL)
1254  pc->pc_md_pool = NULL;
1255 }
1256 
1257 static void pools_common__pools_update_or_cleanup(struct m0_pools_common *pc)
1258 {
1259  struct m0_pool *pool;
1260  struct m0_pool_version *pver;
1261  struct m0_conf_cache *cache = &pc->pc_confc->cc_cache;
1262  struct m0_reqh *reqh = m0_confc2reqh(pc->pc_confc);
1263  bool in_conf;
1264 
1265  m0_tl_for(pools, &pc->pc_pools, pool) {
1266  in_conf = m0_conf_cache_contains(cache, &pool->po_id);
1267  M0_LOG(M0_DEBUG, "pool %p "FID_F" is %sknown", pool,
1268  FID_P(&pool->po_id), in_conf ? "":"not ");
1269 
1270  if (!in_conf) {
1271  if (pc->pc_md_pool == pool)
1273  if (pc->pc_dix_pool != NULL && pc->pc_dix_pool == pool)
1274  pc->pc_dix_pool = NULL;
1275  /* cleanup */
1276  pools_tlink_del_fini(pool);
1279  m0_pool_fini(pool);
1280  m0_free(pool);
1281  } else {
1282  m0_tl_for(pool_version, &pool->po_vers, pver) {
1283  if (!m0_conf_cache_contains(cache,
1284  &pver->pv_id)) {
1285  pool_version_tlink_del_fini(pver);
1286  pool_version__layouts_evict(pver,
1287  &reqh->rh_ldom);
1288  m0_pool_version_fini(pver);
1289  m0_free(pver);
1290  }
1291  } m0_tl_endfor;
1292  }
1293  } m0_tl_endfor;
1294 }
1295 
1296 static void pool__layouts_evict(struct m0_pool *pool,
1297  struct m0_layout_domain *ldom)
1298 {
1299 
1300  struct m0_pool_version *pver;
1301 
1302  m0_tl_for(pool_version, &pool->po_vers, pver) {
1303  pool_version__layouts_evict(pver, ldom);
1304  } m0_tl_endfor;
1305 
1306 }
1307 
1308 static void pool_version__layouts_evict(struct m0_pool_version *pv,
1309  struct m0_layout_domain *ldom)
1310 {
1311  struct m0_pdclust_attr *pa = &pv->pv_attr;
1312  uint64_t layout_id;
1313  struct m0_layout *layout;
1314  int i;
1315 
1316  for (i = M0_DEFAULT_LAYOUT_ID; i < m0_lid_to_unit_map_nr; ++i) {
1317  pa->pa_unit_size = m0_lid_to_unit_map[i];
1318  layout_id = m0_pool_version2layout_id(&pv->pv_id, i);
1320  layout = m0_layout_find(ldom, layout_id);
1321  if (layout != NULL) {
1322  m0_layout_put(layout);
1323  /*
1324  * Assumes that all users of the layout have put their
1325  * references back.
1326  */
1327  M0_ASSERT(m0_ref_read(&layout->l_ref) == 1);
1328  m0_layout_put(layout);
1329  }
1330  }
1331 }
1332 
1334 {
1335  int rc;
1336  struct m0_reqh *reqh = m0_confc2reqh(pc->pc_confc);
1337  struct m0_fid *pfid = m0_reqh2profile(reqh);
1338 
1339  M0_ENTRY("pc=%p", pc);
1340 
1341  if (m0_fid_eq(pfid, &M0_FID0))
1342  pfid = NULL;
1343 
1344  if (pc->pc_md_pool_linst != NULL)
1348  m0_pools_setup(pc, pfid, NULL, NULL) ?:
1351 
1352  return M0_RC(rc);
1353 }
1354 
1358 M0_INTERNAL bool m0_pools_common_conf_ready_async_cb(struct m0_clink *clink)
1359 {
1361  int rc;
1362 
1363  M0_ENTRY("pc %p", pc);
1365 
1367  if (pc->pc_rm_ctx == NULL) {
1368  /*
1369  * active_rm_ctx_create() calls m0_conf_service_get()
1370  * which requires configuration to be ready. Therefore,
1371  * we have it here instead of service_ctx_ha_entrypoint_cb().
1372  */
1373  rc = active_rm_ctx_create(pc, true);
1374  M0_ASSERT(rc == 0);
1376  M0_LOG(M0_DEBUG, "created new pc_rm_ctx=%p", pc->pc_rm_ctx);
1377  }
1379  M0_ASSERT(pc->pc_rm_ctx != NULL);
1380 
1382 
1384  /*
1385  * prepare for refreshing pools common:
1386  * - zero counters
1387  * - free runtime maps
1388  */
1389  M0_SET_ARR0(pc->pc_nr_svcs);
1390  m0_free0(&pc->pc_mds_map);
1391  m0_free0(&pc->pc_dev2svc);
1392  pc->pc_nr_devices = 0;
1393  /* cleanup outdated pools if any */
1394  pools_common__pools_update_or_cleanup(pc);
1395  /* add missing service contexts and re-build mds map */
1401  M0_POST(rc == 0);
1404  M0_LEAVE();
1405  return true;
1406 }
1407 
1409 {
1410  int rc;
1411 
1412  M0_ENTRY();
1413  rc = m0_conf_devices_count(pc->pc_confc,
1414  M0_BITS(M0_CST_IOS, M0_CST_CAS),
1415  &pc->pc_nr_devices);
1416  if (rc != 0)
1417  return M0_ERR(rc);
1418  M0_LOG(M0_DEBUG, "io/cas services device count: %u", pc->pc_nr_devices);
1419  M0_ALLOC_ARR(pc->pc_dev2svc, pc->pc_nr_devices);
1420  if (pc->pc_dev2svc == NULL)
1421  return M0_ERR(-ENOMEM);
1422  return M0_RC(0);
1423 }
1424 
1425 M0_INTERNAL int m0_pools_common_init(struct m0_pools_common *pc,
1426  struct m0_rpc_machine *rmach)
1427 {
1428  struct m0_confc *confc;
1429  struct m0_conf_root *root;
1430 
1431  M0_ENTRY();
1432  confc = m0_reqh2confc(rmach->rm_reqh);
1433  root = M0_CONF_CAST(confc->cc_root, m0_conf_root);
1434  return M0_RC(m0__pools_common_init(pc, rmach, root));
1435 }
1436 
1437 M0_INTERNAL int m0__pools_common_init(struct m0_pools_common *pc,
1438  struct m0_rpc_machine *rmach,
1439  struct m0_conf_root *root)
1440 {
1441  struct m0_reqh *reqh;
1442  int rc;
1443 
1444  M0_ENTRY();
1445  M0_PRE(pc != NULL);
1446  M0_PRE(rmach == NULL ||
1447  m0_reqh2confc(rmach->rm_reqh)->cc_root == &root->rt_obj);
1448 
1449  *pc = (struct m0_pools_common) {
1450  .pc_rmach = rmach,
1451  .pc_md_redundancy = root->rt_mdredundancy,
1452  .pc_confc = m0_confc_from_obj(&root->rt_obj),
1453  .pc_cur_pver = NULL,
1454  .pc_md_pool = NULL,
1455  .pc_dix_pool = NULL
1456  };
1457 
1459  if (rc != 0) {
1460  /* We want pools_common_invariant(pc) to fail. */
1461  pc->pc_confc = NULL;
1462  return M0_ERR(rc);
1463  }
1465  pools_common_svc_ctx_tlist_init(&pc->pc_abandoned_svc_ctxs);
1466  pools_common_svc_ctx_tlist_init(&pc->pc_svc_ctxs);
1467  pools_tlist_init(&pc->pc_pools);
1474 #ifndef __KERNEL__
1475  /*
1476  * m0_pools_common_conf_ready_async_cb() is called directly by
1477  * m0t1fs_ref_put_lock() in m0t1fs code. No need to register the
1478  * callback in kernel space.
1479  * @see m0t1fs_ref_put_lock()
1480  */
1483 #endif
1485  return M0_RC(0);
1486 }
1487 
1494 static int pools_common_refresh_locked(struct m0_pools_common *pc)
1495 {
1496  int rc;
1497 
1499  M0_PRE(pc->pc_mds_map == NULL);
1500 
1501  rc = service_ctxs_create(pc, pc->pc_rmach != NULL);
1502  if (rc != 0)
1503  return M0_ERR(rc);
1504 
1505  M0_ALLOC_ARR(pc->pc_mds_map,
1506  pools_common_svc_ctx_tlist_length(&pc->pc_svc_ctxs));
1507  rc = pc->pc_mds_map == NULL ? M0_ERR(-ENOMEM) :
1508  pool_mds_map_init(pc);
1509  if (rc != 0)
1510  goto err;
1511  pc->pc_rm_ctx = service_ctx_find_by_type(pc, M0_CST_RMS);
1512  if (pc->pc_rm_ctx == NULL) {
1513  rc = M0_ERR_INFO(-ENOENT, "The mandatory rmservice is missing. "
1514  "Make sure it is specified in the conf db.");
1515  goto err;
1516  }
1518  return M0_RC(0);
1519 err:
1520  m0_free0(&pc->pc_mds_map);
1522  return M0_ERR(rc);
1523 }
1524 
1525 static int pools_common_refresh(struct m0_pools_common *pc)
1526 {
1527  int rc;
1528 
1529  m0_pools_lock(pc);
1530  rc = pools_common_refresh_locked(pc);
1531  m0_pools_unlock(pc);
1532  return M0_RC(rc);
1533 }
1534 
1535 M0_INTERNAL int m0_pools_service_ctx_create(struct m0_pools_common *pc)
1536 {
1537  int rc;
1538 
1539  M0_ENTRY();
1540  M0_PRE(pc != NULL);
1541 
1542  rc = pools_common_refresh(pc);
1543  if (rc == 0)
1545  &pc->pc_ha_clink);
1546  return M0_RC(rc);
1547 }
1548 
1549 M0_INTERNAL void m0_pools_common_fini(struct m0_pools_common *pc)
1550 {
1551  M0_ENTRY();
1553 
1556  pools_common_svc_ctx_tlist_fini(&pc->pc_abandoned_svc_ctxs);
1557  pools_common_svc_ctx_tlist_fini(&pc->pc_svc_ctxs);
1558  pools_tlist_fini(&pc->pc_pools);
1559  m0_free0(&pc->pc_dev2svc);
1562 #ifndef __KERNEL__
1565 #endif
1567  /*
1568  * We want pools_common_invariant(pc) to fail after
1569  * m0_pools_common_fini().
1570  */
1571  pc->pc_confc = NULL;
1572  M0_LEAVE();
1573 }
1574 
1580 {
1581  struct m0_reqh_service_ctx *ctx;
1582 
1583  if (!m0_confc_is_inited(pc->pc_confc)) {
1584  /*
1585  * rconfc seems to have failed to start, or never tried to, so no
1586  * configuration was read, no conf updates were possible, and no
1587  * context could have been abandoned; thus there must be
1588  * nothing to clean up here.
1589  */
1590  M0_ASSERT(pools_common_svc_ctx_tlist_is_empty(
1591  &pc->pc_abandoned_svc_ctxs));
1592  return;
1593  }
1594 
1595  m0_tl_teardown(pools_common_svc_ctx, &pc->pc_abandoned_svc_ctxs, ctx) {
1596  /*
1597  * Any abandoned context was initially set for disconnection
1598  * (see reqh_service_ctx_abandon())
1599  *
1600  * So just wait here for disconnection completion if required.
1601  */
1602  m0_reqh_service_disconnect_wait(ctx);
1603  m0_reqh_service_ctx_destroy(ctx);
1604  }
1605 }
1606 
1607 M0_INTERNAL void
1608 m0_pools_common_service_ctx_connect_sync(struct m0_pools_common *pc)
1609 {
1610  struct m0_reqh_service_ctx *ctx;
1611 
1612  m0_tl_for (pools_common_svc_ctx, &pc->pc_svc_ctxs, ctx) {
1613  m0_reqh_service_connect_wait(ctx);
1614  } m0_tl_endfor;
1615 }
1616 
1617 M0_INTERNAL void m0_pools_service_ctx_destroy(struct m0_pools_common *pc)
1618 {
1619  M0_ENTRY();
1621  /* m0_cs_fini() calls this function even without m0_cs_start() */
1625  m0_free0(&pc->pc_mds_map);
1627  M0_LEAVE();
1628 }
1629 
1630 static bool is_actual_pver(const struct m0_conf_obj *obj)
1631 {
1636  return m0_conf_obj_type(obj) == &M0_CONF_PVER_TYPE &&
1637  M0_CONF_CAST(obj, m0_conf_pver)->pv_kind == M0_CONF_PVER_ACTUAL;
1638 }
1639 
1640 M0_INTERNAL int m0_pool_versions_setup(struct m0_pools_common *pc)
1641 {
1642  struct m0_confc *confc;
1643  struct m0_conf_diter it;
1644  struct m0_pool_version *pver;
1645  struct m0_conf_pver *pver_obj;
1646  struct m0_fid *pool_id;
1647  struct m0_pool *pool;
1648  struct m0_reqh *reqh = m0_confc2reqh(pc->pc_confc);
1649  int rc;
1650 
1651  M0_ENTRY();
1652 
1653  confc = pc->pc_confc;
1654  rc = M0_FI_ENABLED("diter_fail") ? -ENOMEM :
1655  m0_conf_diter_init(&it, confc, confc->cc_root,
1656  M0_CONF_ROOT_POOLS_FID,
1657  M0_CONF_POOL_PVERS_FID);
1658  if (rc != 0)
1659  return M0_ERR(rc);
1660 
1661  while ((rc = m0_conf_diter_next_sync(&it, is_actual_pver)) ==
1662  M0_CONF_DIRNEXT) {
1663  pver_obj = M0_CONF_CAST(m0_conf_diter_result(&it),
1664  m0_conf_pver);
1665  pool_id = &m0_conf_obj_grandparent(&pver_obj->pv_obj)->co_id;
1666  pool = m0_tl_find(pools, pool, &pc->pc_pools,
1667  m0_fid_eq(&pool->po_id, pool_id));
1668  if (pool == NULL)
1669  continue;
1670  pver = m0_tl_find(pool_version, pver, &pool->po_vers,
1671  m0_fid_eq(&pver_obj->pv_obj.co_id,
1672  &pver->pv_id));
1673  M0_LOG(M0_DEBUG, "%spver:"FID_F, pver != NULL ? "! ":"",
1674  FID_P(&pver_obj->pv_obj.co_id));
1675  if (pver != NULL) {
1676  /*
1677  * Version is already in pool, so we must be in pools
1678  * refreshing cycle.
1679  */
1680  rc = m0_pool_version_device_map_init(pver, pver_obj, pc);
1681  if (rc != 0)
1682  break;
1683  continue;
1684  }
1685  M0_ALLOC_PTR(pver);
1686  if (pver == NULL) {
1687  rc = M0_ERR(-ENOMEM);
1688  break;
1689  }
1690  rc = m0_pool_version_init_by_conf(pver, pver_obj, pool, pc) ?:
1691  m0_pool_version_device_map_init(pver, pver_obj, pc);
1692  if (rc != 0)
1693  break;
1694  }
1695 
1696  m0_conf_diter_fini(&it);
1697  if (rc != 0)
1699 
1700  return M0_RC(rc);
1701 }
1702 
1703 static int pool_from_virtual_pver(const struct m0_conf_pver *virtual,
1704  struct m0_confc *confc,
1705  struct m0_conf_pool **out)
1706 {
1707  struct m0_conf_root *root;
1708  const struct m0_conf_pver *fpver;
1709  int rc;
1710 
1711  M0_ENTRY("virtual="FID_F, FID_P(&virtual->pv_obj.co_id));
1712  M0_PRE(virtual->pv_kind == M0_CONF_PVER_VIRTUAL);
1713 
1714  rc = m0_confc_root_open(confc, &root);
1715  if (rc != 0)
1716  return M0_ERR_INFO(rc, "Cannot open root object");
1717  rc = m0_conf_pver_formulaic_from_virtual(virtual, root, &fpver);
1718  if (rc == 0)
1719  *out = M0_CONF_CAST(m0_conf_obj_grandparent(&fpver->pv_obj),
1720  m0_conf_pool);
1721  m0_confc_close(&root->rt_obj);
1722  return M0_RC(rc);
1723 }
1724 
1725 M0_INTERNAL int m0_pool_version_append(struct m0_pools_common *pc,
1726  struct m0_conf_pver *pver,
1727  struct m0_pool_version **pv)
1728 {
1729  struct m0_conf_pool *cp = NULL;
1730  struct m0_pool *p;
1731  int rc;
1732 
1733  M0_ENTRY();
1734 
1735  if (pver->pv_kind == M0_CONF_PVER_ACTUAL) {
1736  cp = M0_CONF_CAST(m0_conf_obj_grandparent(&pver->pv_obj),
1737  m0_conf_pool);
1738  } else {
1739  rc = pool_from_virtual_pver(pver, pc->pc_confc, &cp);
1740  if (rc != 0)
1741  return M0_ERR(rc);
1742  }
1743  M0_ASSERT(cp != NULL);
1744  p = pool_find(pc, &cp->pl_obj.co_id);
1745  M0_ASSERT(p != NULL);
1746 
1747  M0_ALLOC_PTR(*pv);
1748  if (*pv == NULL)
1749  return M0_ERR(-ENOMEM);
1750 
1751  rc = m0_pool_version_init_by_conf(*pv, pver, p, pc);
1752  if (rc != 0) {
1753  m0_free(*pv);
1754  return M0_ERR(rc);
1755  }
1757  NULL);
1758  if (rc != 0) {
1759  m0_pool_version_fini(*pv);
1760  m0_free(*pv);
1761  return M0_ERR(rc);
1762  }
1763  return M0_RC(0);
1764 }
1765 
1766 static int
1767 _pool_create(struct m0_pool **out, const struct m0_conf_pool *conf_pool)
1768 {
1769  int rc;
1770 
1771  M0_ALLOC_PTR(*out);
1772  if (*out == NULL)
1773  return M0_ERR(-ENOMEM);
1774  rc = m0_pool_init(*out, &conf_pool->pl_obj.co_id,
1775  conf_pool->pl_pver_policy);
1776  if (rc != 0)
1777  m0_free0(out);
1778  return M0_RC(rc);
1779 }
1780 
1781 static void
1782 dix_pool_setup(struct m0_pools_common *pc, const struct m0_fid *imeta_pver)
1783 {
1784  M0_ENTRY("imeta_pver="FID_F, FID_P(imeta_pver));
1785  if (m0_fid_is_set(imeta_pver)) {
1786  struct m0_conf_obj *pver;
1787  struct m0_conf_pool *pool;
1788  int rc;
1789 
1790  rc = m0_conf_obj_find_lock(&pc->pc_confc->cc_cache, imeta_pver,
1791  &pver);
1792  M0_ASSERT(rc == 0);
1793  pool = M0_CONF_CAST(m0_conf_obj_grandparent(pver),
1794  m0_conf_pool);
1795  pc->pc_dix_pool = pool_find(pc, &pool->pl_obj.co_id);
1797  M0_LOG(M0_DEBUG, "imeta_pver="FID_F" -> dix_pool="FID_F,
1798  FID_P(imeta_pver), FID_P(&pc->pc_dix_pool->po_id));
1799  }
1800  M0_LEAVE();
1801 }
1802 
1803 static bool profile_has_pool(const struct m0_conf_profile *profile,
1804  const struct m0_fid *pool)
1805 {
1806  return m0_exists(i, profile->cp_pools.af_count,
1807  m0_fid_eq(&profile->cp_pools.af_elems[i], pool));
1808 }
1809 
1810 M0_INTERNAL int m0_pools_setup(struct m0_pools_common *pc,
1811  const struct m0_fid *profile,
1812  struct m0_sm_group *sm_grp,
1813  struct m0_dtm *dtm)
1814 {
1815  int rc;
1816  struct m0_pool *pool;
1817  struct m0_conf_obj *pool_obj;
1818  struct m0_conf_root *root;
1819  struct m0_conf_profile *prof = NULL;
1820  struct m0_conf_diter it;
1821 
1822  M0_ENTRY("profile="FID_F, FID_P(profile ?: &M0_FID0));
1823 
1824  if (profile != NULL) {
1825  rc = m0_confc_profile_open(pc->pc_confc, profile, &prof);
1826  if (rc != 0)
1827  return M0_ERR_INFO(rc, "profile="FID_F, FID_P(profile));
1828  }
1829  root = M0_CONF_CAST(pc->pc_confc->cc_root, m0_conf_root);
1830  rc = M0_FI_ENABLED("diter_fail") ? M0_ERR(-ENOMEM) :
1831  m0_conf_diter_init(&it, pc->pc_confc, &root->rt_obj,
1832  M0_CONF_ROOT_POOLS_FID);
1833  if (rc != 0) {
1834  if (prof != NULL)
1835  m0_confc_close(&prof->cp_obj);
1836  return M0_ERR(rc);
1837  }
1839  M0_CONF_DIRNEXT) {
1840  pool_obj = m0_conf_diter_result(&it);
1841  M0_LOG(M0_DEBUG, "pool="FID_F, FID_P(&pool_obj->co_id));
1842  if (prof != NULL && !profile_has_pool(prof, &pool_obj->co_id))
1843  continue;
1844  pool = pool_find(pc, &pool_obj->co_id);
1845  if (pool != NULL)
1846  /*
1847  * Pool is already in pools common, so we must be in
1848  * pools refreshing cycle.
1849  */
1850  continue;
1851  rc = _pool_create(&pool, M0_CONF_CAST(pool_obj, m0_conf_pool));
1852  if (rc != 0)
1853  break;
1854  pools_tlink_init_at_tail(pool, &pc->pc_pools);
1855  }
1856  m0_conf_diter_fini(&it);
1857  if (prof != NULL)
1858  m0_confc_close(&prof->cp_obj);
1859  if (rc != 0) {
1861  return M0_ERR(rc);
1862  }
1863  /* MD pool setup. */
1864  pc->pc_md_pool = pool_find(pc, &root->rt_mdpool);
1865  if (pc->pc_md_pool == NULL) {
1867  return M0_ERR_INFO(-ENOENT, "Cannot find metadata pool "FID_F,
1868  FID_P(&root->rt_mdpool));
1869  }
1870  M0_LOG(M0_DEBUG, "md_pool="FID_F, FID_P(&root->rt_mdpool));
1871 
1872  dix_pool_setup(pc, &root->rt_imeta_pver);
1873  return M0_RC(0);
1874 }
1875 
1876 M0_INTERNAL void m0_pool_versions_destroy(struct m0_pools_common *pc)
1877 {
1878  struct m0_pool *p;
1879 
1880  M0_ENTRY();
1881  m0_tl_for(pools, &pc->pc_pools, p) {
1882  m0_pool_versions_fini(p);
1883  } m0_tl_endfor;
1884  M0_LEAVE();
1885 }
1886 
1887 M0_INTERNAL void m0_pools_destroy(struct m0_pools_common *pc)
1888 {
1889  struct m0_pool *p;
1890 
1891  M0_ENTRY();
1892  m0_tl_teardown(pools, &pc->pc_pools, p) {
1893  m0_pool_fini(p);
1894  m0_free(p);
1895  }
1896  M0_LEAVE();
1897 }
1898 
1899 M0_INTERNAL uint64_t
1900 m0_pool_version2layout_id(const struct m0_fid *pv_fid, uint64_t lid)
1901 {
1902  return m0_hash(m0_fid_hash(pv_fid) + lid);
1903 }
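/*
 * Illustrative usage (a sketch, not taken verbatim from this source): a
 * caller holding a pool version pv, a generic layout id lid and a layout
 * domain ldom would look the pver-specific layout up as
 *
 *	layout = m0_layout_find(ldom,
 *				m0_pool_version2layout_id(&pv->pv_id, lid));
 *
 * which is the same pattern pool_version__layouts_evict() above relies on.
 */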
1904 
1905 M0_INTERNAL uint32_t m0_ha2pm_state_map(enum m0_ha_obj_state hastate)
1906 {
1907  uint32_t ha2pm_statemap [] = {
1915  };
1916  M0_ASSERT (hastate < M0_NC_NR);
1917  return ha2pm_statemap[hastate];
1918 }
1919 
1920 static bool nodes_poolmach_state_update_cb(struct m0_clink *cl)
1921 {
1922  struct m0_poolmach_event pme;
1923  struct m0_conf_obj *obj =
1925  struct m0_poolnode *pnode =
1927 
1928  M0_ENTRY();
1930 
1931  pme.pe_type = M0_POOL_NODE;
1932  pme.pe_index = pnode->pn_index;
1933  pme.pe_state = m0_ha2pm_state_map(obj->co_ha_state);
1934  M0_LOG(M0_DEBUG, "pe_type=%6s pe_index=%x, pe_state=%10d",
1935  pme.pe_type == M0_POOL_DEVICE ? "device":"node",
1936  pme.pe_index, pme.pe_state);
1937 
1938  return M0_RC(m0_poolmach_state_transit(pnode->pn_pm, &pme));
1939 }
1940 
1941 static bool disks_poolmach_state_update_cb(struct m0_clink *cl)
1942 {
1943  struct m0_poolmach_event pme;
1944  struct m0_conf_obj *obj =
1946  struct m0_pooldev *pdev =
1947  container_of(cl, struct m0_pooldev, pd_clink.bc_u.clink);
1948 
1949  M0_ENTRY();
1951 
1952  pme.pe_type = M0_POOL_DEVICE;
1953  pme.pe_index = pdev->pd_index;
1954  pme.pe_state = m0_ha2pm_state_map(obj->co_ha_state);
1955  M0_LOG(M0_DEBUG, "pe_type=%6s pe_index=%x, pe_state=%10d",
1956  pme.pe_type == M0_POOL_DEVICE ? "device":"node",
1957  pme.pe_index, pme.pe_state);
1958 
1959  return M0_RC(m0_poolmach_state_transit(pdev->pd_pm, &pme));
1960 }
1961 
1962 M0_INTERNAL void m0_poolnode_clink_del(struct m0_clink *cl)
1963 {
1964  if (M0_FI_ENABLED("do_nothing_for_poolmach-ut")) {
1965  /*
1966  * The poolmach-ut does not add/register clink in poolnode.
1967  * So we need to skip deleting the links if this is called
1968  * during poolmach-ut.
1969  * TODO: This is a workaround & can be addressed differently.
1970  */
1971  return;
1972  }
1973  m0_clink_del_lock(cl);
1974  m0_clink_fini(cl);
1975 }
1976 
1977 M0_INTERNAL void m0_poolnode_clink_add(struct m0_clink *link,
1978  struct m0_chan *chan)
1979 {
1980  m0_clink_init(link, nodes_poolmach_state_update_cb);
1981  m0_clink_add_lock(chan, link);
1982 }
1983 
1984 M0_INTERNAL void m0_pooldev_clink_del(struct m0_clink *cl)
1985 {
1986  if (M0_FI_ENABLED("do_nothing_for_poolmach-ut")) {
1987  /*
1988  * The poolmach-ut does not add/register clink in pooldev.
1989  * So we need to skip deleting the links if this is called
1990  * during poolmach-ut.
1991  * TODO: This is a workaround & can be addressed differently.
1992  */
1993  return;
1994  }
1995  m0_clink_del_lock(cl);
1996  m0_clink_fini(cl);
1997 }
1998 
1999 M0_INTERNAL void m0_pooldev_clink_add(struct m0_clink *link,
2000  struct m0_chan *chan)
2001 {
2002  m0_clink_init(link, disks_poolmach_state_update_cb);
2003  m0_clink_add_lock(chan, link);
2004 }
2005 
2006 #ifndef __KERNEL__
2007 
2011 M0_INTERNAL int m0_pool_device_reopen(struct m0_poolmach *pm,
2012  struct m0_reqh *reqh)
2013 {
2014  struct m0_pool_spare_usage *spare_array;
2015  struct m0_pooldev *dev_array;
2016  uint32_t dev_id;
2017  int i;
2018  int rc = 0;
2019 
2020  dev_array = pm->pm_state->pst_devices_array;
2021  spare_array = pm->pm_state->pst_spare_usage_array;
2022  for (i = 0; spare_array[i].psu_device_index !=
2024  dev_id = spare_array[i].psu_device_index;
2025  if (dev_array[dev_id].pd_state == M0_PNDS_SNS_REPAIRED) {
2026  rc = m0_motr_stob_reopen(reqh, pm, dev_id);
2027  if (rc != 0)
2028  return M0_ERR(rc);
2029  }
2030  }
2031  return M0_RC(rc);
2032 }
2033 
2034 static int pool_device_index(const struct m0_poolmach *pm,
2035  const struct m0_fid *fid)
2036 {
2037  int j;
2038  return m0_exists(i, pm->pm_state->pst_nr_devices,
2041 }
2042 
2043 static void pool_device_state_last_revert(struct m0_pools_common *pc,
2044  struct m0_fid *dev_fid,
2045  struct m0_poolmach *pm_stop)
2046 {
2047  struct m0_pool *pool;
2048  struct m0_pool_version *pver;
2049  struct m0_poolmach *pm;
2050  int dev_idx;
2051 
2052  m0_tl_for(pools, &pc->pc_pools, pool) {
2053  m0_tl_for(pool_version, &pool->po_vers, pver) {
2054  pm = &pver->pv_mach;
2055  if (pm == pm_stop)
2056  return;
2057  dev_idx = pool_device_index(pm, dev_fid);
2058  if (dev_idx != POOL_DEVICE_INDEX_INVALID)
2060  } m0_tl_endfor;
2061  } m0_tl_endfor;
2062 }
2063 
2068 M0_INTERNAL int m0_pool_device_state_update(struct m0_reqh *reqh,
2069  struct m0_be_tx *tx,
2070  struct m0_fid *dev_fid,
2071  enum m0_pool_nd_state new_state)
2072 {
2073  struct m0_pools_common *pc = reqh->rh_pools;
2074  struct m0_pool *pool;
2075  struct m0_pool_version *pver;
2076  struct m0_poolmach *pm;
2077  int rc;
2078  int dev_idx;
2079  struct m0_poolmach_event pme;
2080 
2081  m0_tl_for(pools, &pc->pc_pools, pool) {
2082  m0_tl_for(pool_version, &pool->po_vers, pver) {
2083  pm = &pver->pv_mach;
2084  dev_idx = pool_device_index(pm, dev_fid);
2085  if (dev_idx != POOL_DEVICE_INDEX_INVALID) {
2086  pme.pe_type = M0_POOL_DEVICE;
2087  pme.pe_index = dev_idx;
2088  pme.pe_state = new_state;
2089  rc = m0_poolmach_state_transit(pm, &pme);
2090  if (rc != 0) {
2091  pool_device_state_last_revert(pc,
2092  dev_fid,
2093  pm);
2094  return M0_ERR(rc);
2095  }
2096  }
2097  } m0_tl_endfor;
2098  } m0_tl_endfor;
2099 
2100  return M0_RC(0);
2101 }
2102 
2103 #endif /* !__KERNEL__ */
2104 
2105 M0_INTERNAL void m0_pools_lock(struct m0_pools_common *pc)
2106 {
2107  m0_mutex_lock(&pc->pc_mutex);
2108 }
2109 
2110 M0_INTERNAL void m0_pools_unlock(struct m0_pools_common *pc)
2111 {
2112  m0_mutex_unlock(&pc->pc_mutex);
2113 }
2114 
2115 M0_INTERNAL bool m0_pools_is_locked(struct m0_pools_common *pc)
2116 {
2117  return m0_mutex_is_locked(&pc->pc_mutex);
2118 }
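/*
 * Illustrative locking pattern (an assumption based on the XXX TODOs near
 * pool_find()/m0_pool_find(), not a documented contract): once pools-common
 * locking is fixed in all update paths, lookups are expected to be wrapped as
 *
 *	m0_pools_lock(pc);
 *	pool = m0_pool_find(pc, &pool_fid);
 *	...
 *	m0_pools_unlock(pc);
 *
 * where pool_fid is the caller's pool fid.
 */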
2119 
2121 #undef M0_TRACE_SUBSYSTEM
2122 
2123 /*
2124  * Local variables:
2125  * c-indentation-style: "K&R"
2126  * c-basic-offset: 8
2127  * tab-width: 8
2128  * fill-column: 80
2129  * scroll-step: 1
2130  * End:
2131  */
const struct m0_conf_obj_type * m0_conf_obj_type(const struct m0_conf_obj *obj)
Definition: obj.c:363
struct m0_fid co_id
Definition: obj.h:208
M0_INTERNAL struct m0_layout * m0_layout_find(struct m0_layout_domain *dom, uint64_t lid)
Definition: layout.c:861
M0_INTERNAL int m0_dix_pool_version_get(struct m0_pools_common *pc, struct m0_pool_version **pv)
Definition: pool.c:704
static bool service_ctx_ha_entrypoint_cb(struct m0_clink *clink)
Definition: pool.c:1044
struct m0_layout_instance * pc_md_pool_linst
Definition: pool.h:214
static bool pools_common_conf_expired_cb(struct m0_clink *clink)
Definition: pool.c:1110
struct m0_poolmach_state * pm_state
Definition: pool_machine.h:169
Definition: beck.c:235
uint64_t id
Definition: cob.h:2380
static struct m0_reqh_service_ctx * active_rm_ctx_find(struct m0_pools_common *pc)
Definition: pool.c:974
static const struct m0_bob_type pver_bob
Definition: pool.c:224
struct m0_fid hae_active_rm_fid
static int pool_mds_map_init(struct m0_pools_common *pc)
Definition: pool.c:394
static struct m0_addb2_philter p
Definition: consumer.c:40
M0_INTERNAL void m0_pools_common_fini(struct m0_pools_common *pc)
Definition: pool.c:1549
static int __service_ctx_create(struct m0_pools_common *pc, struct m0_conf_service *cs, bool services_connect)
Definition: pool.c:891
struct m0_conf_obj * cc_root
Definition: confc.h:404
static bool is_dix_pool(const struct m0_pools_common *pc, const struct m0_pool *pool)
Definition: pool.c:624
const char * pc_endpoint
Definition: obj.h:590
M0_INTERNAL int m0_pool_version_append(struct m0_pools_common *pc, struct m0_conf_pver *pver, struct m0_pool_version **pv)
Definition: pool.c:1725
M0_INTERNAL int m0_fd_tile_build(const struct m0_conf_pver *pv, struct m0_pool_version *pool_ver, uint32_t *failure_level)
Definition: fd.c:260
#define M0_PRE(cond)
struct m0_ha_entrypoint_client h_entrypoint_client
Definition: ha.h:303
#define M0_ALLOC_ARR(arr, nr)
Definition: memory.h:84
const struct m0_conf_obj_type M0_CONF_OBJV_TYPE
Definition: objv.c:151
M0_INTERNAL void m0_pool_fini(struct m0_pool *pool)
Definition: pool.c:322
static bool is_mds(const struct m0_conf_obj *obj)
Definition: pool.c:346
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
M0_INTERNAL int m0_poolmach_state_transit(struct m0_poolmach *pm, const struct m0_poolmach_event *event)
Definition: pool_machine.c:554
struct m0_fid sc_fid
Definition: reqh_service.h:751
struct m0_ref l_ref
Definition: layout.h:234
struct m0_conf_obj ce_obj
Definition: obj.h:680
struct m0_tl po_vers
Definition: pool.h:84
M0_INTERNAL uint64_t m0_fid_hash(const struct m0_fid *fid)
Definition: fid.c:295
#define NULL
Definition: misc.h:38
uint32_t pst_nr_devices
Definition: pool_machine.h:108
M0_INTERNAL void m0_clink_init(struct m0_clink *link, m0_chan_cb_t cb)
Definition: chan.c:201
struct m0_pool * pc_dix_pool
Definition: pool.h:237
static bool profile_has_pool(const struct m0_conf_profile *profile, const struct m0_fid *pool)
Definition: pool.c:1803
uint64_t pa_unit_size
Definition: pdclust.h:118
m0_conf_service_type
Definition: schema.h:194
M0_INTERNAL void m0_clink_del_lock(struct m0_clink *link)
Definition: chan.c:293
M0_INTERNAL int m0_confc_profile_open(struct m0_confc *confc, const struct m0_fid *fid, struct m0_conf_profile **out)
Definition: helpers.c:234
struct m0_pool_version * pm_pver
Definition: pool_machine.h:172
M0_INTERNAL int m0_motr_stob_reopen(struct m0_reqh *reqh, struct m0_poolmach *pm, uint32_t dev_id)
Definition: setup.c:3043
M0_INTERNAL struct m0_pool_version * m0_pool_version_find(struct m0_pools_common *pc, const struct m0_fid *id)
Definition: pool.c:586
M0_INTERNAL int m0_pool_version_init_by_conf(struct m0_pool_version *pv, struct m0_conf_pver *pver, struct m0_pool *pool, struct m0_pools_common *pc)
Definition: pool.c:748
struct m0_chan rh_conf_cache_ready_async
Definition: reqh.h:227
uint32_t pa_N
Definition: pdclust.h:104
struct m0_conf_obj rt_obj
Definition: obj.h:372
M0_INTERNAL struct m0_conf_obj * m0_conf_cache_lookup(const struct m0_conf_cache *cache, const struct m0_fid *id)
Definition: cache.c:106
struct m0_conf_obj pl_obj
Definition: obj.h:432
struct m0_pool_version * pv
Definition: dir.c:629
struct m0_poolmach pv_mach
Definition: pool.h:133
#define M0_LOG(level,...)
Definition: trace.h:167
struct m0_mutex pc_rm_lock
Definition: pool.h:218
M0_LEAVE()
M0_INTERNAL bool m0_pools_is_locked(struct m0_pools_common *pc)
Definition: pool.c:2115
uint32_t pv_fd_tol_vec[M0_CONF_PVER_HEIGHT]
Definition: pool.h:141
const struct m0_conf_obj_type M0_CONF_PVER_TYPE
Definition: pver.c:260
const struct m0_conf_obj_type M0_CONF_SERVICE_TYPE
Definition: service.c:156
M0_INTERNAL int m0_poolmach_init_by_conf(struct m0_poolmach *pm, struct m0_conf_pver *pver)
Definition: pool_machine.c:236
uint32_t pn_index
Definition: pool.h:399
struct m0_reqh_service_ctx * pc_rm_ctx
Definition: pool.h:183
M0_INTERNAL void m0_reqh_service_ctx_subscribe(struct m0_reqh_service_ctx *ctx)
Definition: reqh_service.c:693
M0_INTERNAL void m0_uint128_init(struct m0_uint128 *u128, const char *magic)
Definition: misc.c:150
uint32_t pa_K
Definition: pdclust.h:107
struct m0_ha * i_ha
Definition: instance.h:108
const struct m0_pver_policy_ops * pp_ops
Definition: policy.h:48
M0_INTERNAL void m0_poolnode_clink_del(struct m0_clink *cl)
Definition: pool.c:1962
M0_INTERNAL bool m0_reqh_service_ctx_is_connected(const struct m0_reqh_service_ctx *ctx)
Definition: reqh_service.c:760
Definition: note.h:172
M0_INTERNAL int m0_conf_diter_next_sync(struct m0_conf_diter *it, bool(*filter)(const struct m0_conf_obj *obj))
Definition: diter.c:555
M0_INTERNAL void m0_reqh_service_ctx_destroy(struct m0_reqh_service_ctx *ctx)
struct m0_pool * pc_md_pool
Definition: pool.h:212
static struct m0_be_emap_cursor it
Definition: extmap.c:46
struct m0_conf_obj pv_obj
Definition: obj.h:533
M0_INTERNAL enum m0_ha_entrypoint_client_state m0_ha_entrypoint_client_state_get(struct m0_ha_entrypoint_client *ecl)
Definition: entrypoint.c:829
void(* ppo_fini)(struct m0_pver_policy *pver_policy)
Definition: policy.h:70
M0_INTERNAL bool m0_clink_is_armed(const struct m0_clink *link)
Definition: chan.c:303
static struct m0_addb2_mach * mach
Definition: storage.c:42
#define m0_exists(var, nr,...)
Definition: misc.h:134
#define M0_BITS(...)
Definition: misc.h:236
uint32_t pa_S
Definition: pdclust.h:110
M0_INTERNAL struct m0 * m0_get(void)
Definition: instance.c:41
static int pools_common_refresh_locked(struct m0_pools_common *pc)
Definition: pool.c:1494
int(* ppo_get)(struct m0_pools_common *pc, const struct m0_pool *pool, struct m0_pool_version **pver)
Definition: policy.h:77
static bool is_actual_pver(const struct m0_conf_obj *obj)
Definition: pool.c:1630
struct m0_conf_cache * co_cache
Definition: obj.h:251
#define container_of(ptr, type, member)
Definition: misc.h:33
static char * prof
Definition: st_kmain.c:50
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
M0_INTERNAL int m0_pools_setup(struct m0_pools_common *pc, const struct m0_fid *profile, struct m0_sm_group *sm_grp, struct m0_dtm *dtm)
Definition: pool.c:1810
M0_INTERNAL void m0_reqh_service_connect_wait(struct m0_reqh_service_ctx *ctx)
Definition: reqh_service.c:822
M0_INTERNAL const char * m0_rpc_machine_ep(const struct m0_rpc_machine *rmach)
Definition: rpc_machine.c:603
struct m0_pool * pv_pool
Definition: pool.h:128
bool pv_is_dirty
Definition: pool.h:116
M0_INTERNAL bool m0_fid_is_set(const struct m0_fid *fid)
Definition: fid.c:106
struct m0_fop_getxattr_rep * rep
Definition: dir.c:455
static struct foo * obj
Definition: tlist.c:302
const char * bt_name
Definition: bob.h:73
M0_INTERNAL struct m0_reqh_service_ctx * m0_pools_common_service_ctx_find(const struct m0_pools_common *pc, const struct m0_fid *id, enum m0_conf_service_type type)
Definition: pool.c:1095
struct m0_poolmach * pn_pm
Definition: pool.h:397
static struct m0_pools_common pc
Definition: iter_ut.c:59
int m0_lid_to_unit_map[]
Definition: layout_pver.c:99
M0_TL_DESCR_DEFINE(be_pool, "list of be_pool_items", static, struct m0_be_pool_item, bpli_link, bpli_magic, M0_BE_POOL_MAGIC, M0_BE_POOL_HEAD_MAGIC)
M0_INTERNAL int m0_pool_init(struct m0_pool *pool, const struct m0_fid *id, enum m0_pver_policy_code pver_policy)
Definition: pool.c:307
static bool is_local_svc(const struct m0_conf_service *svc, enum m0_conf_service_type stype)
Definition: pool.c:935
uint64_t pc_nr_svcs[M0_CST_NR]
Definition: pool.h:190
#define m0_tl_endfor
Definition: tlist.h:700
struct m0_pooldev * pst_devices_array
Definition: pool_machine.h:111
struct m0_fid fid
Definition: di.c:46
struct m0_be_clink pd_clink
Definition: pool.h:447
M0_INTERNAL int m0_confc_root_open(struct m0_confc *confc, struct m0_conf_root **root)
Definition: helpers.c:219
return M0_RC(rc)
Definition: sock.c:754
uint32_t pl_pver_policy
Definition: obj.h:449
static int pools_common_refresh(struct m0_pools_common *pc)
Definition: pool.c:1525
M0_INTERNAL int m0_poolmach_spare_build(struct m0_poolmach *mach, struct m0_pool *pool, enum m0_conf_pver_kind kind)
#define M0_ENTRY(...)
Definition: trace.h:170
M0_INTERNAL struct m0_pool_version * m0_pool_version_md_get(const struct m0_pools_common *pc)
Definition: pool.c:841
M0_INTERNAL struct m0_fid * m0_reqh2profile(struct m0_reqh *reqh)
Definition: reqh.c:758
static bool obj_is_ios_cas_diskv(const struct m0_conf_obj *obj)
Definition: pool.c:440
uint32_t sd_dev_idx
Definition: obj.h:635
int i
Definition: dir.c:1033
M0_INTERNAL int m0_fd_tree_build(const struct m0_conf_pver *pv, struct m0_fd_tree *tree)
Definition: fd.c:598
static struct m0_pool * pool_find(struct m0_pools_common *pc, const struct m0_fid *pool)
Definition: pool.c:240
#define PRIu64
Definition: types.h:58
M0_INTERNAL int m0_reqh_service_disconnect_wait(struct m0_reqh_service_ctx *ctx)
Definition: reqh_service.c:831
#define M0_SET_ARR0(arr)
Definition: misc.h:72
static void pools_common__pools_update_or_cleanup(struct m0_pools_common *pc)
Definition: pool.c:1257
#define M0_ERR_INFO(rc, fmt,...)
Definition: trace.h:215
struct m0_reqh_service_ctx ** pc_mds_map
Definition: pool.h:180
struct m0_conf_root * root
Definition: note.c:50
return M0_ERR(-EOPNOTSUPP)
M0_INTERNAL bool m0_pools_common_conf_ready_async_cb(struct m0_clink *clink)
Definition: pool.c:1358
const char ** cs_endpoints
Definition: obj.h:605
M0_INTERNAL void m0_poolmach_fop_fini(void)
Definition: pool_fops.c:42
M0_INTERNAL void m0_pool_versions_stale_mark(struct m0_pools_common *pc, struct m0_confc_update_state *s)
Definition: pool.c:811
M0_INTERNAL int m0__pools_common_init(struct m0_pools_common *pc, struct m0_rpc_machine *rmach, struct m0_conf_root *root)
Definition: pool.c:1437
M0_INTERNAL const char * m0_pool_dev_state_to_str(enum m0_pool_nd_state state)
Definition: pool.c:290
static bool m0_conf_service_type_is_valid(enum m0_conf_service_type t)
Definition: schema.h:204
M0_INTERNAL uint64_t m0_pool_version2layout_id(const struct m0_fid *pv_fid, uint64_t lid)
Definition: pool.c:1900
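A hedged sketch of how a pver-specific layout id might be derived from a generic layout id; the helper is hypothetical, and pv_id is the pool version fid field indexed on this page.
/* Hypothetical helper: combine a pool version fid with a generic
 * layout id to obtain the layout id used for objects in that pver. */
static uint64_t example_layout_id(const struct m0_pool_version *pv,
                                  uint64_t                      base_lid)
{
        return m0_pool_version2layout_id(&pv->pv_id, base_lid);
}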
M0_INTERNAL int m0_pool_version_get(struct m0_pools_common *pc, const struct m0_fid *pool, struct m0_pool_version **pv)
Definition: pool.c:662
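An illustrative caller of the pool version selection; the helper name is an assumption, and the failure report reuses the M0_ERR_INFO()/FID_F conventions indexed here.
/* Hypothetical helper: pick a usable pool version for the given pool
 * and report a failure with the pool fid attached. */
static int example_pver_choose(struct m0_pools_common  *pc,
                               const struct m0_fid     *pool_fid,
                               struct m0_pool_version **pv)
{
        int rc = m0_pool_version_get(pc, pool_fid, pv);

        if (rc != 0)
                return M0_ERR_INFO(rc, "no usable pool version for "FID_F,
                                   FID_P(pool_fid));
        return M0_RC(0);
}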
M0_INTERNAL struct m0_confc * m0_reqh2confc(struct m0_reqh *reqh)
Definition: reqh.c:753
static bool pool_version_invariant(const struct m0_pool_version *pv)
Definition: pool.c:335
#define M0_AMB(obj, ptr, field)
Definition: misc.h:320
const struct m0_conf_obj_type M0_CONF_ENCLOSURE_TYPE
Definition: enclosure.c:140
static const struct socktype stype[]
Definition: sock.c:1156
#define m0_tl_teardown(name, head, obj)
Definition: tlist.h:708
struct m0_fid pv_id
Definition: pool.h:113
struct m0_fid rt_imeta_pver
Definition: obj.h:403
M0_INTERNAL struct m0_pver_policy_type * m0_pver_policy_type_find(enum m0_pver_policy_code code)
Definition: policy.c:141
M0_INTERNAL int m0_reqh_service_ctx_create(struct m0_conf_obj *svc_obj, enum m0_conf_service_type stype, struct m0_rpc_machine *rmach, const char *addr, uint32_t max_rpc_nr_in_flight, struct m0_reqh_service_ctx **out)
M0_INTERNAL int m0_conf_devices_count(struct m0_confc *confc, uint64_t svc_types, uint32_t *nr_devices)
Definition: helpers.c:450
struct m0_fd_tree pv_fd_tree
Definition: pool.h:139
M0_INTERNAL int m0_pools_init(void)
Definition: pool.c:273
M0_INTERNAL int m0_pool_device_reopen(struct m0_poolmach *pm, struct m0_reqh *reqh)
Definition: pool.c:2011
M0_BOB_DEFINE(static, &pver_bob, m0_pool_version)
#define m0_free0(pptr)
Definition: memory.h:77
struct m0_clink pc_conf_exp
Definition: pool.h:230
int(* ppto_create)(struct m0_pver_policy **out)
Definition: policy.h:53
struct m0_poolmach * pd_pm
Definition: pool.h:442
M0_INTERNAL int m0_poolmach_fop_init(void)
Definition: pool_fops.c:56
#define M0_ASSERT(cond)
M0_INTERNAL void m0_reqh_service_connect(struct m0_reqh_service_ctx *ctx)
Definition: reqh_service.c:730
m0_pver_policy_code
Definition: policy.h:40
static bool disks_poolmach_state_update_cb(struct m0_clink *cl)
Definition: pool.c:1941
static struct m0_confc * confc
Definition: file.c:94
M0_INTERNAL bool m0_mutex_is_locked(const struct m0_mutex *mutex)
Definition: mutex.c:95
#define M0_PDCLUST_SEED
Definition: pdclust.h:76
M0_INTERNAL int m0_pool_version_device_map_init(struct m0_pool_version *pv, struct m0_conf_pver *pver, struct m0_pools_common *pc)
Definition: pool.c:447
M0_INTERNAL void m0_pools_common_service_ctx_connect_sync(struct m0_pools_common *pc)
Definition: pool.c:1608
struct m0_fid pver
Definition: idx_dix.c:74
static struct net_test_cmd_node nodes[NTC_MULTIPLE_NODES]
Definition: commands.c:74
m0_pool_nd_state
Definition: pool_machine.h:57
M0_INTERNAL void m0_fd_tree_destroy(struct m0_fd_tree *tree)
Definition: fd.c:785
struct m0_conf_obj * m0_conf_obj_grandparent(const struct m0_conf_obj *obj)
Definition: obj.c:384
uint32_t pc_nr_devices
Definition: pool.h:196
static void pool_version__layouts_evict(struct m0_pool_version *pv, struct m0_layout_domain *ldom)
Definition: pool.c:1308
#define m0_streq(a, b)
Definition: string.h:34
struct m0_fd_tile pv_fd_tile
Definition: pool.h:136
M0_INTERNAL int m0_pool_version_init(struct m0_pool_version *pv, const struct m0_fid *id, struct m0_pool *pool, uint32_t pool_width, uint32_t nr_nodes, uint32_t nr_data, uint32_t nr_failures, uint32_t nr_spare)
Definition: pool.c:522
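A geometry sketch under assumed values: 8 data units, 2 parity/failure units and 2 spares spread over 12 devices on 4 nodes, so the pool width accommodates the data, parity and spare units. Production values come from the conf pool version object; the helper name is illustrative.
/* Hypothetical helper: build a pool version with a small pdclust-style
 * geometry; real values are taken from the configuration tree. */
static int example_pver_build(struct m0_pool_version *pv,
                              const struct m0_fid    *id,
                              struct m0_pool         *pool)
{
        return M0_RC(m0_pool_version_init(pv, id, pool,
                                          12,   /* pool_width  */
                                          4,    /* nr_nodes    */
                                          8,    /* nr_data     */
                                          2,    /* nr_failures */
                                          2));  /* nr_spare    */
}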
m0_ha_obj_state
Definition: note.h:119
struct m0_tl rh_rpc_machines
Definition: reqh.h:135
static void pool_device_state_last_revert(struct m0_pools_common *pc, struct m0_fid *dev_fid, struct m0_poolmach *pm_stop)
Definition: pool.c:2043
M0_INTERNAL void m0_pool_versions_fini(struct m0_pool *pool)
Definition: pool.c:801
struct m0_conf_cache cc_cache
Definition: confc.h:394
M0_INTERNAL struct m0_reqh * m0_conf_obj2reqh(const struct m0_conf_obj *obj)
Definition: helpers.c:351
static void pools_common__ctx_subscribe_or_abandon(struct m0_pools_common *pc)
Definition: pool.c:1223
int layout_id
Definition: dir.c:331
M0_INTERNAL void m0_pools_service_ctx_destroy(struct m0_pools_common *pc)
Definition: pool.c:1617
M0_INTERNAL bool m0_disk_is_of_type(const struct m0_conf_obj *obj, uint64_t svc_types)
Definition: helpers.c:418
M0_INTERNAL int m0_reqh_mdpool_layout_build(struct m0_reqh *reqh)
Definition: reqh.c:145
static bool obj_is_enclosurev(const struct m0_conf_obj *obj)
Definition: pool.c:387
M0_INTERNAL void m0_mutex_init(struct m0_mutex *mutex)
Definition: mutex.c:35
M0_INTERNAL int m0_layout_init_by_pver(struct m0_layout_domain *dom, struct m0_pool_version *pv, int *count)
Definition: layout_pver.c:118
M0_INTERNAL int m0_conf_obj_find_lock(struct m0_conf_cache *cache, const struct m0_fid *id, struct m0_conf_obj **out)
Definition: obj_ops.c:154
#define M0_POST(cond)
uint32_t rt_mdredundancy
Definition: obj.h:405
struct m0_chan co_ha_chan
Definition: obj.h:248
struct m0_pools_common * pv_pc
Definition: pool.h:130
bool pv_is_stale
Definition: pool.h:119
const struct m0_conf_obj_type M0_CONF_DRIVE_TYPE
Definition: drive.c:108
Definition: reqh.h:94
struct m0_layout_domain rh_ldom
Definition: reqh.h:153
M0_INTERNAL void m0_pools_fini(void)
Definition: pool.c:283
M0_INTERNAL void m0_clink_cleanup(struct m0_clink *link)
Definition: chan.c:310
M0_INTERNAL void m0_reqh_service_ctx_unsubscribe(struct m0_reqh_service_ctx *ctx)
Definition: reqh_service.c:702
struct m0_fid pd_id
Definition: pool.h:428
struct m0_fid rt_mdpool
Definition: obj.h:394
static int pools_common__update_by_conf(struct m0_pools_common *pc)
Definition: pool.c:1333
Definition: chan.h:229
const struct m0_pver_policy_type_ops * ppt_ops
Definition: policy.h:60
enum m0_conf_service_type cs_type
Definition: obj.h:598
#define M0_CONF_CAST(ptr, type)
Definition: obj.h:780
struct m0_conf_sdev * ck_sdev
Definition: obj.h:710
M0_INTERNAL bool m0_confc_is_inited(const struct m0_confc *confc)
Definition: confc.c:448
M0_INTERNAL void m0_pooldev_clink_add(struct m0_clink *link, struct m0_chan *chan)
Definition: pool.c:1999
struct m0_pool_device_to_service * pc_dev2svc
Definition: pool.h:207
static struct m0_clink clink[RDWR_REQUEST_MAX]
struct m0_confc * pc_confc
Definition: pool.h:164
static bool pools_common_invariant(const struct m0_pools_common *pc)
Definition: pool.c:330
M0_INTERNAL void m0_poolnode_clink_add(struct m0_clink *link, struct m0_chan *chan)
Definition: pool.c:1977
#define m0_confc_open_sync(result, origin,...)
Definition: confc.h:707
struct m0_tl pc_svc_ctxs
Definition: pool.h:172
static int pool_device_index(const struct m0_poolmach *pm, const struct m0_fid *fid)
Definition: pool.c:2034
static struct fdmi_ctx ctx
Definition: main.c:80
struct m0_fid pds_sdev_fid
Definition: pool.h:77
#define FID_P(f)
Definition: fid.h:77
static int _nodes_count(struct m0_conf_pver *pver, uint32_t *nodes)
Definition: pool.c:716
M0_INTERNAL int m0_poolmach_init(struct m0_poolmach *pm, struct m0_pool_version *pver, uint32_t nr_nodes, uint32_t nr_devices, uint32_t nr_spare, uint32_t max_node_failures, uint32_t max_device_failures)
Definition: pool_machine.c:380
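A companion sketch for the pool machine, reusing the assumed geometry above (4 nodes, 12 devices, 2 spare slots) and tolerating 1 node failure and 2 device failures; all values and the helper name are illustrative.
/* Hypothetical helper: initialise the pool machine of a pool version
 * with an assumed small geometry. */
static int example_poolmach_setup(struct m0_poolmach     *pm,
                                  struct m0_pool_version *pver)
{
        return M0_RC(m0_poolmach_init(pm, pver,
                                      4,    /* nr_nodes            */
                                      12,   /* nr_devices          */
                                      2,    /* nr_spare            */
                                      1,    /* max_node_failures   */
                                      2));  /* max_device_failures */
}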
struct m0_uint128 pa_seed
Definition: pdclust.h:121
M0_INTERNAL struct m0_reqh * m0_confc2reqh(const struct m0_confc *confc)
Definition: helpers.c:342
M0_INTERNAL void m0_pool_version_fini(struct m0_pool_version *pv)
Definition: pool.c:786
static struct m0_pool pool
Definition: iter_ut.c:58
struct m0_reqh_service_ctx * pds_ctx
Definition: pool.h:74
struct m0_rpc_machine * pc_rmach
Definition: pool.h:166
struct m0_conf_node * ce_node
Definition: obj.h:682
M0_INTERNAL bool m0_fid_eq(const struct m0_fid *fid0, const struct m0_fid *fid1)
Definition: fid.c:164
#define m0_forall(var, nr,...)
Definition: misc.h:112
void m0_clink_add_lock(struct m0_chan *chan, struct m0_clink *link)
Definition: chan.c:255
M0_INTERNAL struct m0_conf_obj * m0_conf_diter_result(const struct m0_conf_diter *it)
Definition: diter.c:576
struct m0_conf_obj pc_obj
Definition: obj.h:581
#define m0_conf_diter_init(iter, confc, origin,...)
Definition: diter.h:235
M0_INTERNAL uint32_t m0_ha2pm_state_map(enum m0_ha_obj_state hastate)
Definition: pool.c:1905
static struct m0_fid pv_fid
Definition: iter_ut.c:62
struct m0_tl pc_abandoned_svc_ctxs
Definition: pool.h:228
M0_INTERNAL int64_t m0_ref_read(const struct m0_ref *ref)
Definition: refs.c:44
static int dix_pool_version_get_locked(struct m0_pools_common *pc, struct m0_pool_version **pv)
Definition: pool.c:675
struct m0_reqh reqh
Definition: rm_foms.c:48
#define M0_MAGIX_OFFSET(type, field)
Definition: misc.h:356
static struct m0_fid profile
Definition: rconfc.c:49
M0_INTERNAL int m0_pools_common_init(struct m0_pools_common *pc, struct m0_rpc_machine *rmach)
Definition: pool.c:1425
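A simplified bring-up sketch assembled from functions indexed on this page; the ordering is plausible but not authoritative, the helper name is an assumption, and real initialisation performs additional steps (for example, creating the pools and pool versions from configuration) between these calls.
/* Hypothetical helper: initialise the pools machinery for an RPC
 * machine, create and connect service contexts, then set up pool
 * versions; unwind on failure. */
static int example_pools_bringup(struct m0_pools_common *pc,
                                 struct m0_rpc_machine  *rmach)
{
        int rc;

        rc = m0_pools_common_init(pc, rmach);
        if (rc != 0)
                return M0_ERR(rc);
        /* ... pools are built from configuration here ... */
        rc = m0_pools_service_ctx_create(pc);
        if (rc != 0)
                return M0_ERR(rc);
        m0_pools_common_service_ctx_connect_sync(pc);
        rc = m0_pool_versions_setup(pc);
        if (rc != 0) {
                m0_pools_service_ctx_destroy(pc);
                return M0_ERR(rc);
        }
        return M0_RC(0);
}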
M0_INTERNAL void m0_pools_lock(struct m0_pools_common *pc)
Definition: pool.c:2105
m0_ha_entrypoint_client_state
Definition: entrypoint.h:86
const int m0_lid_to_unit_map_nr
Definition: layout_pver.c:116
#define M0_CNT_INC(cnt)
Definition: arith.h:226
static struct m0_chan chan[RDWR_REQUEST_MAX]
#define M0_FI_ENABLED(tag)
Definition: finject.h:231
Definition: fid.h:38
const struct m0_conf_obj_type M0_CONF_NODE_TYPE
Definition: node.c:128
M0_INTERNAL struct m0_pool * m0_pool_find(struct m0_pools_common *pc, const struct m0_fid *pool)
Definition: pool.c:257
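A lookup sketch assuming the caller serialises access to the pools list with m0_pools_lock()/m0_pools_unlock(); the helper name is illustrative and NULL is expected when no pool with the given fid is known.
/* Hypothetical helper: locate a pool by fid while holding the
 * pools-common lock. */
static struct m0_pool *example_pool_lookup(struct m0_pools_common *pc,
                                           const struct m0_fid    *pool_fid)
{
        struct m0_pool *pool;

        m0_pools_lock(pc);
        pool = m0_pool_find(pc, pool_fid);
        m0_pools_unlock(pc);
        return pool;
}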
static void pool__layouts_evict(struct m0_pool *pool, struct m0_layout_domain *ldom)
Definition: pool.c:1296
M0_INTERNAL int m0_conf_pver_find_by_fid(const struct m0_fid *fid, const struct m0_conf_root *root, struct m0_conf_pver **out)
Definition: pvers.c:219
static int service_ctxs_create(struct m0_pools_common *pc, bool service_connect)
Definition: pool.c:985
struct m0_chan rh_conf_cache_exp
Definition: reqh.h:194
#define M0_ALLOC_PTR(ptr)
Definition: memory.h:86
struct m0_conf_obj cn_obj
Definition: obj.h:562
M0_INTERNAL int m0_pool_device_state_update(struct m0_reqh *reqh, struct m0_be_tx *tx, struct m0_fid *dev_fid, enum m0_pool_nd_state new_state)
Definition: pool.c:2068
M0_INTERNAL int m0_pools_service_ctx_create(struct m0_pools_common *pc)
Definition: pool.c:1535
static const char * local_ep(const struct m0_cm *cm)
Definition: cm_utils.c:397
M0_INTERNAL struct m0_pool_version * m0_pool_version_dix_get(const struct m0_pools_common *pc)
Definition: pool.c:833
struct m0_conf_obj cs_obj
Definition: obj.h:595
static int pool_version_get_locked(struct m0_pools_common *pc, const struct m0_fid *pool, struct m0_pool_version **pv)
Definition: pool.c:631
static bool reqh_svc_ctx_is_in_pools(struct m0_pools_common *pc, struct m0_conf_service *cs)
Definition: pool.c:880
M0_INTERNAL void m0_conf_diter_fini(struct m0_conf_diter *it)
Definition: diter.c:313
struct m0_pver_policy * po_pver_policy
Definition: pool.h:96
struct m0_be_clink pn_clink
Definition: pool.h:404
uint32_t pa_P
Definition: pdclust.h:115
static struct m0_net_test_service svc
Definition: service.c:34
static int _pool_create(struct m0_pool **out, const struct m0_conf_pool *conf_pool)
Definition: pool.c:1767
struct m0_conf_obj sd_obj
Definition: obj.h:616
M0_INTERNAL void m0_pools_unlock(struct m0_pools_common *pc)
Definition: pool.c:2110
struct m0_tl pc_pools
Definition: pool.h:162
static void reqh_service_ctx_abandon(struct m0_reqh_service_ctx *ctx)
Definition: pool.c:1179
uint32_t pv_nr_nodes
Definition: pool.h:125
#define _0C(exp)
Definition: assert.h:311
M0_INTERNAL void m0_mutex_fini(struct m0_mutex *mutex)
Definition: mutex.c:42
M0_INTERNAL void m0_clink_fini(struct m0_clink *link)
Definition: chan.c:208
static bool reqh_service_ctx_is_matching(const struct m0_reqh_service_ctx *ctx, const struct m0_conf_obj *svc)
Definition: pool.c:1161
static int pools_common__dev2ios_build(struct m0_pools_common *pc)
Definition: pool.c:1408
struct m0_pools_common * rh_pools
Definition: reqh.h:118
uint32_t psu_device_index
Definition: pool.h:480
M0_INTERNAL void m0_poolmach_fini(struct m0_poolmach *pm)
Definition: pool_machine.c:426
static bool node_poolmach_state_update_cb(struct m0_clink *cl)
Definition: pool.c:1920
#define IS_IN_ARRAY(idx, array)
Definition: misc.h:311
M0_INTERNAL struct m0_chan * m0_ha_entrypoint_client_chan(struct m0_ha_entrypoint_client *ecl)
Definition: entrypoint.c:823
static int active_rm_ctx_create(struct m0_pools_common *pc, bool service_connect)
Definition: pool.c:955
M0_INTERNAL struct m0_confc * m0_confc_from_obj(const struct m0_conf_obj *obj)
Definition: confc.c:592
struct m0_fid po_id
Definition: pool.h:81
static bool obj_is_service(const struct m0_conf_obj *obj)
Definition: pool.c:341
M0_INTERNAL void m0_confc_close(struct m0_conf_obj *obj)
Definition: confc.c:921
static struct m0_reqh_service_ctx * service_ctx_find_by_type(const struct m0_pools_common *pc, enum m0_conf_service_type type)
Definition: pool.c:1087
Definition: dtm.h:529
struct m0_clink pc_conf_ready_async
Definition: pool.h:235
M0_INTERNAL void m0_fd_tile_destroy(struct m0_fd_tile *tile)
Definition: fd.c:590
#define M0_ASSERT_INFO(cond, fmt,...)
M0_INTERNAL struct m0_rpc_session * m0_pools_common_active_rm_session(struct m0_pools_common *pc)
Definition: pool.c:1074
M0_INTERNAL void m0_layout_instance_fini(struct m0_layout_instance *li)
Definition: layout.c:1123
Definition: pool.h:80
static void dix_pool_setup(struct m0_pools_common *pc, const struct m0_fid *imeta_pver)
Definition: pool.c:1782
M0_INTERNAL bool m0_conf_cache_contains(struct m0_conf_cache *cache, const struct m0_fid *fid)
Definition: cache.c:94
Definition: nucleus.c:42
M0_INTERNAL bool m0_conf_obj_is_pool(const struct m0_conf_obj *obj)
Definition: helpers.c:66
M0_INTERNAL void m0_layout_put(struct m0_layout *l)
Definition: layout.c:893
static void abandoned_svc_ctxs_cleanup(struct m0_pools_common *pc)
Definition: pool.c:1579
#define out(...)
Definition: gen.c:41
M0_INTERNAL void m0_reqh_service_disconnect(struct m0_reqh_service_ctx *ctx)
Definition: reqh_service.c:781
M0_INTERNAL void m0_pooldev_clink_del(struct m0_clink *cl)
Definition: pool.c:1984
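A lifecycle sketch for the pool-device clink helpers indexed above; the channel is assumed to be an HA notification channel such as a conf drive's co_ha_chan, and the helper name is an assumption.
/* Hypothetical helper: subscribe a pool device clink to an HA channel
 * for the duration of some activity and detach it afterwards. */
static void example_pooldev_ha_track(struct m0_clink *link,
                                     struct m0_chan  *ha_chan)
{
        m0_pooldev_clink_add(link, ha_chan);
        /* ... device state transitions are applied to the pool machine
         * while the clink stays subscribed ... */
        m0_pooldev_clink_del(link);
}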
int type
Definition: dir.c:1031
M0_INTERNAL int m0_pool_versions_setup(struct m0_pools_common *pc)
Definition: pool.c:1640
M0_INTERNAL struct m0_pool_version * m0_pool_clean_pver_find(const struct m0_pool *pool)
Definition: pool.c:556
#define M0_FID0
Definition: fid.h:93
M0_TL_DEFINE(be_pool, static, struct m0_be_pool_item)
M0_INTERNAL void m0_pool_versions_destroy(struct m0_pools_common *pc)
Definition: pool.c:1876
enum m0_pool_nd_state pe_state
Definition: pool_machine.h:199
static bool is_md_pool(const struct m0_pools_common *pc, const struct m0_pool *pool)
Definition: pool.c:617
struct m0_rpc_link sc_rlink
Definition: reqh_service.h:759
M0_INTERNAL void m0_conf_cache_lock(struct m0_conf_cache *cache)
Definition: cache.c:50
#define m0_tl_find(name, var, head,...)
Definition: tlist.h:757
M0_INTERNAL struct m0_pool_version * m0_pool_version_lookup(const struct m0_pools_common *pc, const struct m0_fid *id)
Definition: pool.c:568
struct m0_mutex pc_mutex
Definition: pool.h:221
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
void m0_free(void *data)
Definition: memory.c:146
static struct m0_addb2_source * s
Definition: consumer.c:39
M0_INTERNAL bool m0_conf_service_ep_is_known(const struct m0_conf_obj *svc_obj, const char *ep_addr)
Definition: helpers.c:197
M0_INTERNAL int m0_conf_pver_formulaic_from_virtual(const struct m0_conf_pver *virtual, const struct m0_conf_root *root, const struct m0_conf_pver **out)
Definition: pvers.c:244
static int pool_from_virtual_pver(const struct m0_conf_pver *virtual, struct m0_confc *confc, struct m0_conf_pool **out)
Definition: pool.c:1703
struct m0_pdclust_attr pv_attr
Definition: pool.h:122
struct m0_ha_entrypoint_rep ecl_rep
Definition: entrypoint.h:114
int32_t rc
Definition: trigger_fop.h:47
struct m0_pool_spare_usage * pst_spare_usage_array
Definition: pool_machine.h:137
#define ARRAY_SIZE(a)
Definition: misc.h:45
uint32_t pd_index
Definition: pool.h:432
M0_INTERNAL void m0_conf_cache_unlock(struct m0_conf_cache *cache)
Definition: cache.c:55
static void pools_common__md_pool_cleanup(struct m0_pools_common *pc)
Definition: pool.c:1249
struct m0_reqh * rm_reqh
Definition: rpc_machine.h:105
static void service_ctxs_destroy(struct m0_pools_common *pc)
Definition: pool.c:848
struct m0_conf_obj ck_obj
Definition: obj.h:707
struct m0_conf_obj * cv_real
Definition: obj.h:558
#define FID_F
Definition: fid.h:75
struct m0_clink pc_ha_clink
Definition: pool.h:217
Definition: tx.h:280
M0_INTERNAL uint64_t m0_hash(uint64_t x)
Definition: hash.c:279
struct m0_ha_entrypoint_client * pc_ha_ecl
Definition: pool.h:216
M0_INTERNAL void m0_pools_destroy(struct m0_pools_common *pc)
Definition: pool.c:1887
struct m0_tl po_failed_devices
Definition: pool.h:93
static int mds_map_fill(struct m0_pools_common *pc, const struct m0_conf_enclosure *encl)
Definition: pool.c:356
M0_INTERNAL void m0_poolmach_state_last_cancel(struct m0_poolmach *pm)
Definition: pool_machine.c:796