pool_machine.c
/*
 * Copyright (c) 2015-2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */


#define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_POOL
#include "lib/trace.h"

#include "lib/errno.h"
#include "lib/memory.h"
#include "lib/misc.h"
#include "pool/pool.h"
#include "reqh/reqh.h"             /* m0_reqh */
#include "conf/confc.h"
#include "conf/diter.h"            /* m0_conf_diter_init */
#include "conf/obj_ops.h"          /* m0_conf_obj_get_lock */
#include "conf/helpers.h"          /* m0_confc2reqh */
#include "ioservice/fid_convert.h" /* m0_fid_convert_gob2cob */
#include "ha/failvec.h"            /* m0_ha_failvec_fetch */
#include "lib/finject.h"           /* M0_FI_ENABLED */

M0_TL_DESCR_DEFINE(poolmach_events, "pool machine events list", M0_INTERNAL,
                   struct m0_poolmach_event_link, pel_linkage, pel_magic,
                   M0_POOL_EVENTS_LIST_MAGIC, M0_POOL_EVENTS_HEAD_MAGIC);

M0_TL_DEFINE(poolmach_events, M0_INTERNAL, struct m0_poolmach_event_link);

struct poolmach_equeue_link {
        /* An event to be queued. */
        struct m0_poolmach_event pel_event;
        /* Linkage into queue. m0_poolmach_state::pst_event_queue */
        struct m0_tlink          pel_linkage;
        uint64_t                 pel_magic;
};

M0_TL_DESCR_DEFINE(poolmach_equeue, "pool machine events queue", static,
                   struct poolmach_equeue_link, pel_linkage, pel_magic,
                   100, 101);

M0_TL_DEFINE(poolmach_equeue, static, struct poolmach_equeue_link);
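
/*
 * Note: device events that arrive before the failure vector has been
 * fetched from HA (i.e. before m0_poolmach_state::pst_su_initialised is
 * set) are parked on m0_poolmach_state::pst_event_queue by
 * poolmach_equeue_add() and replayed later; see
 * m0_poolmach_event_queue_apply() and m0_poolmach_failvec_apply() below.
 */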

static struct m0_clink *poolnode_clink(struct m0_poolnode *pnode)
{
        return &pnode->pn_clink.bc_u.clink;
}

static struct m0_clink *pooldev_clink(struct m0_pooldev *pdev)
{
        return &pdev->pd_clink.bc_u.clink;
}

static struct m0_clink *exp_clink(struct m0_poolmach_state *state)
{
        return &state->pst_conf_exp.bc_u.clink;
}

static struct m0_clink *ready_clink(struct m0_poolmach_state *state)
{
        return &state->pst_conf_ready.bc_u.clink;
}

static bool is_enclosurev_or_diskv(const struct m0_conf_obj *obj)
{
        return m0_conf_obj_type(obj) == &M0_CONF_OBJV_TYPE &&
               M0_IN(m0_conf_obj_type(
                             M0_CONF_CAST(obj, m0_conf_objv)->cv_real),
                     (&M0_CONF_ENCLOSURE_TYPE, &M0_CONF_DRIVE_TYPE));
}

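/*
 * Updates the pool machine state for one object-v reached by the conf
 * iterator. The iterator visits an enclosure-v before the drive-vs
 * beneath it, so an enclosure seen after at least one device marks the
 * beginning of the next pool node; hence the *idx_nodes increment below.
 */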
static int poolmach_state_update(struct m0_poolmach_state *st,
                                 const struct m0_conf_obj *objv_real,
                                 uint32_t *idx_nodes,
                                 uint32_t *idx_devices)
{
        int rc = 0;

        M0_ENTRY(FID_F, FID_P(&objv_real->co_id));

        if (m0_conf_obj_type(objv_real) == &M0_CONF_ENCLOSURE_TYPE) {
                struct m0_conf_enclosure *e;
                struct m0_conf_node      *n;
                struct m0_poolmach_event  pme;
                struct m0_poolnode       *pnode;

                if (*idx_devices != 0)
                        M0_CNT_INC(*idx_nodes);

                pnode = &st->pst_nodes_array[*idx_nodes];

                e = M0_CONF_CAST(objv_real, m0_conf_enclosure);
                n = e->ce_node;
                pnode->pn_id = n->cn_obj.co_id;
                pnode->pn_index = *idx_nodes;

                m0_conf_obj_get_lock(&n->cn_obj);
                m0_poolnode_clink_add(poolnode_clink(pnode),
                                      &n->cn_obj.co_ha_chan);

                pme.pe_type  = M0_POOL_NODE;
                pme.pe_index = *idx_nodes;
                pme.pe_state = m0_ha2pm_state_map(n->cn_obj.co_ha_state);

                M0_LOG(M0_DEBUG, "node:"FID_F"index:%d state:%d",
                       FID_P(&pnode->pn_id), pnode->pn_index,
                       pme.pe_state);
                rc = m0_poolmach_state_transit(pnode->pn_pm, &pme);
        } else if (m0_conf_obj_type(objv_real) == &M0_CONF_DRIVE_TYPE) {
                struct m0_conf_drive     *d;
                struct m0_poolmach_event  pme;
                struct m0_pooldev        *pdev =
                        &st->pst_devices_array[*idx_devices];

                d = M0_CONF_CAST(objv_real, m0_conf_drive);
                pdev->pd_id = d->ck_obj.co_id;
                pdev->pd_sdev_idx = d->ck_sdev->sd_dev_idx;
                pdev->pd_index = *idx_devices;
                pdev->pd_node = &st->pst_nodes_array[*idx_nodes];
                m0_conf_obj_get_lock(&d->ck_obj);
                m0_pooldev_clink_add(pooldev_clink(pdev),
                                     &d->ck_obj.co_ha_chan);

                pme.pe_type  = M0_POOL_DEVICE;
                pme.pe_index = *idx_devices;
                pme.pe_state = m0_ha2pm_state_map(d->ck_obj.co_ha_state);

                M0_LOG(M0_DEBUG, "device:"FID_F"index:%d dev_idx:%d state:%d",
                       FID_P(&pdev->pd_id), pdev->pd_index,
                       pdev->pd_sdev_idx,
                       pme.pe_state);
                rc = m0_poolmach_state_transit(pdev->pd_pm, &pme);

                M0_CNT_INC(*idx_devices);
        } else {
                M0_IMPOSSIBLE("Invalid conf_obj type");
        }
        return M0_RC(rc);
}

static bool poolmach_conf_expired_cb(struct m0_clink *clink)
{
        struct m0_pooldev        *dev;
        struct m0_poolnode       *node;
        struct m0_clink          *cl;
        struct m0_conf_obj       *obj;
        struct m0_poolmach_state *state =
                container_of(clink, struct m0_poolmach_state,
                             pst_conf_exp.bc_u.clink);
        int                       i;

        M0_ENTRY();
        for (i = 0; i < state->pst_nr_devices; ++i) {
                dev = &state->pst_devices_array[i];
                cl = pooldev_clink(dev);
                if (cl->cl_chan == NULL)
                        continue;
                obj = container_of(cl->cl_chan, struct m0_conf_obj,
                                   co_ha_chan);
                M0_ASSERT(m0_conf_obj_invariant(obj));
                M0_LOG(M0_INFO, "obj "FID_F, FID_P(&obj->co_id));
                m0_pooldev_clink_del(cl);
                m0_confc_close(obj);
                M0_SET0(cl);
        }

        for (i = 0; i < state->pst_nr_nodes; ++i) {
                node = &state->pst_nodes_array[i];
                cl = poolnode_clink(node);
                if (cl->cl_chan == NULL)
                        continue;
                obj = container_of(cl->cl_chan, struct m0_conf_obj,
                                   co_ha_chan);
                M0_ASSERT(m0_conf_obj_invariant(obj));
                M0_LOG(M0_INFO, "obj "FID_F, FID_P(&obj->co_id));
                m0_poolnode_clink_del(cl);
                m0_confc_close(obj);
                M0_SET0(cl);
        }
        M0_LEAVE();
        return true;
}

static bool poolmach_conf_ready_cb(struct m0_clink *clink)
{
        struct m0_pooldev        *dev;
        struct m0_poolmach_state *state =
                container_of(clink, struct m0_poolmach_state,
                             pst_conf_ready.bc_u.clink);
        struct m0_reqh           *reqh = container_of(clink->cl_chan,
                                                      struct m0_reqh,
                                                      rh_conf_cache_ready);
        struct m0_conf_cache     *cache = &reqh->rh_rconfc.rc_confc.cc_cache;
        struct m0_conf_obj       *obj;
        int                       i;

        M0_ENTRY();
        /*
         * TODO: the code should process any updates in the configuration
         * tree. Currently it expects that no interested object was removed
         * from the tree and no new object (m0_conf_sdev) was added.
         */
        for (i = 0; i < state->pst_nr_devices; ++i) {
                dev = &state->pst_devices_array[i];
                obj = m0_conf_cache_lookup(cache, &dev->pd_id);
                M0_ASSERT_INFO(_0C(obj != NULL) &&
                               _0C(m0_conf_obj_invariant(obj)),
                               "dev->pd_id "FID_F,
                               FID_P(&dev->pd_id));
                m0_pooldev_clink_add(pooldev_clink(dev), &obj->co_ha_chan);
                m0_conf_obj_get_lock(obj);
        }
        M0_LEAVE();
        return true;
}

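/*
 * Walks the pool version subtree (site-v, rack-v, enclosure-v,
 * controller-v, drive-v), fills the node and device arrays of the pool
 * machine and subscribes the pool machine to configuration expiry/ready
 * events of the request handler.
 */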
M0_INTERNAL int m0_poolmach_init_by_conf(struct m0_poolmach *pm,
                                         struct m0_conf_pver *pver)
{
        struct m0_confc     *confc;
        struct m0_reqh      *reqh;
        struct m0_conf_diter it;
        uint32_t             idx_nodes = 0;
        uint32_t             idx_devices = 0;
        int                  rc;

        M0_ENTRY(FID_F, FID_P(&pver->pv_obj.co_id));
        confc = m0_confc_from_obj(&pver->pv_obj);
        reqh = m0_confc2reqh(confc);
        rc = m0_conf_diter_init(&it, confc, &pver->pv_obj,
                                M0_CONF_PVER_SITEVS_FID,
                                M0_CONF_SITEV_RACKVS_FID,
                                M0_CONF_RACKV_ENCLVS_FID,
                                M0_CONF_ENCLV_CTRLVS_FID,
                                M0_CONF_CTRLV_DRIVEVS_FID);
        if (rc != 0)
                return M0_ERR(rc);

        while ((rc = m0_conf_diter_next_sync(&it, is_enclosurev_or_diskv)) ==
               M0_CONF_DIRNEXT) {
                rc = poolmach_state_update(pm->pm_state,
                                           M0_CONF_CAST(m0_conf_diter_result(&it),
                                                        m0_conf_objv)->cv_real,
                                           &idx_nodes, &idx_devices);
                if (rc != 0)
                        break;
        }

        m0_conf_diter_fini(&it);
        if (rc != 0)
                return M0_RC(rc);

        m0_clink_init(exp_clink(pm->pm_state), poolmach_conf_expired_cb);
        m0_clink_init(ready_clink(pm->pm_state), poolmach_conf_ready_cb);
        m0_clink_add_lock(&reqh->rh_conf_cache_exp,
                          exp_clink(pm->pm_state));
        m0_clink_add_lock(&reqh->rh_conf_cache_ready,
                          ready_clink(pm->pm_state));
        M0_LOG(M0_DEBUG, "nodes:%d devices: %d", idx_nodes, idx_devices);
        M0_POST(idx_devices <= pm->pm_state->pst_nr_devices);
        return M0_RC(rc);
}

static void state_init(struct m0_poolmach_state *state,
                       struct m0_poolnode *nodes_array,
                       uint32_t nr_nodes,
                       struct m0_pooldev *devices_array,
                       uint32_t nr_devices,
                       struct m0_pool_spare_usage *spare_usage_array,
                       uint32_t nr_spare,
                       uint32_t max_node_failures,
                       uint32_t max_device_failures,
                       struct m0_poolmach *pm)
{
        int i;

        M0_ASSERT(state != NULL);
        M0_ASSERT(nodes_array != NULL);
        M0_ASSERT(devices_array != NULL);
        M0_ASSERT(spare_usage_array != NULL);

        state->pst_nodes_array         = nodes_array;
        state->pst_devices_array       = devices_array;
        state->pst_spare_usage_array   = spare_usage_array;
        state->pst_nr_nodes            = nr_nodes;
        state->pst_nr_devices          = nr_devices;
        state->pst_max_node_failures   = max_node_failures;
        state->pst_max_device_failures = max_device_failures;
        state->pst_nr_failures         = 0;
        state->pst_nr_spares           = nr_spare;

        for (i = 0; i < state->pst_nr_nodes; i++) {
                m0_format_header_pack(&state->pst_nodes_array[i].pn_header,
                        &(struct m0_format_tag){
                                .ot_version = M0_POOLNODE_FORMAT_VERSION,
                                .ot_type    = M0_FORMAT_TYPE_POOLNODE,
                                .ot_footer_offset =
                                        offsetof(struct m0_poolnode, pn_footer)
                        });
                state->pst_nodes_array[i].pn_state = M0_PNDS_ONLINE;
                M0_SET0(&state->pst_nodes_array[i].pn_id);
                state->pst_nodes_array[i].pn_index = i;
                state->pst_nodes_array[i].pn_pm    = pm;
                m0_format_footer_update(&state->pst_nodes_array[i]);
        }

        for (i = 0; i < state->pst_nr_devices; i++) {
                m0_format_header_pack(&state->pst_devices_array[i].pd_header,
                        &(struct m0_format_tag){
                                .ot_version = M0_POOLDEV_FORMAT_VERSION,
                                .ot_type    = M0_FORMAT_TYPE_POOLDEV,
                                .ot_footer_offset =
                                        offsetof(struct m0_pooldev, pd_footer)
                        });
                state->pst_devices_array[i].pd_state    = M0_PNDS_UNKNOWN;
                M0_SET0(&state->pst_devices_array[i].pd_id);
                state->pst_devices_array[i].pd_node     = NULL;
                state->pst_devices_array[i].pd_sdev_idx = 0;
                state->pst_devices_array[i].pd_index    = i;
                state->pst_devices_array[i].pd_pm       = pm;
                pool_failed_devs_tlink_init(&state->pst_devices_array[i]);
                m0_format_footer_update(&state->pst_devices_array[i]);
        }

        for (i = 0; i < state->pst_nr_spares; i++) {
                m0_format_header_pack(
                        &state->pst_spare_usage_array[i].psu_header,
                        &(struct m0_format_tag){
                                .ot_version =
                                        M0_POOL_SPARE_USAGE_FORMAT_VERSION,
                                .ot_type    = M0_FORMAT_TYPE_POOL_SPARE_USAGE,
                                .ot_footer_offset =
                                        offsetof(struct m0_pool_spare_usage,
                                                 psu_footer)
                        });
                state->pst_spare_usage_array[i].psu_device_index =
                        POOL_PM_SPARE_SLOT_UNUSED;
                m0_format_footer_update(&state->pst_spare_usage_array[i]);
        }

        poolmach_events_tlist_init(&state->pst_events_list);
        poolmach_equeue_tlist_init(&state->pst_event_queue);
        /* Gets initialised only after init by configuration. */
        state->pst_su_initialised = false;
}

static void poolmach_init(struct m0_poolmach *pm,
                          struct m0_pool_version *pver,
                          struct m0_poolmach_state *pm_state)
{
        M0_ENTRY();
        M0_PRE(!pm->pm_is_initialised);

        M0_SET0(pm);
        m0_rwlock_init(&pm->pm_lock);
        pm->pm_state = pm_state;
        pm->pm_is_initialised = true;
        pm->pm_pver = pver;
        M0_LEAVE();
}

M0_INTERNAL int m0_poolmach_init(struct m0_poolmach *pm,
                                 struct m0_pool_version *pver,
                                 uint32_t nr_nodes,
                                 uint32_t nr_devices,
                                 uint32_t nr_spare,
                                 uint32_t max_node_failures,
                                 uint32_t max_device_failures)
{
        struct m0_poolmach_state   *state;
        struct m0_poolnode         *nodes_array;
        struct m0_pooldev          *devices_array;
        struct m0_pool_spare_usage *spare_usage_array;

        M0_ALLOC_PTR(state);
        M0_ALLOC_ARR(nodes_array, nr_nodes);
        M0_ALLOC_ARR(devices_array, nr_devices);
        M0_ALLOC_ARR(spare_usage_array, nr_spare);
        if (M0_IN(NULL,
                  (state, nodes_array, devices_array, spare_usage_array))) {
                m0_free(state);
                m0_free(nodes_array);
                m0_free(devices_array);
                m0_free(spare_usage_array);
                return M0_ERR(-ENOMEM);
        }
        state_init(state, nodes_array, nr_nodes, devices_array,
                   nr_devices, spare_usage_array, nr_spare,
                   max_node_failures, max_device_failures, pm);
        poolmach_init(pm, pver, state);
        return M0_RC(0);
}
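
/*
 * Example use of m0_poolmach_init() above (a sketch; the numbers are
 * purely illustrative): a pool machine for a pool version with 4 nodes,
 * 10 devices and 2 spare slots, tolerating at most 1 node and 2 device
 * failures, would be set up as
 *
 *     rc = m0_poolmach_init(pm, pver, 4, 10, 2, 1, 2);
 *
 * and torn down with m0_poolmach_fini(pm), which releases the arrays
 * allocated here.
 */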

static inline void pool_obj_clink_fini(struct m0_clink *cl)
{
        struct m0_conf_obj *obj;

        if (cl->cl_chan != NULL) {
                obj = container_of(cl->cl_chan, struct m0_conf_obj,
                                   co_ha_chan);
                M0_ASSERT(m0_conf_obj_invariant(obj));
                m0_pooldev_clink_del(cl);
                M0_SET0(cl);
                m0_confc_close(obj);
        }
}

M0_INTERNAL void m0_poolmach_fini(struct m0_poolmach *pm)
{
        struct m0_poolmach_event_link *scan;
        struct m0_poolmach_state      *state = pm->pm_state;
        struct m0_pooldev             *pd;
        struct m0_clink               *cl;
        int                            i;

        M0_PRE(pm != NULL);

        m0_rwlock_write_lock(&pm->pm_lock);

        m0_tl_for(poolmach_events, &state->pst_events_list, scan) {
                poolmach_events_tlink_del_fini(scan);
                m0_free(scan);
        } m0_tl_endfor;

        for (i = 0; i < state->pst_nr_devices; ++i) {
                cl = pooldev_clink(&state->pst_devices_array[i]);
                pool_obj_clink_fini(cl);
                pd = &state->pst_devices_array[i];
                if (pool_failed_devs_tlink_is_in(pd))
                        pool_failed_devs_tlink_del_fini(pd);
        }

        for (i = 0; i < state->pst_nr_nodes; ++i) {
                cl = poolnode_clink(&state->pst_nodes_array[i]);
                pool_obj_clink_fini(cl);
        }

        if (!M0_FI_ENABLED("poolmach_init_by_conf_skipped")) {
                m0_clink_cleanup(exp_clink(state));
                m0_clink_fini(exp_clink(state));
                m0_clink_cleanup(ready_clink(state));
                m0_clink_fini(ready_clink(state));
        }
        m0_free(state->pst_spare_usage_array);
        m0_free(state->pst_devices_array);
        m0_free(state->pst_nodes_array);
        m0_free0(&pm->pm_state);

        m0_rwlock_write_unlock(&pm->pm_lock);

        pm->pm_is_initialised = false;
        m0_rwlock_fini(&pm->pm_lock);
}

static bool disk_is_in(struct m0_tl *head, struct m0_pooldev *pd)
{
        return m0_tl_exists(pool_failed_devs, d, head,
                            m0_fid_eq(&d->pd_id, &pd->pd_id));
}

static int poolmach_equeue_add(struct m0_poolmach *pm,
                               const struct m0_poolmach_event *event)
{
        struct poolmach_equeue_link *new_link;
        struct m0_poolmach_state    *state;
        int                          rc;

        m0_rwlock_write_lock(&pm->pm_lock);
        M0_ALLOC_PTR(new_link);
        if (new_link == NULL) {
                rc = -ENOMEM;
        } else {
                new_link->pel_event = *event;
                state = pm->pm_state;
                poolmach_equeue_tlink_init_at_tail(new_link,
                                                   &state->pst_event_queue);
                rc = 0;
        }
        m0_rwlock_write_unlock(&pm->pm_lock);
        return M0_RC(rc);
}

M0_INTERNAL uint32_t m0_poolmach_equeue_length(struct m0_poolmach *pm)
{
        return m0_tlist_length(&poolmach_equeue_tl,
                               &pm->pm_state->pst_event_queue);
}

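/*
 * Claims the first unused slot of the spare usage array for the device
 * named by @event. A slot is "unused" while its psu_device_index is
 * POOL_PM_SPARE_SLOT_UNUSED; the slot is released again when the device
 * returns to M0_PNDS_ONLINE (see m0_poolmach_state_transit()).
 */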
static void spare_usage_arr_update(struct m0_poolmach *pm,
                                   const struct m0_poolmach_event *event)
{
        struct m0_poolmach_state   *state;
        struct m0_pool_spare_usage *spare_array;
        uint32_t                    i;

        state = pm->pm_state;
        spare_array = state->pst_spare_usage_array;
        /* Allocate an SNS repair spare slot. */
        for (i = 0; i < state->pst_nr_spares; ++i) {
                if (spare_array[i].psu_device_index ==
                    POOL_PM_SPARE_SLOT_UNUSED) {
                        spare_array[i].psu_device_index = event->pe_index;
                        spare_array[i].psu_device_state = event->pe_state;
                        break;
                }
        }
        if (state->pst_nr_spares == 0 || (i == state->pst_nr_spares &&
            i > 0 /* i == 0 in case of mdpool */)) {
                M0_LOG(M0_ERROR, FID_F": no free spare slot is found,"
                       " this pool version is in DUD state;"
                       " event_index=%d event_state=%d",
                       FID_P(&pm->pm_pver->pv_id),
                       event->pe_index, event->pe_state);
                /* TODO: add an ADDB error message here. */
        }
}

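/**
 * Transits a pool node or device to a new state. The permitted
 * transitions, as enforced by the old-state switch below, are:
 *
 *   M0_PNDS_UNKNOWN         -> any state
 *   M0_PNDS_ONLINE          -> OFFLINE, FAILED
 *   M0_PNDS_OFFLINE         -> ONLINE, FAILED
 *   M0_PNDS_FAILED          -> SNS_REPAIRING
 *   M0_PNDS_SNS_REPAIRING   -> SNS_REPAIRED, FAILED
 *   M0_PNDS_SNS_REPAIRED    -> SNS_REBALANCING
 *   M0_PNDS_SNS_REBALANCING -> ONLINE, FAILED
 *
 * Accepted events are recorded in pst_events_list; device events that
 * arrive before the failure vector is known are queued instead.
 */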
M0_INTERNAL int m0_poolmach_state_transit(struct m0_poolmach *pm,
                                          const struct m0_poolmach_event *event)
{
        struct m0_poolmach_state      *state;
        struct m0_pool_spare_usage    *spare_array;
        struct m0_poolmach_event_link  event_link;
        struct m0_pool                *pool;
        struct m0_pooldev             *pd;
        enum m0_pool_nd_state          old_state = M0_PNDS_FAILED;
        uint32_t                       i;
        int                            rc = 0;
        struct m0_poolmach_event_link *new_link;

        M0_ENTRY();

        M0_PRE(pm != NULL);
        M0_PRE(event != NULL);

        M0_SET0(&event_link);
        state = pm->pm_state;
        pool = pm->pm_pver->pv_pool;

        if (!M0_IN(event->pe_type, (M0_POOL_NODE, M0_POOL_DEVICE)))
                return M0_ERR(-EINVAL);

        if (!M0_IN(event->pe_state, (M0_PNDS_ONLINE,
                                     M0_PNDS_FAILED,
                                     M0_PNDS_OFFLINE,
                                     M0_PNDS_SNS_REPAIRING,
                                     M0_PNDS_SNS_REPAIRED,
                                     M0_PNDS_SNS_REBALANCING)))
                return M0_ERR(-EINVAL);

        if ((event->pe_type == M0_POOL_NODE &&
             event->pe_index >= state->pst_nr_nodes) ||
            (event->pe_type == M0_POOL_DEVICE &&
             event->pe_index >= state->pst_nr_devices))
                return M0_ERR(-EINVAL);

        switch (event->pe_type) {
        case M0_POOL_NODE:
                old_state = state->pst_nodes_array[event->pe_index].pn_state;
                break;
        case M0_POOL_DEVICE:
                old_state = state->pst_devices_array[event->pe_index].pd_state;
                if (!state->pst_su_initialised) {
                        /*
                         * The failure vector is yet to be fetched from HA.
                         * Add the event to the pending queue.
                         */
                        return M0_RC(poolmach_equeue_add(pm, event));
                }
                break;
        default:
                return M0_ERR(-EINVAL);
        }

        if (old_state == event->pe_state)
                return M0_RC(0);

        switch (old_state) {
        case M0_PNDS_UNKNOWN:
                /*
                 * The first state transition can be to any of the
                 * available states.
                 */
                break;
        case M0_PNDS_ONLINE:
                if (!M0_IN(event->pe_state, (M0_PNDS_OFFLINE, M0_PNDS_FAILED)))
                        return M0_ERR(-EINVAL);
                break;
        case M0_PNDS_OFFLINE:
                if (!M0_IN(event->pe_state, (M0_PNDS_ONLINE,
                                             M0_PNDS_FAILED)))
                        return M0_ERR(-EINVAL);
                break;
        case M0_PNDS_FAILED:
                if (event->pe_state != M0_PNDS_SNS_REPAIRING)
                        return M0_ERR(-EINVAL);
                break;
        case M0_PNDS_SNS_REPAIRING:
                if (!M0_IN(event->pe_state, (M0_PNDS_SNS_REPAIRED,
                                             M0_PNDS_FAILED)))
                        return M0_ERR(-EINVAL);
                break;
        case M0_PNDS_SNS_REPAIRED:
                if (event->pe_state != M0_PNDS_SNS_REBALANCING)
                        return M0_ERR(-EINVAL);
                break;
        case M0_PNDS_SNS_REBALANCING:
                if (!M0_IN(event->pe_state, (M0_PNDS_ONLINE,
                                             M0_PNDS_FAILED)))
                        return M0_ERR(-EINVAL);
                break;
        default:
                return M0_ERR(-EINVAL);
        }
        /* Step 1: lock the poolmach. */
        m0_rwlock_write_lock(&pm->pm_lock);

        /* Step 2: update the state according to the event. */
        event_link.pel_event = *event;
        if (event->pe_type == M0_POOL_NODE) {
                /*
                 * TODO: if this is a new node joining, the index might be
                 * larger than the current number of nodes; a larger array
                 * would then be needed to hold the nodes' info.
                 */
                state->pst_nodes_array[event->pe_index].pn_state =
                        event->pe_state;
        } else if (event->pe_type == M0_POOL_DEVICE) {
                state->pst_devices_array[event->pe_index].pd_state =
                        event->pe_state;
        }

        /* Step 3: allocate or free a spare slot if necessary. */
        if (event->pe_type == M0_POOL_DEVICE) {
                spare_array = state->pst_spare_usage_array;
                pd = &state->pst_devices_array[event->pe_index];
                switch (event->pe_state) {
                case M0_PNDS_ONLINE:
                        /* Clear spare slot usage if it is from rebalancing. */
                        for (i = 0; i < state->pst_nr_spares; i++) {
                                if (spare_array[i].psu_device_index ==
                                    event->pe_index) {
                                        M0_ASSERT(M0_IN(spare_array[i].
                                                  psu_device_state,
                                                  (M0_PNDS_OFFLINE,
                                                   M0_PNDS_SNS_REBALANCING)));
                                        spare_array[i].psu_device_index =
                                                POOL_PM_SPARE_SLOT_UNUSED;
                                        break;
                                }
                        }
                        if (old_state == M0_PNDS_UNKNOWN) {
                                M0_ASSERT(!pool_failed_devs_tlink_is_in(pd));
                                break;
                        }
                        M0_ASSERT(M0_IN(old_state, (M0_PNDS_OFFLINE,
                                                    M0_PNDS_SNS_REBALANCING)));
                        M0_CNT_DEC(state->pst_nr_failures);
                        if (pool_failed_devs_tlink_is_in(pd))
                                pool_failed_devs_tlist_del(pd);
                        pool_failed_devs_tlink_fini(pd);
                        break;
                case M0_PNDS_OFFLINE:
                        M0_CNT_INC(state->pst_nr_failures);
                        M0_ASSERT(!pool_failed_devs_tlink_is_in(pd));
                        break;
                case M0_PNDS_FAILED:
                        /*
                         * Allocate an SNS repair spare slot only once, on the
                         * M0_PNDS_ONLINE -> M0_PNDS_FAILED or
                         * M0_PNDS_OFFLINE -> M0_PNDS_FAILED transition.
                         */
                        if (M0_IN(old_state, (M0_PNDS_UNKNOWN, M0_PNDS_ONLINE,
                                              M0_PNDS_OFFLINE)))
                                spare_usage_arr_update(pm, event);
                        if (!M0_IN(old_state, (M0_PNDS_OFFLINE,
                                               M0_PNDS_SNS_REBALANCING)))
                                M0_CNT_INC(state->pst_nr_failures);
                        if (!pool_failed_devs_tlink_is_in(pd) &&
                            !disk_is_in(&pool->po_failed_devices, pd))
                                pool_failed_devs_tlist_add_tail(
                                        &pool->po_failed_devices, pd);
                        break;
                case M0_PNDS_SNS_REPAIRING:
                case M0_PNDS_SNS_REPAIRED:
                case M0_PNDS_SNS_REBALANCING:
                        /* Change the repair spare slot usage. */
                        for (i = 0; i < state->pst_nr_spares; i++) {
                                if (spare_array[i].psu_device_index ==
                                    event->pe_index) {
                                        spare_array[i].psu_device_state =
                                                event->pe_state;
                                        break;
                                }
                        }

                        /* i == 0 in case of mdpool */
                        if (state->pst_nr_spares == 0 ||
                            (i == state->pst_nr_spares && i > 0))
                                M0_LOG(M0_ERROR, FID_F": this pool is in"
                                       " DUD state;"
                                       " event_index=%d event_state=%d",
                                       FID_P(&pm->pm_pver->pv_id),
                                       event->pe_index, event->pe_state);

                        /* must be found */
                        if (!pool_failed_devs_tlink_is_in(pd) &&
                            !disk_is_in(&pool->po_failed_devices, pd)) {
                                M0_CNT_INC(state->pst_nr_failures);
                                pool_failed_devs_tlist_add_tail(
                                        &pool->po_failed_devices, pd);
                        }
                        break;
                default:
                        /* Do nothing. */
                        ;
                }
        }

        M0_ALLOC_PTR(new_link);
        if (new_link == NULL) {
                rc = M0_ERR(-ENOMEM);
        } else {
                *new_link = event_link;
                poolmach_events_tlink_init_at_tail(new_link,
                                                   &state->pst_events_list);
        }

        if (event->pe_type == M0_POOL_DEVICE &&
            state->pst_nr_failures > state->pst_max_device_failures &&
            state->pst_max_device_failures > 0) { /* Skip mdpool. */
                if (state->pst_nr_failures >
                    state->pst_max_device_failures + 10)
                        M0_LOG(M0_INFO, FID_F": nr_failures:%d max_failures:%d"
                               " event_index:%d event_state:%d"
                               " (a node failure/restart"
                               " or expander reset?)",
                               FID_P(&pm->pm_pver->pv_id),
                               state->pst_nr_failures,
                               state->pst_max_device_failures,
                               event->pe_index,
                               event->pe_state);
                else
                        M0_LOG(M0_ERROR, FID_F": nr_failures:%d max_failures:%d"
                               " event_index:%d event_state:%d",
                               FID_P(&pm->pm_pver->pv_id),
                               state->pst_nr_failures,
                               state->pst_max_device_failures,
                               event->pe_index,
                               event->pe_state);
                m0_poolmach_event_list_dump_locked(pm);
        }
        pm->pm_pver->pv_is_dirty = state->pst_nr_failures > 0;
        /* Clear any dirty SNS flags set during a previous repair. */
        pm->pm_pver->pv_sns_flags = state->pst_nr_failures <=
                                    state->pst_max_device_failures ?
                                    0 : pm->pm_pver->pv_sns_flags;
        /* Finally: unlock the poolmach. */
        m0_rwlock_write_unlock(&pm->pm_lock);
        return M0_RC(rc);
}

M0_INTERNAL void m0_poolmach_state_last_cancel(struct m0_poolmach *pm)
{
        struct m0_poolmach_state      *state;
        struct m0_poolmach_event_link *link;

        M0_PRE(pm != NULL);

        state = pm->pm_state;

        m0_rwlock_write_lock(&pm->pm_lock);

        link = poolmach_events_tlist_tail(&state->pst_events_list);
        if (link != NULL) {
                poolmach_events_tlink_del_fini(link);
                m0_free(link);
        }

        m0_rwlock_write_unlock(&pm->pm_lock);
}

M0_INTERNAL int m0_poolmach_device_state(struct m0_poolmach *pm,
                                         uint32_t device_index,
                                         enum m0_pool_nd_state *state_out)
{
        M0_PRE(pm != NULL);
        M0_PRE(state_out != NULL);

        if (device_index >= pm->pm_state->pst_nr_devices)
                return M0_ERR_INFO(-EINVAL, "device index:%d total devices:%d",
                                   device_index,
                                   pm->pm_state->pst_nr_devices);

        m0_rwlock_read_lock(&pm->pm_lock);
        *state_out = pm->pm_state->pst_devices_array[device_index].pd_state;
        m0_rwlock_read_unlock(&pm->pm_lock);
        return 0;
}

M0_INTERNAL int m0_poolmach_node_state(struct m0_poolmach *pm,
                                       uint32_t node_index,
                                       enum m0_pool_nd_state *state_out)
{
        M0_PRE(pm != NULL);
        M0_PRE(state_out != NULL);

        if (node_index >= pm->pm_state->pst_nr_nodes)
                return M0_ERR(-EINVAL);

        m0_rwlock_read_lock(&pm->pm_lock);
        *state_out = pm->pm_state->pst_nodes_array[node_index].pn_state;
        m0_rwlock_read_unlock(&pm->pm_lock);

        return 0;
}

M0_INTERNAL int m0_poolmach_device_node_return(struct m0_poolmach *pm,
                                               uint32_t device_index,
                                               struct m0_poolnode **node_out)
{
        M0_PRE(pm != NULL);
        M0_PRE(node_out != NULL);

        if (device_index >= pm->pm_state->pst_nr_devices)
                return M0_ERR_INFO(-EINVAL, "device index:%d total devices:%d",
                                   device_index,
                                   pm->pm_state->pst_nr_devices);

        *node_out = pm->pm_state->pst_devices_array[device_index].pd_node;

        return 0;
}

M0_INTERNAL bool
m0_poolmach_device_is_in_spare_usage_array(struct m0_poolmach *pm,
                                           uint32_t device_index)
{
        return m0_exists(i, pm->pm_state->pst_nr_spares,
                         (pm->pm_state->pst_spare_usage_array[i].psu_device_index ==
                          device_index));
}

M0_INTERNAL int m0_poolmach_sns_repair_spare_query(struct m0_poolmach *pm,
                                                   uint32_t device_index,
                                                   uint32_t *spare_slot_out)
{
        struct m0_pool_spare_usage *spare_usage_array;
        enum m0_pool_nd_state       device_state;
        uint32_t                    i;
        int                         rc;

        M0_PRE(pm != NULL);
        M0_PRE(spare_slot_out != NULL);

        if (device_index >= pm->pm_state->pst_nr_devices)
                return M0_ERR(-EINVAL);

        rc = -ENOENT;
        m0_rwlock_read_lock(&pm->pm_lock);
        device_state = pm->pm_state->pst_devices_array[device_index].pd_state;
        if (!M0_IN(device_state, (M0_PNDS_FAILED, M0_PNDS_SNS_REPAIRING,
                                  M0_PNDS_SNS_REPAIRED,
                                  M0_PNDS_SNS_REBALANCING))) {
                goto out;
        }

        spare_usage_array = pm->pm_state->pst_spare_usage_array;
        for (i = 0; i < pm->pm_state->pst_nr_spares; i++) {
                if (spare_usage_array[i].psu_device_index == device_index) {
                        M0_ASSERT(M0_IN(spare_usage_array[i].psu_device_state,
                                        (M0_PNDS_FAILED,
                                         M0_PNDS_SNS_REPAIRING,
                                         M0_PNDS_SNS_REPAIRED,
                                         M0_PNDS_SNS_REBALANCING)));
                        *spare_slot_out = i;
                        rc = 0;
                        break;
                }
        }
out:
        m0_rwlock_read_unlock(&pm->pm_lock);

        return M0_RC(rc);
}

M0_INTERNAL bool
m0_poolmach_sns_repair_spare_contains_data(struct m0_poolmach *p,
                                           uint32_t spare_slot,
                                           bool check_state)
{
        const struct m0_pool_spare_usage *u =
                &p->pm_state->pst_spare_usage_array[spare_slot];
        return u->psu_device_index != POOL_PM_SPARE_SLOT_UNUSED &&
               u->psu_device_state != (check_state ? M0_PNDS_SNS_REPAIRING :
                                       M0_PNDS_FAILED);
}

M0_INTERNAL int m0_poolmach_sns_rebalance_spare_query(struct m0_poolmach *pm,
                                                      uint32_t device_index,
                                                      uint32_t *spare_slot_out)
{
        struct m0_pool_spare_usage *spare_usage_array;
        enum m0_pool_nd_state       device_state;
        uint32_t                    i;
        int                         rc;

        M0_PRE(pm != NULL);
        M0_PRE(spare_slot_out != NULL);

        if (device_index >= pm->pm_state->pst_nr_devices)
                return M0_ERR(-EINVAL);

        rc = -ENOENT;
        m0_rwlock_read_lock(&pm->pm_lock);
        device_state = pm->pm_state->pst_devices_array[device_index].pd_state;
        if (!M0_IN(device_state, (M0_PNDS_SNS_REBALANCING)))
                goto out;

        spare_usage_array = pm->pm_state->pst_spare_usage_array;
        for (i = 0; i < pm->pm_state->pst_max_device_failures; i++) {
                if (spare_usage_array[i].psu_device_index == device_index) {
                        M0_ASSERT(M0_IN(spare_usage_array[i].psu_device_state,
                                        (M0_PNDS_SNS_REBALANCING)));
                        *spare_slot_out = i;
                        rc = 0;
                        break;
                }
        }
out:
        m0_rwlock_read_unlock(&pm->pm_lock);

        return M0_RC(rc);
}

M0_INTERNAL int m0_poolmach_fid_to_idx(struct m0_poolmach *pm,
                                       struct m0_fid *fid, uint32_t *idx)
{
        uint32_t i;

        M0_LOG(M0_DEBUG, "note:"FID_F, FID_P(fid));
        for (i = 0; i < pm->pm_state->pst_nr_devices; ++i) {
                if (m0_fid_eq(&pm->pm_state->pst_devices_array[i].pd_id,
                              fid)) {
                        *idx = pm->pm_state->pst_devices_array[i].pd_index;
                        break;
                }
        }
        return i == pm->pm_state->pst_nr_devices ? -ENOENT : 0;
}

static void poolmach_event_queue_drop(struct m0_poolmach *pm,
                                      struct m0_poolmach_event *ev)
{
        struct m0_tl                *head = &pm->pm_state->pst_event_queue;
        struct m0_poolmach_event    *e;
        struct poolmach_equeue_link *scan;

        m0_tl_for (poolmach_equeue, head, scan) {
                e = &scan->pel_event;
                if (e->pe_index == ev->pe_index) {
                        poolmach_equeue_tlink_del_fini(scan);
                        m0_free(scan);
                }
        } m0_tl_endfor;
}

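/*
 * Applies the failure vector received from HA: each note is mapped to a
 * device index, spare usage is updated for devices under repair or
 * rebalance, and any stale deferred event for the device is dropped from
 * the event queue before the state transition is replayed.
 */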
M0_INTERNAL void m0_poolmach_failvec_apply(struct m0_poolmach *pm,
                                           const struct m0_ha_nvec *nvec)
{
        struct m0_poolmach_event  pme;
        struct m0_poolmach_state *state;
        struct m0_pooldev        *pd;
        struct m0_pool           *pool;
        uint32_t                  i;
        uint32_t                  pd_idx = 0;
        int                       rc;

        M0_PRE(!pm->pm_state->pst_su_initialised);

        pm->pm_state->pst_su_initialised = true;
        state = pm->pm_state;
        pool = pm->pm_pver->pv_pool;
        for (i = 0; i < nvec->nv_nr; ++i) {
                rc = m0_poolmach_fid_to_idx(pm, &nvec->nv_note[i].no_id,
                                            &pd_idx);
                if (rc == -ENOENT)
                        continue;
                M0_ASSERT(rc == 0);
                pme.pe_type  = M0_POOL_DEVICE;
                pme.pe_index = pd_idx;
                pme.pe_state = m0_ha2pm_state_map(nvec->nv_note[i].no_state);
                if (!M0_IN(pme.pe_state, (M0_PNDS_FAILED, M0_PNDS_OFFLINE))) {
                        /* Update the spare-usage-array. */
                        spare_usage_arr_update(pm, &pme);
                        pd = &state->pst_devices_array[pme.pe_index];
                        if (!pool_failed_devs_tlink_is_in(pd) &&
                            !disk_is_in(&pool->po_failed_devices, pd)) {
                                M0_CNT_INC(state->pst_nr_failures);
                                pool_failed_devs_tlist_add_tail(
                                        &pool->po_failed_devices, pd);
                        }
                }
                rc = m0_poolmach_state_transit(pm, &pme);
                /*
                 * Drop the already stale event for this disk
                 * from the queue of deferred events. This helps
                 * to avoid some bogus transitions (and errors in syslog),
                 * like from REPAIRED to ONLINE on startup.
                 */
                poolmach_event_queue_drop(pm, &pme);
                /*
                 * The failvec is applied only once, during initialisation.
                 * This operation should succeed.
                 */
                M0_ASSERT(rc == 0);
        }
}

static int poolmach_spare_inherit(struct m0_poolmach *pm, struct m0_pool *pool)
{
        struct m0_poolmach_event  pme;
        struct m0_poolmach_state *state;
        struct m0_pooldev        *pd;
        uint32_t                  i;
        uint32_t                  pd_idx = 0;
        int                       rc = 0;

        M0_ENTRY();

        M0_PRE(!pm->pm_state->pst_su_initialised);

        pm->pm_state->pst_su_initialised = true;
        state = pm->pm_state;

        if (pool_failed_devs_tlist_is_empty(&pool->po_failed_devices)) {
                M0_LEAVE("no failed devices");
                return M0_RC(0);
        }
        M0_LOG(M0_DEBUG, "length :%d", (int) pool_failed_devs_tlist_length(
               &pool->po_failed_devices));
        m0_tl_for (pool_failed_devs, &pool->po_failed_devices, pd) {
                for (i = 0; i < pm->pm_state->pst_nr_devices; ++i) {
                        if (m0_fid_eq(&state->pst_devices_array[i].pd_id,
                                      &pd->pd_id)) {
                                pd_idx = state->pst_devices_array[i].pd_index;
                                M0_LOG(M0_DEBUG, "failed device fid index:%d"
                                       FID_F, pd_idx,
                                       FID_P(&pm->pm_pver->pv_id));
                                break;
                        }
                }
                if (i == pm->pm_state->pst_nr_devices) {
                        M0_LOG(M0_DEBUG, "Failed device "FID_F" is not part of"
                               " pool version "FID_F, FID_P(&pd->pd_id),
                               FID_P(&pm->pm_pver->pv_id));
                        continue;
                }
                pme.pe_type  = M0_POOL_DEVICE;
                pme.pe_index = pd_idx;
                pme.pe_state = pd->pd_state;
                /*
                 * Update the spare-usage-array in case the device state
                 * is other than failed.
                 */
                if (pme.pe_state != M0_PNDS_FAILED) {
                        spare_usage_arr_update(pm, &pme);
                        M0_CNT_INC(state->pst_nr_failures);
                }
                rc = m0_poolmach_state_transit(pm, &pme);
                if (rc != 0)
                        break;
        } m0_tl_endfor;

        return M0_RC(rc);
}

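/*
 * Builds the spare usage state of the pool machine. For an actual pool
 * version the failure vector is fetched from HA and the call blocks on a
 * local channel until it arrives; a virtual pool version instead inherits
 * the state of the pool's already known failed devices via
 * poolmach_spare_inherit().
 */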
M0_INTERNAL int m0_poolmach_spare_build(struct m0_poolmach *mach,
                                        struct m0_pool *pool,
                                        enum m0_conf_pver_kind kind)
{
        int             rc = 0;
        struct m0_mutex chan_lock;
        struct m0_chan  chan;
        struct m0_clink clink;

        if (kind == M0_CONF_PVER_ACTUAL) {
                m0_mutex_init(&chan_lock);
                m0_chan_init(&chan, &chan_lock);
                m0_clink_init(&clink, NULL);
                m0_clink_add_lock(&chan, &clink);
                rc = m0_ha_failvec_fetch(&pool->po_id, mach, &chan);
                if (rc != 0)
                        goto end;
                /* Waiting to receive the failure vector from HA. */
                m0_chan_wait(&clink);
        } else if (kind == M0_CONF_PVER_VIRTUAL) {
                rc = poolmach_spare_inherit(mach, pool);
                return M0_RC(rc);
        }
end:
        if (kind == M0_CONF_PVER_ACTUAL) {
                m0_clink_del_lock(&clink);
                m0_clink_fini(&clink);
                m0_chan_fini_lock(&chan);
                m0_mutex_fini(&chan_lock);
        }
        return M0_RC(rc);
}

M0_INTERNAL void m0_poolmach_event_queue_apply(struct m0_poolmach *pm)
{
        struct m0_tl                *head = &pm->pm_state->pst_event_queue;
        struct m0_poolmach_event    *event;
        struct poolmach_equeue_link *scan;

        M0_PRE(pm->pm_state->pst_su_initialised);

        m0_tl_for (poolmach_equeue, head, scan) {
                event = &scan->pel_event;
                m0_poolmach_state_transit(pm, event);
                poolmach_equeue_tlink_del_fini(scan);
                m0_free(scan);
        } m0_tl_endfor;
}

static int lno = 0;

/* Change this value to make it more verbose, e.g. to M0_ERROR. */
#define POOL_TRACE_LEVEL M0_DEBUG

M0_INTERNAL void m0_poolmach_event_dump(const struct m0_poolmach_event *e)
{
        M0_LOG(POOL_TRACE_LEVEL, "%4d:pe_type=%6s pe_index=%x, pe_state=%10d",
               lno,
               e->pe_type == M0_POOL_DEVICE ? "device" : "node",
               e->pe_index, e->pe_state);
        lno++;
}

M0_INTERNAL void m0_poolmach_event_list_dump(struct m0_poolmach *pm)
{
        M0_LOG(POOL_TRACE_LEVEL, ">>>>>");
        m0_rwlock_read_lock(&pm->pm_lock);
        m0_poolmach_event_list_dump_locked(pm);
        m0_rwlock_read_unlock(&pm->pm_lock);
        M0_LOG(POOL_TRACE_LEVEL, "=====");
}

M0_INTERNAL void m0_poolmach_event_list_dump_locked(struct m0_poolmach *pm)
{
        struct m0_tl                  *head = &pm->pm_state->pst_events_list;
        struct m0_poolmach_event_link *scan;
        struct m0_poolmach_event      *e;
        struct m0_tl                  *qhead = &pm->pm_state->pst_event_queue;
        struct poolmach_equeue_link   *qscan;
        uint32_t                       i;

        m0_tl_for (poolmach_events, head, scan) {
                e = &scan->pel_event;
                i = e->pe_index;
                if (e->pe_type == M0_POOL_DEVICE &&
                    !M0_IN(pm->pm_state->pst_devices_array[i].pd_state,
                           (M0_PNDS_ONLINE, M0_PNDS_OFFLINE)))
                        M0_LOG(M0_INFO, "device[%d] "FID_F" state=%d", i,
                               FID_P(&pm->pm_state->pst_devices_array[i].pd_id),
                               e->pe_state);
        } m0_tl_endfor;

        m0_tl_for (poolmach_equeue, qhead, qscan) {
                e = &qscan->pel_event;
                i = e->pe_index;
                if (e->pe_type == M0_POOL_DEVICE &&
                    !M0_IN(pm->pm_state->pst_devices_array[i].pd_state,
                           (M0_PNDS_ONLINE, M0_PNDS_OFFLINE)))
                        M0_LOG(M0_INFO, "device[%d] "FID_F" state=%d", i,
                               FID_P(&pm->pm_state->pst_devices_array[i].pd_id),
                               e->pe_state);
        } m0_tl_endfor;
}

M0_INTERNAL void m0_poolmach_device_state_dump(struct m0_poolmach *pm)
{
        int i;

        M0_LOG(POOL_TRACE_LEVEL, ">>>>>");
        for (i = 0; i < pm->pm_state->pst_nr_devices; i++) {
                M0_LOG(POOL_TRACE_LEVEL, "%04d:device[%d] "FID_F" state=%d",
                       lno, i,
                       FID_P(&pm->pm_state->pst_devices_array[i].pd_id),
                       pm->pm_state->pst_devices_array[i].pd_state);
                lno++;
        }
        M0_LOG(POOL_TRACE_LEVEL, "=====");
}

M0_INTERNAL uint64_t m0_poolmach_nr_dev_failures(struct m0_poolmach *pm)
{
        struct m0_pool_spare_usage *spare_array;

        spare_array = pm->pm_state->pst_spare_usage_array;
        return m0_count(i, pm->pm_state->pst_nr_spares,
                        spare_array[i].psu_device_index !=
                        POOL_PM_SPARE_SLOT_UNUSED);
}

M0_INTERNAL void m0_poolmach_gob2cob(struct m0_poolmach *pm,
                                     const struct m0_fid *gfid,
                                     uint32_t idx,
                                     struct m0_fid *cob_fid)
{
        struct m0_poolmach_state *pms;

        M0_PRE(pm != NULL);

        pms = pm->pm_state;
        m0_fid_convert_gob2cob(gfid, cob_fid,
                               pms->pst_devices_array[idx].pd_sdev_idx);

        M0_LOG(M0_DEBUG, "gob fid "FID_F" @%d = cob fid "FID_F, FID_P(gfid),
               idx, FID_P(cob_fid));
}

#undef POOL_TRACE_LEVEL
#undef M0_TRACE_SUBSYSTEM

/*
 *  Local variables:
 *  c-indentation-style: "K&R"
 *  c-basic-offset: 8
 *  tab-width: 8
 *  fill-column: 80
 *  scroll-step: 1
 *  End:
 */