Motr  M0
crate_io.c
Go to the documentation of this file.
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
83 #include <stdlib.h>
84 #include <stdio.h>
85 #include <fcntl.h>
86 #include <sys/types.h>
87 #include <sys/time.h>
88 #include <assert.h>
89 #include <stdarg.h>
90 #include <unistd.h>
91 
92 #include "lib/finject.h"
93 #include "lib/trace.h"
94 #include "motr/client.h"
95 #include "motr/client_internal.h"
96 #include "motr/idx.h"
97 
98 #include "motr/m0crate/logger.h"
99 #include "motr/m0crate/workload.h"
102 
103 
104 void integrity(struct m0_uint128 object_id, unsigned char **md5toverify,
105  int block_count, int idx_op);
106 void list_index_return(struct workload *w);
107 
119 };
120 
121 typedef int (*cr_operation_t)(struct m0_workload_io *cwi,
122  struct m0_task_io *cti,
123  struct m0_op_context *op_ctx,
124  struct m0_obj *obj,
125  int free_slot,
126  int obj_idx,
127  int op_index);
128 
/*
 * Returns a pseudo-random offset in [0, end).
 * Two consecutive rand() results are combined into one 64-bit value so
 * that offsets larger than RAND_MAX can be produced.
 * NOTE(review): assumes a 64-bit size_t (the << 32 shift); 'end' must be
 * non-zero or the modulo is undefined -- confirm callers guarantee this.
 */
static size_t cr_rand___range_l(size_t end)
{
	size_t lo = rand();
	size_t hi = rand();

	return ((hi << 32) | lo) % end;
}
145 
147 {
148  *t1 = m0_time_add(*t1, t2);
149 }
150 
/*
 * Success ("stable") callback installed on every launched client op.
 *
 * Accounts the completed op into its owning task: marks the slot
 * CR_OP_COMPLETE (so cr_free_op_idx() can recycle it), bumps the
 * done-counter and accumulates the launch->finish latency.
 */
void cr_op_stable(struct m0_op *op)
{
	struct m0_op_context *op_context;
	struct m0_task_io *cti;
	m0_time_t op_time;

	M0_PRE(op != NULL);
	op_context = op->op_datum;
	if (op_context != NULL) {
		cti = op_context->coc_task;

		op_context->coc_op_finish = m0_time_now();
		cti->cti_op_status[op_context->coc_index] = CR_OP_COMPLETE;
		cti->cti_nr_ops_done++;
		/* Per-op latency, folded into the task's accumulator for
		 * the final report (see cr_cti_report()). */
		op_time = m0_time_sub(op_context->coc_op_finish,
		op_context->coc_op_launch);
		cr_time_acc(&cti->cti_op_acc_time, op_time);
		/* Drop the bufvec reference; it points into cti-owned
		 * storage and must not be freed via this context. */
		op_context->coc_buf_vec = NULL;
	}
}
172 
/*
 * Failure callback installed on every launched client op.
 *
 * Marks the slot CR_OP_COMPLETE so it can be cleaned up and reused;
 * the op's error code itself is captured later by cti_cleanup_op()
 * from op->op_rc.
 */
void cr_op_failed(struct m0_op *op)
{
	int op_idx;
	struct m0_op_context *op_context;

	M0_PRE(op != NULL);
	op_context = op->op_datum;

	cr_log(CLL_DEBUG, "Operation is failed:%d", op->op_sm.sm_rc);
	if (op_context != NULL) {
		op_idx = op_context->coc_index;
		op_context->coc_op_finish = m0_time_now();
		op_context->coc_task->cti_op_status[op_idx] = CR_OP_COMPLETE;
	}
}
189 
/*
 * Finalises and frees the op occupying slot 'i' of the task, releases
 * the per-op IO vectors for R/W ops and returns the slot to CR_OP_NEW
 * so that cr_free_op_idx() can hand it out again.
 */
static void cti_cleanup_op(struct m0_task_io *cti, int i)
{
	struct m0_op *op = cti->cti_ops[i];
	struct m0_op_context *op_ctx = op->op_datum;

	/*
	 * Only capture the op_rc when it is non-zero, otherwise a 0
	 * op_rc could overwrite an error captured into cti->cti_op_rc[i]
	 * if the operation was re-used which previously had failed */
	if (op->op_rc != 0)
		cti->cti_op_rcs[i] = op->op_rc;
	m0_op_fini(op);
	m0_op_free(op);
	cti->cti_ops[i] = NULL;
	if (op_ctx->coc_op_code == CR_WRITE ||
	    op_ctx->coc_op_code == CR_READ) {
		/* NOTE(review): coc_buf_vec appears to point into
		 * cti->cti_bufvec/cti_rd_bufvec and is NULLed in
		 * cr_op_stable(); verify ownership before these frees
		 * run on a non-NULL pointer. */
		m0_bufvec_free(op_ctx->coc_buf_vec);
		m0_bufvec_free(op_ctx->coc_attr);
		m0_free(op_ctx->coc_buf_vec);
		m0_free(op_ctx->coc_attr);
		m0_free(op_ctx->coc_index_vec);
	}
	m0_free(op_ctx);
	cti->cti_op_status[i] = CR_OP_NEW;
}
216 
217 int cr_free_op_idx(struct m0_task_io *cti, uint32_t nr_ops)
218 {
219  int i;
220 
221  for (i = 0; i < nr_ops; i++) {
222  if (cti->cti_op_status[i] == CR_OP_NEW ||
223  cti->cti_op_status[i] == CR_OP_COMPLETE)
224  break;
225  }
226 
227  M0_ASSERT(i < nr_ops);
228 
229  if (cti->cti_op_status[i] == CR_OP_COMPLETE)
230  cti_cleanup_op(cti, i);
231 
232  return i;
233 }
234 
235 static void cr_cti_cleanup(struct m0_task_io *cti, int nr_ops)
236 {
237  int i;
238 
239  for (i = 0; i < nr_ops; i++)
240  if (cti->cti_op_status[i] == CR_OP_COMPLETE)
241  cti_cleanup_op(cti, i);
242 }
243 
245  struct m0_task_io *cti,
246  struct m0_op_context *op_ctx,
247  int obj_idx,
248  int op_index)
249 {
250  int i;
251  int rc;
252  uint64_t bitmap_index;
253  uint64_t nr_segments;
254  uint64_t start_offset;
255  uint64_t op_start_offset = 0;
256  size_t rand_offset;
257  uint64_t io_size = cwi->cwi_io_size;
258  uint64_t offset;
259  struct m0_bufvec *buf_vec = NULL;
260  struct m0_bufvec *attr = NULL;
261  struct m0_indexvec *index_vec = NULL;
262  struct m0_bitmap segment_indices;
263 
264  index_vec = m0_alloc(sizeof *index_vec);
265  attr = m0_alloc(sizeof *attr);
266  if (index_vec == NULL || attr == NULL)
267  goto enomem;
268 
269  rc = m0_indexvec_alloc(index_vec, cwi->cwi_bcount_per_op) ?:
271  if (rc != 0)
272  goto enomem;
273  /*
274  * When io_size is not a multiple of cwi_bs then bitmap_index can
275  * become equal to io_size/cwi->cwi_bs, hence '+1' to the size
276  * of bitmap.
277  */
278  nr_segments = io_size/cwi->cwi_bs + 1;
279  rc = m0_bitmap_init(&segment_indices, nr_segments);
280  if (rc != 0)
281  goto enomem;
282 
283  if (!cwi->cwi_random_io)
284  op_start_offset = op_index * cwi->cwi_bs *
285  cwi->cwi_bcount_per_op;
286 
287  if (op_ctx->coc_op_code == CR_READ)
288  buf_vec = &cti->cti_rd_bufvec[obj_idx];
289  else
290  buf_vec = &cti->cti_bufvec[obj_idx];
291 
292  M0_ASSERT(buf_vec != NULL);
293  M0_ASSERT(nr_segments > cwi->cwi_bcount_per_op);
294  for (i = 0; i < cwi->cwi_bcount_per_op; i ++) {
295  if (cwi->cwi_random_io) {
296  do {
297  /* Generate the random offset. */
298  rand_offset = cr_rand___range_l(io_size);
299  /*
300  * m0_round_down() would prevent partially
301  * overlapping indexvec segments.
302  */
303  offset = m0_round_down(rand_offset,
304  cwi->cwi_bs);
305  bitmap_index = offset / cwi->cwi_bs;
306  } while (m0_bitmap_get(&segment_indices, bitmap_index));
307 
308  m0_bitmap_set(&segment_indices, bitmap_index, true);
309  } else
310  offset = op_start_offset + cwi->cwi_bs * i;
311 
312  /* If writing on shared object, start from the alloted range. */
313  if (cwi->cwi_share_object) {
314  start_offset = cti->cti_task_idx * io_size;
315  index_vec->iv_index[i] = start_offset + offset;
316  } else
317  index_vec->iv_index[i] = offset;
318 
319  index_vec->iv_vec.v_count[i] = cwi->cwi_bs;
320  }
321 
322  m0_bitmap_fini(&segment_indices);
323  op_ctx->coc_buf_vec = buf_vec;
324  op_ctx->coc_index_vec = index_vec;
326  op_ctx->coc_attr = NULL;
327 
328  return 0;
329 enomem:
330  if (index_vec != NULL)
331  m0_indexvec_free(index_vec);
332  m0_bufvec_free(buf_vec);
333  m0_free(attr);
334  m0_free(index_vec);
335  m0_free(buf_vec);
336  return -ENOMEM;
337 }
338 
339 static struct m0_fid * check_fid(struct m0_fid *id)
340 {
341  if (m0_fid_is_set(id) && m0_fid_is_valid(id))
342  return id;
343  else
344  return NULL;
345 }
346 
348  struct m0_task_io *cti,
349  struct m0_op_context *op_ctx,
350  struct m0_obj *obj,
351  int free_slot,
352  int obj_idx,
353  int op_index)
354 {
355  return m0_entity_create(check_fid(&cwi->cwi_pool_id),
356  &obj->ob_entity, &cti->cti_ops[free_slot]);
357 }
358 
364  struct m0_task_io *cti,
365  struct m0_op_context *op_ctx,
366  struct m0_obj *obj,
367  int free_slot,
368  int obj_idx,
369  int op_index)
370 {
371  const struct m0_uint128 *id = &cti->cti_ids[op_index];
372  M0_PRE(obj != NULL);
373  M0_SET0(obj);
375  return m0_entity_open(&obj->ob_entity, &cti->cti_ops[free_slot]);
376 }
377 
378 
380  struct m0_task_io *cti,
381  struct m0_op_context *op_ctx,
382  struct m0_obj *obj,
383  int free_slot,
384  int obj_idx,
385  int op_index)
386 {
387  return m0_entity_delete(&obj->ob_entity,
388  &cti->cti_ops[free_slot]);
389 }
390 
/*
 * Builds a WRITE op on 'obj' into the task's op slot 'free_slot'.
 *
 * Prepares the per-op index/buffer vectors via cr_io_vector_prep()
 * and constructs (but does not launch) the client op; launching is
 * the caller's job (cr_execute_ops()).
 * Returns 0 on success or a negative error code.
 */
int cr_io_write(struct m0_workload_io *cwi,
		struct m0_task_io *cti,
		struct m0_op_context *op_ctx,
		struct m0_obj *obj,
		int free_slot,
		int obj_idx,
		int op_index)
{
	int rc;

	op_ctx->coc_op_code = CR_WRITE;
	/* Set up extents and data buffers for this write. */
	rc = cr_io_vector_prep(cwi, cti, op_ctx, obj_idx, op_index);
	if (rc != 0)
		return rc;
	op_ctx->coc_index_vec, op_ctx->coc_buf_vec,
	op_ctx->coc_attr, 0, 0, &cti->cti_ops[free_slot]);
	if (rc != 0)
		M0_ERR(rc);
	return rc;
}
413 
/*
 * Builds a READ op on 'obj' into the task's op slot 'free_slot'.
 *
 * Mirror image of cr_io_write(): prepares the per-op index/buffer
 * vectors and constructs the client op; the caller launches it.
 * Returns 0 on success or a negative error code.
 */
int cr_io_read(struct m0_workload_io *cwi,
	       struct m0_task_io *cti,
	       struct m0_op_context *op_ctx,
	       struct m0_obj *obj,
	       int free_slot,
	       int obj_idx,
	       int op_index)
{
	int rc;

	op_ctx->coc_op_code = CR_READ;
	/* Set up extents and data buffers for this read. */
	rc = cr_io_vector_prep(cwi, cti, op_ctx, obj_idx, op_index);
	if (rc != 0)
		return rc;
	op_ctx->coc_index_vec, op_ctx->coc_buf_vec,
	op_ctx->coc_attr, 0, 0, &cti->cti_ops[free_slot]);
	if (rc != 0)
		M0_ERR(rc);
	return rc;
}
436 
440  [CR_WRITE] = cr_io_write,
441  [CR_READ] = cr_io_read,
443 };
444 
/*
 * Launches cti_nr_ops operations of 'op_code' against 'obj'.
 *
 * For each op: pick a free slot, allocate an op context, build the op
 * through the per-opcode constructor (opcode_operation_map) and launch
 * it with the shared callback set 'cbs'. Completion is collected
 * asynchronously by cr_op_stable()/cr_op_failed().
 * Returns 0, or the first constructor error (loop stops there).
 */
int cr_execute_ops(struct m0_workload_io *cwi, struct m0_task_io *cti,
		   struct m0_obj *obj, struct m0_op_ops *cbs,
		   enum m0_operations op_code, int obj_idx)
{
	int rc = 0;
	int i;
	int idx;
	struct m0_op_context *op_ctx;
	cr_operation_t spec_op;

	for (i = 0; i < cti->cti_nr_ops; i++) {
		/* We can launch at least one more operation. */
		idx = cr_free_op_idx(cti, cwi->cwi_max_nr_ops);
		op_ctx = m0_alloc(sizeof *op_ctx);
		M0_ASSERT(op_ctx != NULL);

		op_ctx->coc_index = idx;
		op_ctx->coc_obj_index = obj_idx;
		op_ctx->coc_task = cti;
		op_ctx->coc_cwi = cwi;
		op_ctx->coc_op_code = op_code;

		spec_op = opcode_operation_map[op_code];
		rc = spec_op(cwi, cti, op_ctx, obj, idx, obj_idx, i);
		/* NOTE(review): op_ctx is not freed on this error path --
		 * looks like a small leak; confirm. */
		if (rc != 0)
			break;

		M0_ASSERT(cti->cti_ops[idx] != NULL);
		cti->cti_ops[idx]->op_datum = op_ctx;
		m0_op_setup(cti->cti_ops[idx], cbs, 0);
		cti->cti_op_status[idx] = CR_OP_EXECUTING;
		op_ctx->coc_op_launch = m0_time_now();
		m0_op_launch(&cti->cti_ops[idx], 1);
	}
	return rc;
}
482 
/*
 * Folds this task's per-op accounting into the workload-global
 * accumulators and resets the task-local counters for the next phase.
 * NOTE(review): cwi_g is shared between tasks; updates here are
 * presumably serialised by cwi_g.cg_mutex -- confirm at call sites.
 */
void cr_cti_report(struct m0_task_io *cti, enum m0_operations op_code)
{
	struct m0_workload_io *cwi = cti->cti_cwi;

	cr_time_acc(&cwi->cwi_g.cg_cwi_acc_time[op_code], cti->cti_op_acc_time);
	cwi->cwi_ops_done[op_code] += cti->cti_nr_ops_done;

	cti->cti_op_acc_time = 0;
	cti->cti_nr_ops_done = 0;
}
495 
/*
 * Runs one namespace phase (CREATE, OPEN or DELETE) over all objects
 * of the task: launches one op per object, waits for completion,
 * accounts elapsed time into cwi_time[op_code] and reports/cleans up.
 *
 * Returns 0, or the first non-zero per-op rc captured in
 * cti->cti_op_rcs[] by cti_cleanup_op().
 */
int cr_op_namei(struct m0_workload_io *cwi, struct m0_task_io *cti,
		enum m0_operations op_code)
{
	int i;
	int idx;
	m0_time_t stime;
	m0_time_t etime;
	struct m0_op_context *op_ctx;
	struct m0_op_ops *cbs;
	cr_operation_t spec_op;
	int rc = 0;

	/* One callback set shared by every op launched in this phase. */
	cbs = m0_alloc(sizeof *cbs);
	M0_ASSERT(cbs != NULL);
	cbs->oop_executed = NULL;
	cbs->oop_stable = cr_op_stable;
	cbs->oop_failed = cr_op_failed;

	cr_log(CLL_TRACE, TIME_F" t%02d: %s objects...\n",
	op_code == CR_CREATE ? "Creating" :
	op_code == CR_OPEN ? "Opening" : "Deleting");
	stime = m0_time_now();

	for (i = 0; i < cwi->cwi_nr_objs; i++) {
		/* We can launch at least one more operation. */
		idx = cr_free_op_idx(cti, cwi->cwi_max_nr_ops);
		op_ctx = m0_alloc(sizeof *op_ctx);
		M0_ASSERT(op_ctx != NULL);

		op_ctx->coc_index = idx;
		op_ctx->coc_task = cti;
		op_ctx->coc_op_code = op_code;
		spec_op = opcode_operation_map[op_code];
		spec_op(cwi, cti, op_ctx, &cti->cti_objs[i], idx, 0, i);

		cti->cti_ops[idx]->op_datum = op_ctx;
		m0_op_setup(cti->cti_ops[idx], cbs, 0);
		cti->cti_op_status[idx] = CR_OP_EXECUTING;
		op_ctx->coc_op_launch = m0_time_now();
		m0_op_launch(&cti->cti_ops[idx], 1);
	}
	/* Task is done. Wait for all operations to complete. */
	for (i = 0; i < cwi->cwi_max_nr_ops; i++)

	etime = m0_time_sub(m0_time_now(), stime);
	/* Keep the slowest task's phase time as the workload's time. */
	if (etime > cwi->cwi_time[op_code])
		cwi->cwi_time[op_code] = etime;
	cr_cti_report(cti, op_code);
	cr_cti_cleanup(cti, cwi->cwi_max_nr_ops);
	m0_free(cbs);

	/*
	 * check for op_rc of all the client operations.
	 * If any of the client operation is failed, then set
	 * rc to cti->cti_op_rcs[i].
	 **/
	for (i=0; i < cwi->cwi_max_nr_ops; i++) {
		if (cti->cti_op_rcs[i] != 0) {
			rc = cti->cti_op_rcs[i];
			break;
		}
	}

	/*
	 * Create - creates an object and close it.
	 * Open *should* be called before any further operation.
	 * Open call would initialise client object, object would
	 * remain opened till all IO operation are complete.
	 */
	if (op_code == CR_CREATE && cti->cti_objs != NULL)
		for (i = 0; i < cti->cti_cwi->cwi_nr_objs; i++)
			m0_obj_fini(&cti->cti_objs[i]);
	cr_log(CLL_TRACE, TIME_F" t%02d: %s done.\n",
	op_code == CR_CREATE ? "Creation" :
	op_code == CR_OPEN ? "Open" : "Deletion");

	return rc;
}
583 
/*
 * Runs the IO phase (CR_WRITE or CR_READ) over all objects of the
 * task: launches the ops via cr_execute_ops(), waits for completion,
 * accounts elapsed time into cwi_time[op_code] and cleans up.
 * Returns 0, -ENOMEM, or the first launch error.
 */
int cr_op_io(struct m0_workload_io *cwi, struct m0_task_io *cti,
	     enum m0_operations op_code)
{
	int rc = 0;
	int i;
	m0_time_t stime;
	m0_time_t etime;
	struct m0_op_ops *cbs;

	M0_ALLOC_PTR(cbs);
	if (cbs == NULL) {
		/* free(NULL) is a no-op; kept as in original. */
		m0_free(cbs);
		return -ENOMEM;
	}

	/* One callback set shared by every op launched in this phase. */
	cbs->oop_executed = NULL;
	cbs->oop_stable = cr_op_stable;
	cbs->oop_failed = cr_op_failed;
	cr_log(CLL_TRACE, TIME_F" t%02d: %s objects...\n",
	op_code == CR_WRITE ? "Writing" : "Reading");
	stime = m0_time_now();

	for (i = 0; i < cwi->cwi_nr_objs; i++) {
		rc = cr_execute_ops(cwi, cti, &cti->cti_objs[i], cbs, op_code,
		i);
		if (rc != 0)
			break;

	}
	/* Wait for all operations to complete. */
	for (i = 0; i < cwi->cwi_max_nr_ops; i++)

	etime = m0_time_sub(m0_time_now(), stime);
	/* Keep the slowest task's phase time as the workload's time. */
	if (etime > cwi->cwi_time[op_code])
		cwi->cwi_time[op_code] = etime;
	cr_cti_report(cti, op_code);
	cr_cti_cleanup(cti, cwi->cwi_max_nr_ops);
	m0_free(cbs);
	cr_log(CLL_TRACE, TIME_F" t%02d: %s done.\n",
	op_code == CR_WRITE ? "Write" : "Read");

	return rc;
}
634 
640 {
641  int rc;
642  struct m0_workload_io *cwi;
643 
644  cwi = cti->cti_cwi;
645 
647  M0_ASSERT(cwi->cwi_opcode == CR_WRITE);
649  if (!cwi->cwi_g.cg_created) {
650  rc = cr_op_namei(cti->cti_cwi, cti, CR_CREATE);
651  if (rc != 0) {
653  return rc;
654  }
655  cwi->cwi_g.cg_created = true;
656  }
658 
659  cr_op_io(cti->cti_cwi, cti, cwi->cwi_opcode);
660 
662  cwi->cwi_g.cg_nr_tasks--;
663  if (cwi->cwi_g.cg_nr_tasks == 0) {
664  cr_op_namei(cti->cti_cwi, cti, CR_DELETE);
665  cwi->cwi_g.cg_created = false;
666  }
668 
669  return 0;
670 }
671 
/*
 * Executes the full life-cycle of one task according to the workload
 * opcode.
 *
 * Most opcodes first create and open the objects, then run the
 * requested operation(s), and finally delete the objects. CR_POPULATE
 * creates and writes but keeps the objects; CR_CLEANUP and
 * CR_READ_ONLY operate on pre-existing objects (OPEN only).
 * Note that only selected steps propagate their rc.
 */
int cr_task_execute(struct m0_task_io *cti)
{
	struct m0_workload_io *cwi = cti->cti_cwi;
	int rc = 0;
	switch (cwi->cwi_opcode) {
	case CR_CREATE:
		/* create/open/delete cycle; rc comes from OPEN. */
		cr_op_namei(cwi, cti, CR_CREATE);
		rc = cr_op_namei(cwi, cti, CR_OPEN);
		if (rc == 0)
			cr_op_namei(cwi, cti, CR_DELETE);
		break;
	case CR_OPEN:
		cr_op_namei(cwi, cti, CR_CREATE);
		rc = cr_op_namei(cwi, cti, CR_OPEN);
		if (rc == 0)
			cr_op_namei(cwi, cti, CR_DELETE);
		break;
	case CR_WRITE:
		cr_op_namei(cwi, cti, CR_CREATE);
		rc = cr_op_namei(cwi, cti, CR_OPEN);
		if (rc == 0) {
			cr_op_io(cwi, cti, CR_WRITE);
			cr_op_namei(cwi, cti, CR_DELETE);
		}
		break;
	case CR_READ:
		/* A read workload writes the data first so there is
		 * something to read back. */
		cr_op_namei(cwi, cti, CR_CREATE);
		rc = cr_op_namei(cwi, cti, CR_OPEN);
		if (rc == 0) {
			cr_op_io(cwi, cti, CR_WRITE);
			cr_op_io(cwi, cti, CR_READ);
			cr_op_namei(cwi, cti, CR_DELETE);
		}
		break;
	case CR_DELETE:
		cr_op_namei(cwi, cti, CR_CREATE);
		rc = cr_op_namei(cwi, cti, CR_OPEN);
		if (rc == 0)
			cr_op_namei(cwi, cti, CR_DELETE);
		break;
	case CR_POPULATE:
		/* Leave the written objects behind for later phases. */
		cr_op_namei(cwi, cti, CR_CREATE);
		rc = cr_op_namei(cwi, cti, CR_OPEN);
		if (rc == 0)
			rc = cr_op_io(cwi, cti, CR_WRITE);
		break;
	case CR_CLEANUP:
		/* Objects already exist: open, then delete. */
		rc = cr_op_namei(cwi, cti, CR_OPEN);
		if (rc == 0)
			cr_op_namei(cwi, cti, CR_DELETE);
		break;
	case CR_READ_ONLY:
		/* Objects already exist and stay: open and read only. */
		rc = cr_op_namei(cwi, cti, CR_OPEN);
		if (rc == 0) {
			cr_op_io(cwi, cti, CR_READ);
		}
		break;
	}
	return rc;
}
737 
738 
739 static int cr_adopt_motr_thread(struct m0_task_io *cti)
740 {
741  int rc = 0;
742  struct m0_thread *mthread;
743  cti->cti_mthread = NULL;
744  if (m0_thread_tls() == NULL) {
745  mthread = m0_alloc(sizeof(struct m0_thread));
746  if (mthread == NULL)
747  return -ENOMEM;
748 
749  memset(mthread, 0, sizeof(struct m0_thread));
750  rc = m0_thread_adopt(mthread, m0_instance->m0c_motr);
751  cti->cti_mthread = mthread;
752  }
753  return rc;
754 }
755 
756 static int cr_release_motr_thread(struct m0_task_io *cti)
757 {
758  if (cti->cti_mthread) {
759  m0_thread_shun();
760  m0_free(cti->cti_mthread);
761  }
762  return 0;
763 }
764 
765 int cr_buffer_read(char *buffer, const char *filename, uint64_t size)
766 {
767  FILE *fp;
768  size_t bytes;
769 
770  fp = fopen(filename, "r");
771  if (fp == NULL) {
772  cr_log(CLL_ERROR, "Unable to open a file: %s\n", filename);
773  return -errno;
774  }
775 
776  bytes = fread(buffer, 1, size, fp);
777  if (bytes < size) {
778  fclose(fp);
779  return -EINVAL;
780  }
781  fclose(fp);
782  return 0;
783 }
784 
/*
 * Returns a non-zero 64-bit pseudo-random number built from two
 * consecutive rand() results.
 *
 * The two rand() calls are sequenced explicitly: in the original
 * expression ((uint64_t)rand() << 32) | rand() the evaluation order
 * of the operands of '|' is unspecified, so which call lands in the
 * high half was compiler-dependent.
 */
static uint64_t nz_rand(void)
{
	uint64_t r;

	do {
		uint64_t hi = (uint64_t)rand();	/* first call -> high half */
		uint64_t lo = (uint64_t)rand();	/* second call -> low half */

		r = (hi << 32) | lo;
	} while (r == 0);

	return r;
}
795 
796 void cr_get_oids(struct m0_uint128 *ids, uint32_t nr_objs)
797 {
798  int i;
799  for (i = 0; i < nr_objs; i++) {
800  ids[i].u_lo = nz_rand();
801  ids[i].u_hi = nz_rand();
802  /* Highest 8 bits are left for Motr. */
803  ids[i].u_hi = ids[i].u_hi & ~(0xFFUL << 56);
804  cr_log(CLL_TRACE, "oid %016" PRIx64 ":%016" PRIx64 "\n",
805  ids[i].u_hi, ids[i].u_lo);
806  }
807 }
808 
809 void cr_task_bufs_free(struct m0_task_io *cti, int idx)
810 {
812  M0_DEFAULT_BUF_SHIFT);
814  M0_DEFAULT_BUF_SHIFT);
815 }
816 
817 void cr_task_io_cleanup(struct m0_task_io **cti_p)
818 {
819  int i;
820  struct m0_task_io *cti = *cti_p;
821  struct m0_workload_io *cwi = cti->cti_cwi;
822 
823  if (cti->cti_objs != NULL)
824  for (i = 0; i < cti->cti_cwi->cwi_nr_objs; i++) {
825  m0_obj_fini(&cti->cti_objs[i]);
826  if (cwi->cwi_opcode != CR_CLEANUP)
827  cr_task_bufs_free(cti, i);
828  }
829  m0_free(cti->cti_objs);
830  m0_free(cti->cti_ops);
831  m0_free(cti->cti_bufvec);
832  m0_free(cti->cti_rd_bufvec);
833  m0_free(cti->cti_op_status);
834  m0_free(cti->cti_op_rcs);
835  m0_free0(cti_p);
836 }
837 
839  struct m0_task_io *cti)
840 {
841  int i;
842  int k;
843  int rc;
844 
845  M0_ALLOC_ARR(cti->cti_bufvec, cwi->cwi_nr_objs);
847 
848  for (i = 0; i < cwi->cwi_nr_objs; i++) {
850  cwi->cwi_bcount_per_op,
851  cwi->cwi_bs,
852  M0_DEFAULT_BUF_SHIFT) ?:
854  cwi->cwi_bcount_per_op,
855  cwi->cwi_bs,
856  M0_DEFAULT_BUF_SHIFT);
857  if (rc != 0)
858  return rc;
859  for (k = 0; k < cwi->cwi_bcount_per_op; k++) {
860  rc = cr_buffer_read(cti->cti_bufvec[i].ov_buf[k],
861  cwi->cwi_filename, cwi->cwi_bs);
862  if (rc != 0)
863  return rc;
864  }
865  }
866  return 0;
867 }
868 
870  struct m0_task_io **cti_out)
871 {
872  int rc;
873  int i;
874  struct m0_task_io *cti;
875 
876  if (M0_ALLOC_PTR(*cti_out) == NULL)
877  return -ENOMEM;
878  cti = *cti_out;
879 
880  cti->cti_cwi = cwi;
881  cti->cti_progress = 0;
882 
883  if (cwi->cwi_opcode != CR_CLEANUP) {
884  cti->cti_nr_ops = (cwi->cwi_io_size /
885  (cwi->cwi_bs * cwi->cwi_bcount_per_op)) ?: 1;
886  rc = cr_task_prep_bufs(cwi, cti);
887  if (rc != 0)
888  goto error_rc;
889  }
890 
891  M0_ALLOC_ARR(cti->cti_ids, cwi->cwi_nr_objs);
892  if (cti->cti_ids == NULL)
893  goto enomem;
894 
895  if (cwi->cwi_share_object) {
896  cti->cti_ids[0] = cwi->cwi_g.cg_oid;
897  } else if (M0_IN(cwi->cwi_opcode, (CR_POPULATE, CR_CLEANUP,
898  CR_READ_ONLY))) {
899  int i;
900  for (i = 0; i< cwi->cwi_nr_objs; i++) {
901  cwi->cwi_start_obj_id.u_lo++;
902  cti->cti_ids[i] = cwi->cwi_start_obj_id;
903  }
904  } else {
905  cti->cti_start_offset = 0;
906  cr_get_oids(cti->cti_ids, cwi->cwi_nr_objs);
907  }
908 
909  M0_ALLOC_ARR(cti->cti_objs, cwi->cwi_nr_objs);
910  if (cti->cti_objs == NULL)
911  goto enomem;
912 
913  for (i = 0; i < cwi->cwi_nr_objs; i++)
914  m0_obj_init(&cti->cti_objs[i],
916  &cti->cti_ids[i], cwi->cwi_layout_id);
917 
918  M0_ALLOC_ARR(cti->cti_ops, cwi->cwi_max_nr_ops);
919  if (cti->cti_ops == NULL)
920  goto enomem;
921 
923  if (cti->cti_op_status == NULL)
924  goto enomem;
925 
927  if (cti->cti_op_rcs == NULL)
928  goto enomem;
929 
930  return 0;
931 enomem:
932  rc = -ENOMEM;
933 error_rc:
934  cr_task_io_cleanup(cti_out);
935  return rc;
936 }
937 
939 {
940  int i;
941  int rc;
942  uint32_t nr_tasks;
943  struct m0_workload_io *cwi = w->u.cw_io;
944  struct m0_task_io **cti;
945 
946  nr_tasks = w->cw_nr_thread;
947  if (cwi->cwi_opcode == CR_CLEANUP)
948  cwi->cwi_share_object = false;
949 
950  if (cwi->cwi_share_object) {
951  /* Generate only one id */
952  cwi->cwi_nr_objs = 1;
953  cr_get_oids(&cwi->cwi_g.cg_oid, 1);
954  cwi->cwi_g.cg_nr_tasks = nr_tasks;
955  cwi->cwi_g.cg_created = false;
956  }
957 
958  for (i = 0; i < nr_tasks; i++) {
959  cti = (struct m0_task_io **)&tasks[i].u.m0_task;
960  rc = cr_task_prep_one(cwi, cti);
961  if (rc != 0) {
962  cti = NULL;
963  return rc;
964  }
965  M0_ASSERT(*cti != NULL);
966 
967  (*cti)->cti_task_idx = i;
968  }
969  return 0;
970 }
971 
973 {
974  int i;
975  uint32_t nr_tasks;
976  struct m0_task_io *cti;
977 
978  nr_tasks = w->cw_nr_thread;
979 
980  for (i = 0; i < nr_tasks; i++) {
981  cti = tasks[i].u.m0_task;
982  if (cti != NULL)
983  cr_task_io_cleanup(&cti);
984  }
985 
986  return 0;
987 }
988 
990 {
991  struct m0_workload_io *cwi;
993 
994  cwi = w->u.cw_io;
995  time_now = m0_time_now();
996 
997  if (cwi->cwi_execution_time == M0_TIME_NEVER)
998  return true;
999  return m0_time_sub(time_now, cwi->cwi_start_time) <
1000  cwi->cwi_execution_time ? true : false;
1001 }
1002 
1004 static uint64_t bw(uint64_t bytes, m0_time_t time)
1005 {
1006  return bytes * M0_TIME_ONE_MSEC / (time / 1000);
1007 }
1008 
/*
 * Top-level driver for the IO workload.
 *
 * Prepares tasks, runs them for up to cwi_rounds iterations (or until
 * the configured execution time expires), releases them, and finally
 * prints a summary: per-phase times, op counts and, when IO happened,
 * KiB and KiB/s for write and read.
 */
void run(struct workload *w, struct workload_task *tasks)
{
	int i;
	uint64_t written;
	uint64_t read;
	int rc;
	struct m0_workload_io *cwi = w->u.cw_io;
	struct m0_uint128 start_obj_id;

	/* Remember the configured start id; CR_READ_ONLY rounds restore
	 * it so every round reads the same populated objects. */
	start_obj_id = cwi->cwi_start_obj_id;
	m0_mutex_init(&cwi->cwi_g.cg_mutex);
	cwi->cwi_start_time = m0_time_now();
	if (M0_IN(cwi->cwi_opcode, (CR_POPULATE, CR_CLEANUP)) &&
		cwi->cwi_start_obj_id = M0_ID_APP;
	for (i = 0; i < cwi->cwi_rounds && cr_time_not_expired(w); i++) {
		cr_log(CLL_INFO, "cwi->cwi_rounds : %d, iteration : %d\n",
		cwi->cwi_rounds, i);
		rc = cr_tasks_prepare(w, tasks);
		if (rc != 0) {
			cr_tasks_release(w, tasks);
			m0_mutex_fini(&cwi->cwi_g.cg_mutex);
			cr_log(CLL_ERROR, "Task preparation failed.\n");
			return;
		}
		workload_start(w, tasks);
		workload_join(w, tasks);
		cr_tasks_release(w, tasks);

		/*
		 * When cwi->cwi_rounds > 1 then we need to re-set starting
		 * object id to original one, so the read only operation can
		 * start reading the populated data from that object index.
		 **/
		if (cwi->cwi_opcode == CR_READ_ONLY) {
			cwi->cwi_start_obj_id = start_obj_id;
		}
	}

	m0_mutex_fini(&cwi->cwi_g.cg_mutex);
	cwi->cwi_finish_time = m0_time_now();

	/* Summary report. */
	cr_log(CLL_INFO, "I/O workload is finished.\n");
	cr_log(CLL_INFO, "Total: time="TIME_F" objs=%d ops=%" PRIu64 "\n",
	cwi->cwi_nr_objs * w->cw_nr_thread,
	cwi->cwi_ops_done[CR_WRITE] + cwi->cwi_ops_done[CR_READ]);
	if (cwi->cwi_ops_done[CR_CREATE] != 0)
		cr_log(CLL_INFO, "C: "TIME_F" ("TIME_F" per op)\n",
		TIME_P(cwi->cwi_time[CR_CREATE]),
		cwi->cwi_ops_done[CR_CREATE]));
	if (cwi->cwi_ops_done[CR_OPEN] != 0)
		cr_log(CLL_INFO, "O: "TIME_F" ("TIME_F" per op)\n",
		TIME_P(cwi->cwi_time[CR_OPEN]),
		cwi->cwi_ops_done[CR_OPEN]));
	if (cwi->cwi_ops_done[CR_DELETE] != 0)
		cr_log(CLL_INFO, "D: "TIME_F" ("TIME_F" per op)\n",
		TIME_P(cwi->cwi_time[CR_DELETE]),
		cwi->cwi_ops_done[CR_DELETE]));
	if (cwi->cwi_ops_done[CR_WRITE] == 0)
		return;
	/* Total bytes = block size * blocks per op * completed ops. */
	written = cwi->cwi_bs * cwi->cwi_bcount_per_op *
	cwi->cwi_ops_done[CR_WRITE];
	cr_log(CLL_INFO, "W: "TIME_F" ("TIME_F" per op), "
	"%" PRIu64 " KiB, %" PRIu64 " KiB/s\n",
	TIME_P(cwi->cwi_time[CR_WRITE]),
	cwi->cwi_ops_done[CR_WRITE]), written/1024,
	bw(written, cwi->cwi_time[CR_WRITE]) /1024);
	if (cwi->cwi_ops_done[CR_READ] == 0)
		return;
	read = cwi->cwi_bs * cwi->cwi_bcount_per_op *
	cwi->cwi_ops_done[CR_READ];
	cr_log(CLL_INFO, "R: "TIME_F" ("TIME_F" per op), "
	"%" PRIu64 " KiB, %" PRIu64 " KiB/s\n",
	TIME_P(cwi->cwi_time[CR_READ]),
	cwi->cwi_ops_done[CR_READ]), read/1024,
	bw(read, cwi->cwi_time[CR_READ]) /1024);
}
1092 
/*
 * Per-thread entry point invoked by the workload framework for one
 * task: adopts the thread into Motr, then runs either the
 * shared-object flavour or the regular per-task flavour, logging
 * failures. 'op' is unused here; op scheduling happens inside the
 * cr_* helpers.
 */
void m0_op_run(struct workload *w, struct workload_task *task,
	       const struct workload_op *op)
{
	struct m0_task_io *cti = task->u.m0_task;
	int rc;

	/* Nothing to do if task preparation failed earlier. */
	if (cti == NULL)
		return;

	rc = cr_adopt_motr_thread(cti);
	if (rc < 0)
		cr_log(CLL_ERROR, "Motr adoption failed with rc=%d", rc);

	if (cti->cti_cwi->cwi_share_object)
		cr_task_share_execute(cti);
	else {
		rc = cr_task_execute(cti);
		if (rc < 0)
			cr_log(CLL_ERROR, "task execution failed with rc=%d", rc);
	}
}
1116 
1117 /*
1118  * Local variables:
1119  * c-indentation-style: "K&R"
1120  * c-basic-offset: 8
1121  * tab-width: 8
1122  * fill-column: 80
1123  * scroll-step: 1
1124  * End:
1125  */
struct m0_task_io * coc_task
Definition: crate_io.c:115
uint64_t id
Definition: cob.h:2380
#define M0_PRE(cond)
#define M0_ALLOC_ARR(arr, nr)
Definition: memory.h:84
uint32_t cwi_rounds
Definition: crate_client.h:175
struct m0_mutex cg_mutex
Definition: crate_client.h:155
M0_INTERNAL int m0_bitmap_init(struct m0_bitmap *map, size_t nr)
Definition: bitmap.c:86
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
Definition: client.h:788
int cr_buffer_read(char *buffer, const char *filename, uint64_t size)
Definition: crate_io.c:765
uint64_t cwi_ops_done[CR_OPS_NR]
Definition: crate_client.h:171
M0_INTERNAL int m0_indexvec_alloc(struct m0_indexvec *ivec, uint32_t len)
Definition: vec.c:532
#define NULL
Definition: misc.h:38
bool cr_time_not_expired(struct workload *w)
Definition: crate_io.c:989
M0_INTERNAL void m0_bitmap_fini(struct m0_bitmap *map)
Definition: bitmap.c:97
struct m0_semaphore cti_max_ops_sem
Definition: crate_client.h:210
int32_t cwi_nr_objs
Definition: crate_client.h:174
int cr_op_io(struct m0_workload_io *cwi, struct m0_task_io *cti, enum m0_operations op_code)
Definition: crate_io.c:584
void list_index_return(struct workload *w)
uint64_t cti_nr_ops_done
Definition: crate_client.h:201
M0_INTERNAL bool entity_id_is_valid(const struct m0_uint128 *id)
Definition: client.c:354
const m0_time_t M0_TIME_NEVER
Definition: time.c:108
void m0_op_fini(struct m0_op *op)
Definition: client.c:847
union @126 u
uint64_t m0_time_t
Definition: time.h:37
void cr_cti_report(struct m0_task_io *cti, enum m0_operations op_code)
Definition: crate_io.c:483
void m0_op_run(struct workload *w, struct workload_task *task, const struct workload_op *op)
Definition: crate_io.c:1093
void(* oop_executed)(struct m0_op *op)
Definition: client.h:909
void run(struct workload *w, struct workload_task *tasks)
Definition: crate_io.c:1009
int cr_task_prep_one(struct m0_workload_io *cwi, struct m0_task_io **cti_out)
Definition: crate_io.c:869
uint32_t * cti_op_status
Definition: crate_client.h:194
uint64_t cwi_bs
Definition: crate_client.h:163
void cr_time_acc(m0_time_t *t1, m0_time_t t2)
Definition: crate_io.c:146
uint64_t cti_start_offset
Definition: crate_client.h:197
m0_operations
Definition: crate_client.h:127
uint64_t u_lo
Definition: types.h:58
M0_INTERNAL void m0_indexvec_free(struct m0_indexvec *ivec)
Definition: vec.c:553
static void cr_cti_cleanup(struct m0_task_io *cti, int nr_ops)
Definition: crate_io.c:235
char * cwi_filename
Definition: crate_client.h:184
int cr_task_share_execute(struct m0_task_io *cti)
Definition: crate_io.c:639
void cr_op_stable(struct m0_op *op)
Definition: crate_io.c:151
int cr_tasks_prepare(struct workload *w, struct workload_task *tasks)
Definition: crate_io.c:938
M0_INTERNAL int m0_thread_adopt(struct m0_thread *thread, struct m0 *instance)
Definition: thread.c:127
#define M0_SET0(obj)
Definition: misc.h:64
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
struct m0_uint128 cg_oid
Definition: crate_client.h:151
int32_t cwi_opcode
Definition: crate_client.h:178
#define TIME_P(t)
Definition: time.h:45
M0_INTERNAL bool m0_fid_is_set(const struct m0_fid *fid)
Definition: fid.c:106
m0_time_t cwi_finish_time
Definition: crate_client.h:181
void ** ov_buf
Definition: vec.h:149
static struct foo * obj
Definition: tlist.c:302
#define PRIx64
Definition: types.h:61
int cr_task_execute(struct m0_task_io *cti)
Definition: crate_io.c:672
void workload_start(struct workload *w, struct workload_task *task)
Definition: crate.c:340
static int cr_adopt_motr_thread(struct m0_task_io *cti)
Definition: crate_io.c:739
int cr_namei_open(struct m0_workload_io *cwi, struct m0_task_io *cti, struct m0_op_context *op_ctx, struct m0_obj *obj, int free_slot, int obj_idx, int op_index)
Definition: crate_io.c:363
int cr_free_op_idx(struct m0_task_io *cti, uint32_t nr_ops)
Definition: crate_io.c:217
m0_time_t cwi_time[CR_OPS_NR]
Definition: crate_client.h:183
struct cwi_global cwi_g
Definition: crate_client.h:160
int cr_execute_ops(struct m0_workload_io *cwi, struct m0_task_io *cti, struct m0_obj *obj, struct m0_op_ops *cbs, enum m0_operations op_code, int obj_idx)
Definition: crate_io.c:445
void cr_op_failed(struct m0_op *op)
Definition: crate_io.c:173
const struct m0_uint128 M0_ID_APP
Definition: client.c:92
int m0_bufvec_alloc_aligned(struct m0_bufvec *bufvec, uint32_t num_segs, m0_bcount_t seg_size, unsigned shift)
Definition: vec.c:355
M0_INTERNAL uint64_t m0_round_down(uint64_t val, uint64_t size)
Definition: misc.c:187
M0_INTERNAL int m0_bufvec_alloc(struct m0_bufvec *bufvec, uint32_t num_segs, m0_bcount_t seg_size)
Definition: vec.c:220
op
Definition: libdemo.c:64
int m0_obj_op(struct m0_obj *obj, enum m0_obj_opcode opcode, struct m0_indexvec *ext, struct m0_bufvec *data, struct m0_bufvec *attr, uint64_t mask, uint32_t flags, struct m0_op **op)
Definition: io.c:717
struct m0_bufvec * coc_buf_vec
Definition: crate_io.c:116
M0_INTERNAL struct m0_thread_tls * m0_thread_tls(void)
Definition: kthread.c:67
int cr_op_namei(struct m0_workload_io *cwi, struct m0_task_io *cti, enum m0_operations op_code)
Definition: crate_io.c:496
m0_time_t cg_cwi_acc_time[CR_OPS_NR]
Definition: crate_client.h:154
static void t2(int n)
Definition: thread.c:48
M0_INTERNAL void m0_bufvec_free(struct m0_bufvec *bufvec)
Definition: vec.c:395
struct m0_op ** cti_ops
Definition: crate_client.h:199
union workload::@328 u
void cr_task_io_cleanup(struct m0_task_io **cti_p)
Definition: crate_io.c:817
#define TIME_F
Definition: time.h:44
int i
Definition: dir.c:1033
#define PRIu64
Definition: types.h:58
uint32_t cwi_bcount_per_op
Definition: crate_client.h:168
cr_operation_t opcode_operation_map[]
Definition: crate_io.c:437
Definition: client.h:641
static struct m0_fid * check_fid(struct m0_fid *id)
Definition: crate_io.c:339
void * cw_io
Definition: workload.h:98
return M0_ERR(-EOPNOTSUPP)
static void attr(struct m0_addb2__context *ctx, const uint64_t *v, char *buf)
Definition: dump.c:949
void workload_join(struct workload *w, struct workload_task *task)
Definition: crate.c:363
void(* oop_stable)(struct m0_op *op)
Definition: client.h:911
int cr_io_read(struct m0_workload_io *cwi, struct m0_task_io *cti, struct m0_op_context *op_ctx, struct m0_obj *obj, int free_slot, int obj_idx, int op_index)
Definition: crate_io.c:414
#define m0_free0(pptr)
Definition: memory.h:77
m0_time_t cti_op_acc_time
Definition: crate_client.h:207
#define M0_ASSERT(cond)
struct m0_bufvec * cti_bufvec
Definition: crate_client.h:204
union workload_task::@329 u
m0_time_t m0_time_now(void)
Definition: time.c:134
void m0_obj_fini(struct m0_obj *obj)
Definition: client.c:467
int cr_task_prep_bufs(struct m0_workload_io *cwi, struct m0_task_io *cti)
Definition: crate_io.c:838
struct m0_uint128 * cti_ids
Definition: crate_client.h:206
struct m0_indexvec * coc_index_vec
Definition: crate_io.c:118
void cr_task_bufs_free(struct m0_task_io *cti, int idx)
Definition: crate_io.c:809
int rand(void)
void m0_op_launch(struct m0_op **op, uint32_t nr)
Definition: client.c:725
uint64_t u_hi
Definition: types.h:57
struct m0_workload_io * cti_cwi
Definition: crate_client.h:192
m0_time_t cwi_start_time
Definition: crate_client.h:180
M0_INTERNAL int m0_semaphore_init(struct m0_semaphore *semaphore, unsigned value)
Definition: semaphore.c:38
void * m0_alloc(size_t size)
Definition: memory.c:126
M0_INTERNAL void m0_mutex_init(struct m0_mutex *mutex)
Definition: mutex.c:35
int(* cr_operation_t)(struct m0_workload_io *cwi, struct m0_task_io *cti, struct m0_op_context *op_ctx, struct m0_obj *obj, int free_slot, int obj_idx, int op_index)
Definition: crate_io.c:121
int cr_namei_delete(struct m0_workload_io *cwi, struct m0_task_io *cti, struct m0_op_context *op_ctx, struct m0_obj *obj, int free_slot, int obj_idx, int op_index)
Definition: crate_io.c:379
void cr_get_oids(struct m0_uint128 *ids, uint32_t nr_objs)
Definition: crate_io.c:796
M0_INTERNAL void m0_bitmap_set(struct m0_bitmap *map, size_t idx, bool val)
Definition: bitmap.c:139
struct m0_fid cwi_pool_id
Definition: crate_client.h:169
static void cti_cleanup_op(struct m0_task_io *cti, int i)
Definition: crate_io.c:190
static m0_bindex_t offset
Definition: dump.c:173
void(* oop_failed)(struct m0_op *op)
Definition: client.h:910
m0_time_t m0_time_add(const m0_time_t t1, const m0_time_t t2)
Definition: time.c:47
struct m0_addb2__id_intrp ids[]
Definition: dump.c:1074
M0_INTERNAL void m0_thread_shun(void)
Definition: thread.c:134
void * m0_task
Definition: workload.h:146
M0_INTERNAL void m0_bufvec_free_aligned(struct m0_bufvec *bufvec, unsigned shift)
Definition: vec.c:436
uint32_t * cti_op_rcs
Definition: crate_client.h:195
unsigned cw_nr_thread
Definition: workload.h:78
uint64_t time_now(void)
Definition: st_misc.c:69
int m0_entity_create(struct m0_fid *pool, struct m0_entity *entity, struct m0_op **op)
Definition: obj.c:801
static uint64_t bw(uint64_t bytes, m0_time_t time)
Definition: crate_io.c:1004
uint64_t cti_nr_ops
Definition: crate_client.h:200
Definition: fid.h:38
struct m0_uint128 cwi_start_obj_id
Definition: crate_client.h:179
uint32_t cwi_max_nr_ops
Definition: crate_client.h:172
static uint64_t nz_rand(void)
Definition: crate_io.c:785
#define M0_ALLOC_PTR(ptr)
Definition: memory.h:86
M0_INTERNAL void m0_semaphore_fini(struct m0_semaphore *semaphore)
Definition: semaphore.c:45
static int r[NR]
Definition: thread.c:46
enum m0_operations coc_op_code
Definition: crate_io.c:114
static int cr_release_motr_thread(struct m0_task_io *cti)
Definition: crate_io.c:756
Definition: addb2.c:200
int cti_task_idx
Definition: crate_client.h:193
m0_time_t m0_time_sub(const m0_time_t t1, const m0_time_t t2)
Definition: time.c:65
void cr_log(enum cr_log_level lev, const char *fmt,...)
Definition: logger.c:39
M0_INTERNAL bool m0_bitmap_get(const struct m0_bitmap *map, size_t idx)
Definition: bitmap.c:105
m0_bcount_t size
Definition: di.c:39
m0_time_t coc_op_finish
Definition: crate_io.c:110
static size_t cr_rand___range_l(size_t end)
Definition: crate_io.c:134
M0_INTERNAL void m0_mutex_fini(struct m0_mutex *mutex)
Definition: mutex.c:42
void m0_obj_init(struct m0_obj *obj, struct m0_realm *parent, const struct m0_uint128 *id, uint64_t layout_id)
Definition: client.c:403
int cr_io_vector_prep(struct m0_workload_io *cwi, struct m0_task_io *cti, struct m0_op_context *op_ctx, int obj_idx, int op_index)
Definition: crate_io.c:244
Definition: tasks.py:1
int cr_io_write(struct m0_workload_io *cwi, struct m0_task_io *cti, struct m0_op_context *op_ctx, struct m0_obj *obj, int free_slot, int obj_idx, int op_index)
Definition: crate_io.c:391
void * op_datum
Definition: client.h:679
void integrity(struct m0_uint128 object_id, unsigned char **md5toverify, int block_count, int idx_op)
struct m0_bufvec * coc_attr
Definition: crate_io.c:117
int cr_tasks_release(struct workload *w, struct workload_task *tasks)
Definition: crate_io.c:972
static void t1(int n)
Definition: mutex.c:48
int m0_entity_delete(struct m0_entity *entity, struct m0_op **op)
Definition: obj.c:824
struct m0_obj * cti_objs
Definition: crate_client.h:198
uint32_t cwi_layout_id
Definition: crate_client.h:161
M0_INTERNAL void m0_semaphore_down(struct m0_semaphore *semaphore)
Definition: semaphore.c:49
uint64_t cwi_io_size
Definition: crate_client.h:170
struct m0_realm * crate_uber_realm()
void m0_op_free(struct m0_op *op)
Definition: client.c:885
Definition: rcv_session.c:58
M0_INTERNAL void m0_semaphore_up(struct m0_semaphore *semaphore)
Definition: semaphore.c:65
struct m0_bufvec * cti_rd_bufvec
Definition: crate_client.h:205
int m0_entity_open(struct m0_entity *entity, struct m0_op **op)
Definition: obj.c:885
struct m0_workload_io * coc_cwi
Definition: crate_io.c:113
int32_t cti_progress
Definition: crate_client.h:196
M0_INTERNAL bool m0_fid_is_valid(const struct m0_fid *fid)
Definition: fid.c:96
uint64_t u_lo
Definition: types.h:37
struct m0_thread * cti_mthread
Definition: crate_client.h:203
m0_time_t coc_op_launch
Definition: crate_io.c:109
void m0_free(void *data)
Definition: memory.c:146
int cr_namei_create(struct m0_workload_io *cwi, struct m0_task_io *cti, struct m0_op_context *op_ctx, struct m0_obj *obj, int free_slot, int obj_idx, int op_index)
Definition: crate_io.c:347
bool cg_created
Definition: crate_client.h:152
int32_t rc
Definition: trigger_fop.h:47
m0_time_t cwi_execution_time
Definition: crate_client.h:182
Definition: vec.h:145
int coc_obj_index
Definition: crate_io.c:112
void m0_op_setup(struct m0_op *op, const struct m0_op_ops *cbs, m0_time_t linger)
Definition: client.c:908