crate_index.c
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2017-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
23 #include <errno.h>
24 #include "lib/bitmap.h"
25 #include "lib/trace.h"
26 #include "lib/time.h"
27 #include "motr/m0crate/logger.h"
30 
144 #define LOG_PREFIX "dix: "
145 
146 /* XXX: Disable the trace and assertion macros to increase performance. */
147 #ifdef NTRACK
148 # undef M0_RC
149 # define M0_RC(X) (X)
150 # undef M0_ERR
151 # define M0_ERR(X) (X)
152 # undef M0_PRE
153 # define M0_PRE(X)
154 # undef M0_POST
155 # define M0_POST(X)
156 # undef M0_ASSERT
157 # define M0_ASSERT(X)
158 #endif
159 
161 enum {
165  CR_MIN_KEY_SIZE = sizeof(struct m0_fid),
169  CR_MAX_NEXT_RECORDS = (1 << 30),
170 };
171 
177 };
178 
179 /* RANDOM */
181 static int cr_rand_pos_range(int end)
182 {
183  return rand() % end;
184 }
185 
187 static size_t cr_rand_pos_range_l(size_t end)
188 {
189  size_t val_h;
190  size_t val_l;
191  size_t res;
192 
193  val_l = rand();
194  val_h = rand();
195 
196  res = (val_h << 32) | val_l;
197 
198  return res % end;
199 }
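/*
 * Note: rand() typically yields at most 31 random bits (RAND_MAX is often
 * 2^31 - 1), so combining two calls as (val_h << 32) | val_l leaves bits 31
 * and 63 unset and the final "% end" is not perfectly uniform.  That is good
 * enough for workload key selection, which is all it is used for here.
 */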
200 
202 static bool cr_rand_bool()
203 {
204  return rand() % 2 == 1;
205 }
206 
208 static void cr_get_random_string(char *dest, size_t length)
209 {
210  char set[] = "0123456789"
211  "abcdefghijklmnopqrstuvwxyz"
212  "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
213  const size_t set_len = strlen(set);
214  int index;
215  int i;
216 
217  for (i = 0; i < length - 1; i++)
218  {
219  index = cr_rand_pos_range(set_len);
220  dest[i] = set[index];
221  }
222 
223  dest[length - 1] = '\0';
224 }
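/*
 * Illustrative use (assumes length >= 1, since dest[length - 1] is always
 * written):
 *
 *     char val[8];
 *     cr_get_random_string(val, sizeof(val));   // e.g. "k3ZqA9x" + '\0'
 */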
225 
226 
227 /* TIME MEASUREMENTS */
234  double elapsed;
235 };
236 
237 /* CLIENT INDEX RESULT MEASUREMENTS */
239  "PUT",
240  "GET",
241  "NEXT",
242  "DEL"
243 };
246  const char *cior_op_label;
257 };
258 
268 };
269 
270 static double cr_time_in_seconds(m0_time_t mtime)
271 {
272  return (m0_time_seconds(mtime)) +
273  ( m0_time_nanoseconds(mtime) / (double) M0_TIME_ONE_SECOND );
274 }
275 
277 {
278  *t = (struct cr_time_measure_ctx) {};
279  t->ts = m0_time_now();
280 }
281 
283 {
284  m0_time_t d = m0_time_sub(m0_time_now(), t->ts);
285  return (m0_time_seconds(d)) +
286  ( m0_time_nanoseconds(d) / (double) M0_TIME_ONE_SECOND );
287 }
288 
290 {
291  t->elapsed = cr_time_measure_elapsed_now(t);
292  t->ts_next = m0_time_now();
293 }
294 
295 /* BITMAP */
296 static void m0_bitmap_print(struct m0_bitmap *bm)
297 {
298  int i;
299  int nr = bm->b_nr;
300 
301  if (nr <= 1024) {
302  cr_log_ex(CLL_DEBUG, LOG_PREFIX, "", "len = %d, map = {", nr);
303  for (i = 0; i < nr; i++)
304  cr_log_ex(CLL_SAME, "", "", "%d", m0_bitmap_get(bm, i));
305  cr_log_ex(CLL_SAME, "", "", "}\n");
306  } else
307  crlog(CLL_DEBUG, "Bitmap is too large to print.");
308 }
309 
310 /* WATCHDOG */
312 static int cr_watchdog_init(struct m0_workload_index *wt);
313 static void cr_watchdog_fini();
314 static void cr_watchdog_touch();
315 
316 
317 /* CLIENT OPCODES UTILS */
318 static const char *crate_op_to_string(enum cr_opcode op);
319 static bool cr_opcode_valid(enum cr_opcode opcode);
320 
321 
322 /* CLIENT INDEX WORKLOAD */
325  int nr;
327  bool pending;
329  int prev_key;
332 };
333 
335 struct cr_idx_w {
337  struct m0_bitmap bm;
344  int nr_keys;
348  size_t exec_time;
351 };
352 
353 static int cr_idx_w_init(struct cr_idx_w *ciw,
354  struct m0_workload_index *wit);
355 static void cr_idx_w_fini(struct cr_idx_w *ciw);
356 static void cr_idx_w_seq_keys_init(struct cr_idx_w *w, int keys_nr);
357 static void cr_idx_w_seq_keys_fini(struct cr_idx_w *w);
358 static int cr_idx_w_seq_keys_get_last(struct cr_idx_w *w,
359  int nr, enum cr_opcode op);
360 static bool cr_idx_w_seq_keys_enabled(struct cr_idx_w *w);
361 static void cr_idx_w_seq_keys_save(struct cr_idx_w *w,
362  int *keys, size_t keys_nr,
363  enum cr_opcode op);
364 static size_t cr_idx_w_get_value_size(struct cr_idx_w *w);
365 static int cr_idx_w_get_nr_keys_per_op(struct cr_idx_w *w, enum cr_opcode op);
366 static int cr_idx_w_find_seq_k(struct cr_idx_w *w,
367  enum cr_opcode opcode,
368  int *keys, size_t nr_keys);
369 static int cr_idx_w_find_rnd_k(struct cr_idx_w *w,
370  enum cr_opcode opcode,
371  int *keys, size_t nr_keys);
372 static bool cr_idx_w_timeout_expired(struct cr_idx_w *w);
373 static int cr_idx_w_execute(struct cr_idx_w *w,
374  enum cr_opcode opcode,
375  bool random,
376  size_t nr_keys,
377  bool *missing_key);
378 static int cr_idx_w_get_nr_remained_ops(struct cr_idx_w *w);
379 static enum cr_opcode cr_idx_w_select_op(struct cr_idx_w *w);
380 static int cr_idx_w_get_nr_remained_op_types(struct cr_idx_w *w);
381 static void cr_idx_w_print_ops_table(struct cr_idx_w *w);
382 static bool cr_idx_w_rebalance_ops(struct cr_idx_w *w, enum cr_opcode op);
383 static int cr_idx_w_common(struct cr_idx_w *w);
384 static int cr_idx_w_warmup(struct cr_idx_w *w);
385 
388  struct cr_idx_w *ciw)
389 {
390  int i = 0;
391  struct cr_idx_ops_result *op_results = ciw->ciw_results.ciwr_ops_result;
392  ciw->ciw_results.ciwr_total_time_s = t->elapsed;
393  ciw->ciw_results.ciwr_total_time_m0 = m0_time_sub(t->ts_next, t->ts);
396 
397  /* Calculate Results for each op (PUT, GET, NEXT and DEL) */
398  for (i = 0; i < CRATE_OP_NR; i++) {
399  if (op_results[i].cior_op_count) {
400  op_results[i].cior_ops_total_time_s =
403  op_results[i].cior_ops_total_time_m0 /
404  op_results[i].cior_op_count);
405  }
406  }
407 
408 }
411  struct cr_idx_w w)
412 {
413  int i = 0;
414  struct cr_idx_ops_result *op_results = w.ciw_results.ciwr_ops_result;
415 
416  /* Results in parsable format */
417  fprintf(stdout, "result: total_s, %f, avg_time_per_op_ns, %.1f,"
418  " key_size_bytes, %d, value_size_bytes, %d, ops, %d\n",
419  t->elapsed, w.ciw_results.ciwr_time_per_op_ns,
421 
422  for (i = 0; i < CRATE_OP_NR; i++) {
423  fprintf(stdout, "result: %s, total_time_s, %f, avg_time_per_op_ns, "
424  "%.1f, ops, %d\n",
425  op_results[i].cior_op_label,
426  op_results[i].cior_ops_total_time_s,
427  op_results[i].cior_time_per_op_ns,
428  op_results[i].cior_op_count);
429  }
430 
431  /* Results in m0crate format */
432  fprintf(stdout, "\nTotal: time="TIME_F" ("TIME_F" per op) ops=%d\n",
435  w.nr_ops_total),
436  w.nr_ops_total);
437 
438  for (i = 0; i < CRATE_OP_NR; i++) {
439  if (op_results[i].cior_op_count) {
440  fprintf(stdout, "%s: "TIME_F" ("TIME_F" per op) ops=%d\n",
441  op_results[i].cior_op_label,
442  TIME_P(op_results[i].cior_ops_total_time_m0),
443  TIME_P(op_results[i].cior_ops_total_time_m0 /
444  op_results[i].cior_op_count),
445  op_results[i].cior_op_count);
446  }
447  }
448 }
449 
450 
451 /* IMPLEMENTATION */
452 
453 static int cr_idx_w_init(struct cr_idx_w *ciw,
454  struct m0_workload_index *wit)
455 {
456  int rc;
457  int i;
458 
462 
463  *ciw = (typeof(*ciw)) {};
464 
465  ciw->wit = wit;
466  ciw->nr_ops_total = 0;
467 
468  /* Set min_key_size to its default, CR_MIN_KEY_SIZE. */
470 
471  /* Init crate index result */
472  for (i = 0; i < CRATE_OP_NR; i++) {
479  }
480 
481  /* XXX: If op_count is unlimited, cap it. */
482  if (wit->op_count < 0) {
483  wit->op_count = INT_MAX / (128 * wit->num_kvs);
484  }
485 
486  for (i = 0; i < ARRAY_SIZE(ciw->nr_ops); i++) {
487  ciw->nr_ops[i].nr = wit->op_count *
488  ((double) wit->opcode_prcnt[i] / 100);
489  ciw->nr_ops_total += ciw->nr_ops[i].nr;
491  ciw->nr_ops[i].nr;
492  }
493  if (wit->warmup_put_cnt == -1) {
494  ciw->warmup_put_cnt = ciw->nr_ops_total;
495  ciw->nr_keys = ciw->nr_ops_total;
496  } else {
497  ciw->warmup_put_cnt = wit->warmup_put_cnt;
498  ciw->nr_keys = ciw->warmup_put_cnt > ciw->nr_ops_total ?
499  ciw->warmup_put_cnt : ciw->nr_ops_total;
500  }
501 
502  ciw->nr_kv_per_op = wit->num_kvs;
503  ciw->nr_keys *= ciw->nr_kv_per_op;
504  ciw->ordered_keys = wit->keys_ordered;
505 
506  /* If key size is random, generate it using rand(). */
507  if (ciw->wit->key_size < 0)
508  ciw->wit->key_size = ciw->wit->min_key_size +
510 
511  if (wit->warmup_del_ratio > 0)
512  ciw->warmup_del_cnt =
513  ciw->warmup_put_cnt / wit->warmup_del_ratio;
514  else
515  ciw->warmup_del_cnt = 0;
516 
517  rc = m0_bitmap_init(&ciw->bm, ciw->nr_keys);
518  if (rc != 0)
519  return M0_ERR(rc);
520 
521  srand(wit->seed);
522 
523  ciw->key_prefix = wit->key_prefix;
524 
525  if (ciw->key_prefix.f_container == -1)
527 
529  ciw->exec_time = wit->exec_time;
530 
532  ciw->op_selector = CR_OP_SEL_RND;
533  else
534  ciw->op_selector = CR_OP_SEL_RR;
535 
536  /* If DEL, GET or NEXT is requested together with PUT, we have to
537  * use the round-robin selector to avoid cases where
538  * GET/DEL/NEXT is performed on an empty index.
539  */
540  if ((ciw->nr_ops[CRATE_OP_DEL].nr != 0 || ciw->nr_ops[CRATE_OP_GET].nr != 0 ||
541  ciw->nr_ops[CRATE_OP_NEXT].nr != 0) && ciw->nr_ops[CRATE_OP_PUT].nr != 0) {
542  ciw->op_selector = CR_OP_SEL_RR;
543  }
544 
545  M0_POST(ciw->nr_kv_per_op < ciw->nr_keys);
546  M0_POST(ciw->nr_kv_per_op > 0);
547  M0_POST(ciw->nr_keys > 0);
548 
549  return M0_RC(0);
550 }
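/*
 * Illustrative example (not from the source): with op_count = 1000 and
 * opcode_prcnt = { PUT:40, GET:30, NEXT:20, DEL:10 }, nr_ops becomes
 * { 400, 300, 200, 100 } and nr_ops_total = 1000.  With warmup_put_cnt == -1
 * the warm-up inserts nr_ops_total records, and nr_keys is then scaled by
 * nr_kv_per_op (num_kvs) so every operation can find enough keys.
 */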
551 
552 static void cr_idx_w_fini(struct cr_idx_w *ciw)
553 {
554  m0_bitmap_fini(&ciw->bm);
555 }
556 
557 /* Seq keys */
558 static void cr_idx_w_seq_keys_init(struct cr_idx_w *w, int keys_nr)
559 {
560  int i;
561 
562  w->prev_key_used = true;
563 
564  for (i = 0; i < ARRAY_SIZE(w->nr_ops); i++)
565  w->nr_ops[i].prev_key = -1;
566 }
567 
568 static void cr_idx_w_seq_keys_fini(struct cr_idx_w *w)
569 {
570  w->prev_key_used = false;
571 }
572 
574  int nr, enum cr_opcode op)
575 {
576  M0_PRE(nr > 0);
578 
579  return (w->nr_ops[op].prev_key + nr > w->nr_keys) ?
580  0 : w->nr_ops[op].prev_key + 1;
581 }
582 
583 static bool cr_idx_w_seq_keys_enabled(struct cr_idx_w *w)
584 {
585  return w->prev_key_used;
586 }
587 
588 static void cr_idx_w_seq_keys_save(struct cr_idx_w *w,
589  int *keys, size_t keys_nr,
590  enum cr_opcode op)
591 {
593 
594  if (w->prev_key_used) {
595  w->nr_ops[op].prev_key = keys[keys_nr - 1];
596  w->nr_ops[op].prev_key_nr = keys_nr;
597  }
598 }
599 
600 
601 /* Key-Value pairs (records) */
602 struct kv_pair {
603  struct m0_bufvec *k;
604  struct m0_bufvec *v;
605 };
606 
607 static void idx_bufvec_free(struct m0_bufvec *bv)
608 {
609  uint32_t i;
610 
611  if (bv == NULL)
612  return;
613 
614  if (bv->ov_buf != NULL) {
615  for (i = 0; i < bv->ov_vec.v_nr; ++i)
616  free(bv->ov_buf[i]);
617  free(bv->ov_buf);
618  }
619  free(bv->ov_vec.v_count);
620  free(bv);
621 }
622 
623 static void kv_pair_fini(struct kv_pair *p)
624 {
625  idx_bufvec_free(p->k);
626  idx_bufvec_free(p->v);
627 }
628 
629 static struct m0_bufvec* idx_bufvec_alloc(int nr)
630 {
631  struct m0_bufvec *bv;
632 
633  M0_ALLOC_PTR(bv);
634  if (bv == NULL)
635  return NULL;
636 
637  bv->ov_vec.v_nr = nr;
638 
640  if (bv->ov_vec.v_count == NULL)
641  goto do_free;
642 
643  M0_ALLOC_ARR(bv->ov_buf, nr);
644  if (bv->ov_buf == NULL)
645  goto do_free;
646 
647  return bv;
648 
649 do_free:
650  m0_bufvec_free(bv);
651  return NULL;
652 }
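/*
 * Note: the returned bufvec has nr empty slots; callers are expected to fill
 * ov_buf[i] and ov_vec.v_count[i] for each record (see the fill_kv_*()
 * helpers below).
 */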
653 
654 static size_t cr_idx_w_get_value_size(struct cr_idx_w *w)
655 {
656  int vlen;
657  /* If value_size is random, generate it using rand() */
658  if (w->wit->value_size < 0)
659  vlen = w->wit->key_size +
661  else
662  vlen = w->wit->value_size;
663 
665  M0_POST(vlen > 0);
666  return vlen;
667 }
668 
670 {
671  size_t result;
672 
673  if (op == CRATE_OP_NEXT && w->wit->next_records > 0) {
674  result = 1 + cr_rand_pos_range(w->wit->next_records);
675  M0_POST(result < CR_MAX_NEXT_RECORDS);
676  } else
677  result = w->nr_kv_per_op;
678 
679  return result;
680 }
681 
682 static int fill_kv_del(struct cr_idx_w *w,
683  struct m0_fid *k, struct kv_pair *p, size_t nr,
684  int kpart_one_size, char *kpart_one)
685 {
686  int i;
687 
688  p->k = idx_bufvec_alloc(nr);
689  if (p->k == NULL)
690  return M0_ERR(-ENOMEM);
691 
692  p->v = NULL;
693 
694  M0_ASSERT(p->k->ov_vec.v_nr == nr);
695 
696  for (i = 0; i < nr; i++) {
697  p->k->ov_vec.v_count[i] = w->wit->key_size;
698  p->k->ov_buf[i] = m0_alloc(w->wit->key_size);
699  memcpy(p->k->ov_buf[i], (void*)kpart_one, kpart_one_size);
700  memcpy(p->k->ov_buf[i] + kpart_one_size, &k[i], sizeof(*k));
701 
702  crlog(CLL_DEBUG, "Generated k=%s:" FID_F, kpart_one, FID_P(&k[i]));
703  }
704  return M0_RC(0);
705 }
706 
707 static int fill_kv_next(struct cr_idx_w *w,
708  struct m0_fid *k, struct kv_pair *p, size_t nr,
709  int kpart_one_size, char *kpart_one)
710 {
711  p->k = idx_bufvec_alloc(nr);
712  if (p->k == NULL)
713  return M0_ERR(-ENOMEM);
714 
715  p->v = idx_bufvec_alloc(nr);
716  if (p->v == NULL) {
717  idx_bufvec_free(p->k);
718  return M0_ERR(-ENOMEM);
719  }
720 
721  M0_ASSERT(p->k->ov_vec.v_nr == nr);
722  M0_ASSERT(p->v->ov_vec.v_nr == nr);
723 
724  p->k->ov_vec.v_count[0] = w->wit->key_size;
725  p->k->ov_buf[0] = m0_alloc(w->wit->key_size);
726  memcpy(p->k->ov_buf[0], (void*)kpart_one, kpart_one_size);
727  memcpy(p->k->ov_buf[0] + kpart_one_size, &k[0], sizeof(*k));
728  crlog(CLL_DEBUG, "Generated next k=%s:" FID_F, kpart_one, FID_P(&k[0]));
729 
730  return M0_RC(0);
731 }
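/*
 * Note: for NEXT only the first key slot is populated (the start key); the
 * remaining nr - 1 key/value slots are left empty, presumably to receive the
 * records returned by the enumeration.
 */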
732 
733 static int fill_kv_get(struct cr_idx_w *w,
734  struct m0_fid *k, struct kv_pair *p, size_t nr,
735  int kpart_one_size, char *kpart_one)
736 {
737  int i;
738 
739  p->k = idx_bufvec_alloc(nr);
740  if (p->k == NULL)
741  return M0_ERR(-ENOMEM);
742 
743  p->v = idx_bufvec_alloc(nr);
744  if (p->v == NULL) {
745  idx_bufvec_free(p->k);
746  return M0_ERR(-ENOMEM);
747  }
748 
749  M0_ASSERT(p->k->ov_vec.v_nr == nr);
750  M0_ASSERT(p->v->ov_vec.v_nr == nr);
751 
752  for (i = 0; i < nr; i++) {
753  p->k->ov_vec.v_count[i] = w->wit->key_size;
754  p->k->ov_buf[i] = m0_alloc(w->wit->key_size);
755  memcpy(p->k->ov_buf[i], (void*)kpart_one, kpart_one_size);
756  memcpy(p->k->ov_buf[i] + kpart_one_size, &k[i], sizeof(*k));
757  crlog(CLL_DEBUG, "Generated k=%s:" FID_F, kpart_one, FID_P(&k[i]));
758  }
759 
760  return M0_RC(0);
761 }
762 static int fill_kv_put(struct cr_idx_w *w,
763  struct m0_fid *k, struct kv_pair *p, size_t nr,
764  int kpart_one_size, char *kpart_one)
765 {
766  int vlen;
767  int i;
768 
769  p->k = idx_bufvec_alloc(nr);
770  if (p->k == NULL)
771  return M0_ERR(-ENOMEM);
772 
773  p->v = idx_bufvec_alloc(nr);
774  if (p->v == NULL) {
775  idx_bufvec_free(p->k);
776  return M0_ERR(-ENOMEM);
777  }
778 
779  M0_ASSERT(p->k->ov_vec.v_nr == nr);
780  M0_ASSERT(p->v->ov_vec.v_nr == nr);
781 
782  for (i = 0; i < nr; i++) {
783  p->k->ov_vec.v_count[i] = w->wit->key_size;
784  p->k->ov_buf[i] = m0_alloc_aligned(w->wit->key_size,
785  m0_pageshift_get());
786  memcpy(p->k->ov_buf[i], (void*)kpart_one, kpart_one_size);
787  memcpy(p->k->ov_buf[i] + kpart_one_size, &k[i], sizeof(*k));
788  vlen = cr_idx_w_get_value_size(w);
789  p->v->ov_vec.v_count[i] = vlen;
790  p->v->ov_buf[i] = m0_alloc_aligned(vlen, m0_pageshift_get());
791  cr_get_random_string(p->v->ov_buf[i], vlen);
792  crlog(CLL_DEBUG, "Generated k=%s:" FID_F ",v=%s",
793  kpart_one,
794  FID_P(&k[i]),
795  vlen > 128 ? "<...>": (char *) p->v->ov_buf[i]);
796  }
797 
798  return M0_RC(0);
799 }
800 
801 typedef int (*cr_idx_w_find_k_t)(struct cr_idx_w *w, enum cr_opcode opcode,
802  int *keys, size_t nr_keys);
803 
804 struct cr_idx_w_ops {
805  enum cr_opcode op;
807  bool empty_bit;
809  int (*fill_kv)(struct cr_idx_w *w,
810  struct m0_fid *k,
811  struct kv_pair *p,
812  size_t nr,
813  int kpart_one_size,
814  char *kpart_one);
816  bool readonly;
818  bool is_set_op;
822  const char *name;
823 };
824 
826  [CRATE_OP_PUT] = {
827  .op = CRATE_OP_PUT,
828  .empty_bit = false,
829  .fill_kv = fill_kv_put,
830  .readonly = false,
831  .is_set_op = true,
832  .m0_op = M0_IC_PUT,
833  .name = "put",
834  },
835  [CRATE_OP_DEL] = {
836  .op = CRATE_OP_DEL,
837  .empty_bit = true,
838  .fill_kv = fill_kv_del,
839  .readonly = false,
840  .is_set_op = false,
841  .m0_op = M0_IC_DEL,
842  .name = "del",
843  },
844  [CRATE_OP_GET] = {
845  .op = CRATE_OP_GET,
846  .empty_bit = true,
847  .fill_kv = fill_kv_get,
848  .readonly = true,
849  .is_set_op = false,
850  .m0_op = M0_IC_GET,
851  .name = "get",
852  },
853  [CRATE_OP_NEXT] = {
854  .op = CRATE_OP_NEXT,
855  .empty_bit = true,
856  .fill_kv = fill_kv_next,
857  .readonly = true,
858  .is_set_op = false,
859  .m0_op = M0_IC_NEXT,
860  .name = "next",
861  },
862 };
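/*
 * How this table drives key selection: the find helpers pick keys whose
 * bitmap bit equals .empty_bit, so PUT (.empty_bit = false) targets keys that
 * are not yet in the index, while DEL/GET/NEXT (.empty_bit = true) target
 * keys that are.  After a successful non-readonly op the bits of the used
 * keys are set to .is_set_op: PUT marks them present, DEL marks them absent.
 * GET/NEXT are readonly and leave the bitmap untouched.
 */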
863 
864 static const char *crate_op_to_string(enum cr_opcode op)
865 {
866  return cr_idx_w_ops[op].name;
867 }
868 
870 {
871  return opcode != CRATE_OP_INVALID;
872 }
873 
875 static M0_UNUSED bool int_array_is_set(int *vals, size_t nr)
876 {
877  int i;
878  int j;
879 
880  for (i = 0; i < nr; i++) {
881  for (j = 0; j < nr; j++) {
882  if (i != j && vals[i] == vals[j])
883  return false;
884  }
885  }
886 
887  return true;
888 }
889 
890 static int cr_idx_w_find_seq_k(struct cr_idx_w *w,
891  enum cr_opcode opcode,
892  int *keys, size_t nr_keys)
893 {
894  struct cr_idx_w_ops *op = &cr_idx_w_ops[opcode];
895  int key_iter = 0;
896  int start_key = 0;
897  int rc = 0;
898  int i;
899 
900  M0_PRE(w->nr_keys > 0);
901  M0_PRE(nr_keys != 0);
902 
903  if (cr_idx_w_seq_keys_enabled(w)) {
904  start_key = cr_idx_w_seq_keys_get_last(w, nr_keys, opcode);
905  crlog(CLL_DEBUG, "starting_key_num=%d", start_key);
906  }
907 
908  for (i = start_key; i < w->nr_keys; i++) {
909  if (m0_bitmap_get(&w->bm, i) == op->empty_bit) {
910  keys[key_iter++] = i;
911  if (key_iter == nr_keys)
912  break;
913  }
914  }
915 
916  if (key_iter != nr_keys) {
917  crlog(CLL_ERROR, "Unable to find enough keys for opcode '%s'",
919  m0_bitmap_print(&w->bm);
920  rc = M0_ERR(-ENOENT);
921  } else
922  M0_POST(int_array_is_set(keys, nr_keys));
923 
924  return M0_RC(rc);
925 }
926 
927 static bool m0_bitmap_is_fulfilled(struct m0_bitmap *bm, bool fill)
928 {
929  return (m0_forall(i, bm->b_nr, fill == m0_bitmap_get(bm, i)));
930 }
931 
932 static int cr_idx_w_find_rnd_k(struct cr_idx_w *w,
933  enum cr_opcode opcode,
934  int *keys, size_t nr_keys)
935 {
936  struct cr_idx_w_ops *op = &cr_idx_w_ops[opcode];
937  int rc = 0;
938  int r;
939  int key_iter = 0;
940  size_t attempts = 0;
941 
942  M0_PRE(w->nr_keys > 0);
943 
944  while (key_iter != nr_keys) {
945  if (attempts > w->bm.b_nr &&
946  m0_bitmap_is_fulfilled(&w->bm, !op->empty_bit)) {
947  crlog(CLL_ERROR, "Map hasn't available bits.");
948  rc = M0_ERR(-ENOENT);
949  break;
950  }
951 
953  M0_ASSERT(r < w->nr_keys);
954  attempts++;
955 
956  /* Skip the key if it is already present in 'keys'
957  * or if it cannot be used for this op. */
958  if (!m0_exists(i, key_iter, keys[i] == r) &&
959  m0_bitmap_get(&w->bm, r) == op->empty_bit) {
960  attempts = 0;
961  keys[key_iter] = r;
962  key_iter++;
963  }
964  }
965 
966  if (rc != 0) {
967  crlog(CLL_ERROR, "Unable to find key for opcode '%s'",
969  m0_bitmap_print(&w->bm);
970  } else
971  M0_POST(int_array_is_set(keys, nr_keys));
972 
973  return M0_RC(rc);
974 }
975 
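/*
 * cr_execute_query() below follows the Motr client op life cycle used
 * throughout this file: m0_idx_init() -> m0_idx_op() -> m0_op_launch() ->
 * m0_op_wait() -> per-record rcs[] check -> m0_op_fini()/m0_op_free() ->
 * m0_idx_fini().
 */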
976 static int cr_execute_query(struct m0_fid *id,
977  struct kv_pair *p,
978  enum cr_opcode opcode)
979 {
980  struct m0_op *ops[1] = { [0] = NULL };
981  int32_t *rcs;
982  int rc;
983  struct m0_idx idx = {};
984  int kv_nr;
985  int flags = 0;
986  int kv_index;
987  struct cr_idx_w_ops *op = &cr_idx_w_ops[opcode];
988 
989  kv_nr = p->k->ov_vec.v_nr;
990 
991  if (NULL == M0_ALLOC_ARR(rcs, kv_nr))
992  return M0_ERR(-ENOMEM);
993 
994  m0_idx_init(&idx, crate_uber_realm(), (struct m0_uint128 *) id);
995 
996  rc = m0_idx_op(&idx, op->m0_op, p->k, p->v, rcs, flags, &ops[0]);
997  if (rc != 0) {
998  crlog(CLL_ERROR, "Unable to init Client idx op: %s",
999  strerror(-rc));
1000  goto end;
1001  }
1002 
1003  m0_op_launch(ops, 1);
1004 
1006  M0_TIME_NEVER);
1007  if (rc != 0) {
1008  crlog(CLL_ERROR, "Client op failed: %s", strerror(-rc));
1009  goto end;
1010  }
1011 
1012  if (m0_exists(i, kv_nr, (kv_index = i, (rc = rcs[i]) != 0))) {
1013  /* XXX: the client destroys the keys if the NEXT op failed. */
1014  if (op->m0_op == M0_IC_NEXT) {
1015  crlog(CLL_ERROR,
1016  "Failed to perform client operation %s: k="
1017  FID_F " %s (%d), index=%d",
1018  crate_op_to_string(op->op),
1019  FID_P((struct m0_fid *) p->k->ov_buf[0]),
1020  strerror(-rc), rc, kv_index);
1021  } else {
1022  crlog(CLL_ERROR,
1023  "Failed to perform client operation %s: k="
1024  FID_F " %s (%d)",
1025  crate_op_to_string(op->op),
1026  FID_P((struct m0_fid *) p->k->ov_buf[kv_index]),
1027  strerror(-rc), rc);
1028  }
1029  } else
1030  rc = ops[0]->op_sm.sm_rc;
1031 
1032 end:
1033  if (ops[0] != NULL) {
1034  m0_op_fini(ops[0]);
1035  m0_op_free(ops[0]);
1036  }
1037  m0_idx_fini(&idx);
1038  m0_free0(&rcs);
1039 
1040  return M0_RC(rc);
1041 }
1042 
1043 static bool cr_idx_w_timeout_expired(struct cr_idx_w *w)
1044 {
1045  return ergo(w->exec_time > 0,
1047  w->exec_time);
1048 }
1049 
1050 static int cr_idx_w_execute(struct cr_idx_w *w,
1051  enum cr_opcode opcode,
1052  bool random,
1053  size_t nr_keys,
1054  bool *missing_key)
1055 {
1056  struct cr_idx_w_ops *op = &cr_idx_w_ops[opcode];
1057  cr_idx_w_find_k_t find_fn;
1058  int rc;
1059  int *ikeys = NULL;
1060  struct m0_fid *keys = NULL;
1061  struct kv_pair kv = {0};
1062  int i;
1063  /* A key consists of <constant prefix>:<randomly or sequentially generated value>. */
1064  int kpart_one_size = w->wit->key_size - w->wit->min_key_size;
1065  char kpart_one[kpart_one_size];
1066  m0_time_t op_start_time;
1067  m0_time_t op_time;
1068 
1069  M0_PRE(nr_keys > 0);
1070 
1072 
1073  if (cr_idx_w_timeout_expired(w)) {
1074  crlog(CLL_ERROR, "Timeout expired.");
1075  return M0_ERR(-ETIME);
1076  }
1077 
1078  if (NULL == M0_ALLOC_ARR(ikeys, nr_keys)) {
1079  rc = M0_ERR(-ENOMEM);
1080  goto do_exit;
1081  }
1082 
1083  if (NULL == M0_ALLOC_ARR(keys, nr_keys)) {
1084  rc = M0_ERR(-ENOMEM);
1085  goto do_exit_ik;
1086  }
1087 
1088  find_fn = (random ? cr_idx_w_find_rnd_k : cr_idx_w_find_seq_k);
1089  rc = find_fn(w, opcode, ikeys, nr_keys);
1090 
1091  if (rc != 0) {
1092  if (missing_key)
1093  *missing_key = true;
1094  rc = M0_ERR(rc);
1095  goto do_exit_keys;
1096  }
1097 
1098  for (i = 0; i < nr_keys; i++) {
1099  keys[i].f_key = ikeys[i];
1100  keys[i].f_container = w->key_prefix.f_container;
1101  }
1102 
1103  /* Populate the constant key prefix. */
1104  memset(kpart_one, 'A', kpart_one_size);
1105 
1106  rc = op->fill_kv(w, keys, &kv, nr_keys, kpart_one_size, kpart_one);
1107  if (rc != 0) {
1108  rc = M0_ERR(rc);
1109  goto do_exit_kv;
1110  }
1111  /* Accumulate the time taken by each op, per opcode. */
1112  op_start_time = m0_time_now();
1113  rc = cr_execute_query(&w->wit->index_fid, &kv, opcode);
1114  op_time = m0_time_sub(m0_time_now(), op_start_time);
1117  op_time);
1118  if (rc != 0) {
1119  rc = M0_ERR(rc);
1120  goto do_exit_kv;
1121  }
1122 
1123  if (op->readonly) {
1124  if (!random)
1125  cr_idx_w_seq_keys_save(w, ikeys, nr_keys, opcode);
1126  } else {
1127  for (i = 0; i < nr_keys; i++)
1128  m0_bitmap_set(&w->bm, ikeys[i], op->is_set_op);
1129  }
1130 
1131  crlog(CLL_TRACE, "Executed op: %s", crate_op_to_string(opcode));
1132 
1133 do_exit_kv:
1134  kv_pair_fini(&kv);
1135 do_exit_keys:
1136  m0_free0(&keys);
1137 do_exit_ik:
1138  m0_free0(&ikeys);
1139 do_exit:
1140  return M0_RC(rc);
1141 }
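/*
 * Roughly, one cr_idx_w_execute() round is: check the timeout, pick nr_keys
 * keys (randomly or sequentially), turn them into key/value bufvecs with the
 * op's fill_kv callback, run the query, account the elapsed time per opcode,
 * and finally update the bitmap (or the sequential-key cursor for readonly
 * ops).
 */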
1142 
1143 
1145 {
1146  int result = 0;
1147  int i;
1148 
1149  if (w->nr_ops_total > 0)
1150  for (i = 0; i < ARRAY_SIZE(w->nr_ops); i++)
1151  result += w->nr_ops[i].nr;
1152 
1153  return result;
1154 }
1155 
1156 static enum cr_opcode cr_idx_w_select_op_rr(struct cr_idx_w *w, int depth)
1157 {
1158  enum cr_opcode rc = CRATE_OP_START;
1159  int max_ops_cnt = 0;
1160  int i;
1161 
1163 
1164  /* Find the opcode with the largest number of remaining ops. */
1165  for (i = 0; i < ARRAY_SIZE(w->nr_ops); i++) {
1166  if (w->nr_ops[i].pending)
1167  continue;
1168  if (w->nr_ops[i].nr > max_ops_cnt) {
1169  max_ops_cnt = w->nr_ops[i].nr;
1170  rc = (enum cr_opcode) i;
1171  }
1172  }
1173 
1174  if (max_ops_cnt == 0) {
1175  rc = CRATE_OP_INVALID;
1176  /* try to use pending operations */
1177  for (i = 0; i < ARRAY_SIZE(w->nr_ops); i++) {
1178  if (w->nr_ops[i].pending) {
1179  w->nr_ops[i].pending = false;
1181  if (cr_opcode_valid(rc))
1182  break;
1183  }
1184  }
1185  }
1186 
1187  return rc;
1188 }
1189 
1190 static enum cr_opcode cr_idx_w_select_op(struct cr_idx_w *w)
1191 {
1192  enum cr_opcode rc = CRATE_OP_START;
1193 
1194  if (cr_idx_w_get_nr_remained_ops(w) == 0) {
1195  crlog(CLL_INFO, "End of operations.");
1196  return CRATE_OP_INVALID;
1197  }
1198 
1199  switch (w->op_selector) {
1200  case CR_OP_SEL_RR:
1201  rc = cr_idx_w_select_op_rr(w, 0);
1202  break;
1203 
1204  case CR_OP_SEL_RND:
1205  do {
1207  } while (w->nr_ops[rc].nr == 0);
1208  break;
1209  }
1210 
1211  return rc;
1212 }
1213 
1215 {
1216  int result = 0;
1217  int i;
1218 
1219  for (i = 0; i < ARRAY_SIZE(w->nr_ops); i++) {
1220  if (w->nr_ops[i].nr)
1221  result++;
1222  }
1223 
1224  return result;
1225 
1226 }
1227 
1228 static void cr_idx_w_print_ops_table(struct cr_idx_w *w)
1229 {
1230  M0_PRE(ARRAY_SIZE(w->nr_ops) == 4);
1231  crlog(CLL_TRACE, "ops remaining: [%d, %d, %d, %d]",
1232  w->nr_ops[0].nr,
1233  w->nr_ops[1].nr,
1234  w->nr_ops[2].nr,
1235  w->nr_ops[3].nr);
1236 }
1237 
1238 static bool cr_idx_w_rebalance_ops(struct cr_idx_w *w, enum cr_opcode op)
1239 {
1240  bool result = false;
1241  int i;
1242 
1243  if (cr_idx_w_get_nr_remained_ops(w) > 0
1245  for (i = 0; i < ARRAY_SIZE(w->nr_ops); i++)
1246  w->nr_ops[i].pending = cr_rand_bool();
1247  result = true;
1248  } else
1249  crlog(CLL_WARN, "Reached end of keys.");
1250 
1251  return result;
1252 }
1253 
1254 static int cr_idx_w_common(struct cr_idx_w *w)
1255 {
1256  int rc;
1257  enum cr_opcode op;
1258  bool is_random;
1259  bool missing_key = false;
1260  int nr_kv_per_op;
1261 
1263 
1264  while (true) {
1265  op = cr_idx_w_select_op(w);
1267 
1268  if (!cr_opcode_valid(op)) {
1269  rc = M0_RC(0);
1270  break;
1271  }
1272 
1273  m0_bitmap_print(&w->bm);
1274 
1275  is_random = !w->wit->keys_ordered;
1276 
1277  nr_kv_per_op = cr_idx_w_get_nr_keys_per_op(w, op);
1278  crlog(CLL_DEBUG, "nr_kv_per_op: %d", nr_kv_per_op);
1279 
1280  rc = cr_idx_w_execute(w, op, is_random, nr_kv_per_op,
1281  &missing_key);
1282  if (rc != 0) {
1283  /* try to select another op type */
1284  if (missing_key && (cr_idx_w_rebalance_ops(w, op)))
1285  continue;
1286  break;
1287  }
1288  w->nr_ops[op].nr--;
1289  }
1290 
1291  m0_bitmap_print(&w->bm);
1293 
1294  return M0_RC(rc);
1295 }
1296 
1297 static int cr_idx_w_warmup(struct cr_idx_w *w)
1298 {
1299  int rc = 0;
1300  int i;
1301 
1302  for (i = 0; i < w->warmup_put_cnt && rc == 0; i++) {
1303  m0_bitmap_print(&w->bm);
1304  rc = cr_idx_w_execute(w, CRATE_OP_PUT, false,
1305  w->nr_kv_per_op, NULL);
1306  }
1307 
1308  for (i = 0; i < w->warmup_del_cnt && rc == 0; i++) {
1309  m0_bitmap_print(&w->bm);
1310  rc = cr_idx_w_execute(w, CRATE_OP_DEL, true,
1311  w->nr_kv_per_op, NULL);
1312  }
1313 
1314  return M0_RC(rc);
1315 }
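/*
 * Warm-up pre-populates the index with warmup_put_cnt sequential PUTs and
 * then deletes a random subset of warmup_del_cnt keys (warmup_put_cnt /
 * warmup_del_ratio, see cr_idx_w_init()), so the main phase starts from a
 * partially filled index.
 */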
1316 
1317 
1318 static M0_UNUSED int delete_index(struct m0_uint128 id)
1319 {
1320  int rc;
1321  struct m0_op *ops[1] = { NULL };
1322  struct m0_idx idx = {};
1323 
1325  M0_PRE(crate_uber_realm()->re_instance != NULL);
1326 
1327  /* Set an index deletion operation. */
1328  m0_idx_init(&idx, crate_uber_realm(), &id);
1329 
1330  rc = m0_entity_delete(&idx.in_entity, &ops[0]);
1331  if (rc == 0) {
1332  /* Launch and wait for op to complete */
1333  m0_op_launch(ops, 1);
1335  M0_OS_STABLE),
1336  M0_TIME_NEVER);
1337 
1338  if (rc != 0)
1339  crlog(CLL_ERROR, "Unable to perform index del");
1340  else if (ops[0]->op_sm.sm_rc != 0) {
1341  if (ops[0]->op_sm.sm_rc == -ENOENT)
1342  crlog(CLL_ERROR, "Index not found");
1343  else
1344  rc = ops[0]->op_sm.sm_rc;
1345  }
1346  } else
1347  crlog(CLL_ERROR, "Unable to set delete operation.");
1348 
1349  /* fini and release */
1350  if (ops[0] != NULL) {
1351  m0_op_fini(ops[0]);
1352  m0_op_free(ops[0]);
1353  }
1354  m0_idx_fini(&idx);
1355 
1356  return M0_RC(rc);
1357 }
1358 
1359 static int create_index(struct m0_uint128 id)
1360 {
1361  int rc;
1362  struct m0_op *ops[1] = { NULL };
1363  struct m0_idx idx = {};
1364 
1366  M0_PRE(crate_uber_realm()->re_instance != NULL);
1367 
1368  /* Set an index creation operation. */
1369  m0_idx_init(&idx, crate_uber_realm(), &id);
1370 
1371  rc = m0_entity_create(NULL, &idx.in_entity, &ops[0]);
1372  if (rc == 0) {
1373  /* Launch and wait for op to complete */
1374  m0_op_launch(ops, 1);
1376  M0_OS_STABLE),
1377  M0_TIME_NEVER);
1378 
1379  if (rc != 0)
1380  crlog(CLL_ERROR, "Unable to perform index create");
1381  else if (ops[0]->op_sm.sm_rc != 0) {
1382  if (ops[0]->op_sm.sm_rc == -EEXIST)
1383  crlog(CLL_WARN, "Index already exists.");
1384  else
1385  rc = ops[0]->op_sm.sm_rc;
1386  }
1387  } else
1388  crlog(CLL_ERROR, "Unable to set index create operation.");
1389 
1390  /* fini and release */
1391  if (ops[0] != NULL) {
1392  m0_op_fini(ops[0]);
1393  m0_op_free(ops[0]);
1394  }
1395  m0_idx_fini(&idx);
1396 
1397  return M0_RC(rc);
1398 }
1399 
1400 struct cr_watchdog {
1401  struct m0_mutex lock;
1402  uint64_t counter;
1403  uint64_t sleep_sec;
1404  struct m0_thread thr;
1405 };
1406 
1407 static void cr_watchdog_thread(void *arg)
1408 {
1409  struct cr_watchdog *w = arg;
1410 
1411  crlog(CLL_INFO, "Watchdog started");
1412  while (true) {
1413  m0_mutex_lock(&w->lock);
1414  if (w->counter == 0) {
1415  crlog(CLL_ERROR, "watchdog: blocked task detected."
1416  " Exiting.");
1417  m0_nanosleep(m0_time(0, 100000), NULL);
1418  exit(EXIT_FAILURE);
1419  }
1420  w->counter = 0;
1421  m0_mutex_unlock(&w->lock);
1422  sleep(w->sleep_sec);
1423  }
1424 }
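/*
 * The watchdog protocol: worker code calls cr_watchdog_touch() to bump the
 * counter, and the watchdog thread resets it every sleep_sec seconds.  If a
 * whole interval passes without a single touch, the task is considered
 * blocked and the process exits.
 */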
1425 
1426 static void cr_watchdog_touch()
1427 {
1428  struct cr_watchdog *w = cr_watchdog;
1429 
1430  /* Don't touch the watchdog if it doesn't exist. */
1431  if (w == NULL)
1432  return;
1433 
1434  m0_mutex_lock(&w->lock);
1435  w->counter++;
1436  m0_mutex_unlock(&w->lock);
1437 }
1438 
1439 static int cr_watchdog_init(struct m0_workload_index *wt)
1440 {
1441  int rc;
1442 
1444  if (cr_watchdog == NULL)
1445  rc = M0_ERR(-ENOMEM);
1446  else {
1448  cr_watchdog->counter = wt->exec_time;
1450 
1452  cr_watchdog, "watchdog");
1453  }
1454 
1455  if (rc != 0) {
1457  crlog(CLL_ERROR, "Unable to init watchdog (%s)", strerror(-rc));
1458  }
1459 
1460  return M0_RC(rc);
1461 }
1462 
1463 static void cr_watchdog_fini()
1464 {
1465  if (cr_watchdog == NULL)
1466  return;
1467  (void) m0_thread_signal(&cr_watchdog->thr, SIGTERM);
1468  (void) m0_thread_join(&cr_watchdog->thr);
1472 }
1473 
1474 static int index_operation(struct workload *wt,
1475  struct m0_workload_task *task)
1476 {
1477  struct cr_idx_w w = {};
1478  struct cr_time_measure_ctx t;
1479  struct m0_workload_index *wit = wt->u.cw_index;
1480  struct m0_uint128 index_fid;
1481  int rc;
1482 
1484  M0_PRE(crate_uber_realm()->re_instance != NULL);
1485 
1486  M0_CASSERT(sizeof(struct m0_uint128) == sizeof(struct m0_fid));
1487  memcpy(&index_fid, &wit->index_fid, sizeof(index_fid));
1488 
1490 
1491  if (wit->exec_time > 0) {
1492  rc = cr_watchdog_init(wit);
1493  if (rc != 0)
1494  goto do_exit;
1495  }
1496 
1497  rc = cr_idx_w_init(&w, wit);
1498  if (rc != 0)
1499  goto do_exit_wg;
1500 
1501  rc = create_index(index_fid);
1502  if (rc != 0)
1503  goto do_exit_idx_w;
1504 
1505  rc = cr_idx_w_warmup(&w);
1506  if (rc != 0)
1507  goto do_del_idx;
1508 
1509  rc = cr_idx_w_common(&w);
1510  if (rc != 0)
1511  goto do_del_idx;
1512 
1513 do_del_idx:
1514  /* XXX: index deletion disabled. */
1515 #if 0
1516  rc = delete_index(index_fid);
1517  M0_ASSERT(rc != 0);
1518 #endif
1519 do_exit_idx_w:
1520  cr_idx_w_fini(&w);
1521 do_exit_wg:
1522  cr_watchdog_fini();
1524  cr_time_capture_results(&t, &w);
1526 do_exit:
1527  return M0_RC(rc);
1528 }
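/*
 * Overall flow of one index workload task: optionally start the watchdog
 * (when exec_time is set), initialise the workload state, create the index,
 * run the warm-up phase, run the main PUT/GET/NEXT/DEL mix, then tear
 * everything down and capture the timing results.
 */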
1529 
1530 void run_index(struct workload *w, struct workload_task *tasks)
1531 {
1532  workload_start(w, tasks);
1533  workload_join(w, tasks);
1534 }
1535 
1536 void m0_op_run_index(struct workload *w, struct workload_task *task,
1537  const struct workload_op *op)
1538 {
1539  struct m0_workload_task *m0_task;
1540  int rc;
1541  bool is_m0_thread;
1542 
1544  M0_PRE(crate_uber_realm()->re_instance != NULL);
1545 
1546  if (NULL == M0_ALLOC_PTR(m0_task)) {
1547  crlog(CLL_ERROR, "Out of memory.");
1548  exit(EXIT_FAILURE);
1549  }
1550 
1551  is_m0_thread = m0_thread_tls() != NULL;
1552 
1553  if (!is_m0_thread) {
1554  rc = adopt_motr_thread(m0_task);
1555  if (rc != 0) {
1556  crlog(CLL_ERROR, "Unable to adopt thread (%s)",
1557  strerror(-rc));
1558  goto exit_free;
1559  }
1560  }
1561 
1562  rc = index_operation(w, m0_task);
1563  if (rc != 0) {
1564  crlog(CLL_ERROR,
1565  "Failed to perform index operation (%s)",
1566  strerror(-rc));
1567  }
1568 
1569  if (!is_m0_thread) {
1570  release_motr_thread(m0_task);
1571  }
1572 
1573 exit_free:
1574  m0_free(m0_task);
1575 }
1576 
1577 /*
1578  * Local variables:
1579  * c-indentation-style: "K&R"
1580  * c-basic-offset: 8
1581  * tab-width: 8
1582  * fill-column: 80
1583  * scroll-step: 1
1584  * End:
1585  */
1586 /*
1587  * vim: tabstop=8 shiftwidth=8 noexpandtab textwidth=80 nowrap
1588  */