Motr  M0
log.c
Go to the documentation of this file.
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2013-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
23 #include "be/io.h"
24 #include "be/log.h"
25 #include "lib/chan.h"
26 #include "lib/errno.h"
27 #include "lib/memory.h"
28 #include "stob/domain.h"
29 #include "stob/stob.h"
30 #include "ut/stob.h"
31 #include "ut/threads.h"
32 #include "ut/ut.h"
33 
/* Constants shared by the BE log unit tests in this file. */
34 enum {
 /* total capacity of the UT log (see .lsc_size below) */
35  BE_UT_LOG_SIZE = 1024 * 1024,
36  /* We try to reserve more than actual record's size */
 /* NOTE(review): enum members on original lines 37-41 are elided from this
  * listing (the BE_UT_LOG_THREAD_NR / BE_UT_LOG_LIO_SIZE /
  * BE_UT_LOG_RESERVE_SIZE constants referenced later in the file) --
  * confirm against the full source. */
42 };
43 
/* Location and configuration strings of the stob domain backing the UT log;
 * "directio=true" requests direct I/O from the linuxstob backend. */
44 const char *be_ut_log_sdom_location = "linuxstob:./log";
45 const char *be_ut_log_sdom_init_cfg = "directio=true";
46 const char *be_ut_log_sdom_create_cfg = "";
47 
49 
/*
 * Callback invoked by the log when reserved space is released and more
 * free space becomes available.  Deliberately a no-op in these UTs.
 *
 * TODO make some locking mechanism, so threads can repeat reservation
 */
static void be_ut_log_got_space_cb(struct m0_be_log *log)
{
}
54 
/*
 * Fills *log_cfg with the configuration used by every log UT in this file:
 * a linuxstob-backed log store of BE_UT_LOG_SIZE bytes, the no-op
 * got-space callback above, and the caller-supplied external lock.
 */
55 static void be_ut_log_cfg_set(struct m0_be_log_cfg *log_cfg,
56  struct m0_mutex *lock)
57 {
58  *log_cfg = (struct m0_be_log_cfg){
59  .lc_store_cfg = {
60  /* temporary solution BEGIN */
61  .lsc_stob_domain_location = "linuxstob:./log_store-tmp",
62  .lsc_stob_domain_init_cfg = "directio=true",
63  .lsc_stob_domain_key = 0x1000,
64  .lsc_stob_domain_create_cfg = NULL,
65  /* temporary solution END */
66  .lsc_size = BE_UT_LOG_SIZE,
67  .lsc_stob_create_cfg = NULL,
68  .lsc_rbuf_nr = 3,
69  },
70  .lc_got_space_cb = &be_ut_log_got_space_cb,
71  .lc_lock = lock,
72  };
 /* NOTE(review): original lines 73-74 are elided from this listing;
  * judging by the trailing argument below this is the start of an
  * m0_stob_id_make() call filling lsc_stob_id -- confirm against the
  * full source. */
75  &log_cfg->lc_store_cfg.lsc_stob_id);
76 }
77 
/*
 * Creates a fresh log for a UT: builds the standard configuration via
 * be_ut_log_cfg_set() and calls m0_be_log_create().  Asserts on failure.
 */
78 static void be_ut_log_init(struct m0_be_log *log, struct m0_mutex *lock)
79 {
80  struct m0_be_log_cfg log_cfg;
81  int rc;
82 
 /* NOTE(review): original lines 83-87 are elided from this listing;
  * presumably the be_ut_log_stob_domain is created here and its result
  * assigned to rc (checked just below) -- confirm against the full
  * source. */
88  M0_UT_ASSERT(rc == 0);
89  be_ut_log_cfg_set(&log_cfg, lock);
90  rc = m0_be_log_create(log, &log_cfg);
91  M0_UT_ASSERT(rc == 0);
92 }
93 
/*
 * Counterpart of be_ut_log_init(): destroys the log and tears down the
 * backing storage.
 */
94 static void be_ut_log_fini(struct m0_be_log *log)
95 {
96  int rc;
97 
98  m0_be_log_destroy(log);
 /* NOTE(review): original line 99 is elided from this listing; presumably
  * rc = m0_stob_domain_destroy(be_ut_log_stob_domain) -- confirm against
  * the full source. */
100  M0_UT_ASSERT(rc == 0);
101 }
102 
103 static int be_ut_log_open(struct m0_be_log *log, struct m0_mutex *lock)
104 {
105  struct m0_be_log_cfg log_cfg;
106 
107  M0_SET0(log);
108  be_ut_log_cfg_set(&log_cfg, lock);
109  return m0_be_log_open(log, &log_cfg);
110 }
111 
115  char *bult_data;
125 };
126 
/*
 * Thread body for the multi-threaded log UT: reserves log space, records
 * this thread's position in the global write order, fills each log IO
 * with the thread's one-byte pattern, launches the write and waits for
 * completion, then either discards the record or skips the discard.
 * NOTE(review): the signature line (original line 127) is elided from this
 * listing; the cross-reference index shows it as
 * be_ut_log_multi_thread(struct be_ut_log_thread_ctx *ctx).  Several body
 * lines (141, 144, 146-147, 150-151, 154, 160, 163, 167-168) are elided
 * as well -- confirm against the full source.
 */
128 {
129  struct m0_be_log_record *record = &ctx->bult_record;
130  struct m0_be_log *log = ctx->bult_log;
131  struct m0_mutex *lock = ctx->bult_lock;
132  struct m0_be_op *op = &ctx->bult_op;
133  struct m0_bufvec *bvec;
134  int lio_nr = ctx->bult_lio_nr;
135  m0_bcount_t lio_size = ctx->bult_lio_size;
136  m0_bcount_t reserve_size = ctx->bult_reserve_size;
137  int64_t index;
138  int rc;
139  int i;
140 
142  rc = m0_be_log_reserve(log, reserve_size);
143  M0_UT_ASSERT(rc == 0);
145  for (i = 0; i < lio_nr; ++i)
 /* claim the next slot in the global write order atomically */
148  index = m0_atomic64_add_return(ctx->bult_atom, 1) - 1;
149  ctx->bult_order[index] = ctx->bult_index;
152 
153  for (i = 0; i < lio_nr; ++i) {
155  M0_UT_ASSERT(bvec->ov_vec.v_nr == 1 &&
156  bvec->ov_vec.v_count[0] == lio_size);
157  memcpy(bvec->ov_buf[0], ctx->bult_data, lio_size);
158  }
159 
161  m0_be_op_wait(op);
162 
164  if (ctx->bult_discard)
165  m0_be_log_record_discard(record->lgr_log, record->lgr_size);
166  else
169 }
170 
172 
/*
 * Core of the multi-threaded log UTs: starts thread_nr writer threads
 * (each writing lio_nr log IOs of lio_size bytes filled with a
 * per-thread pattern), then reads all records back via the iterator API
 * and verifies both the global write order (recorded in 'order') and the
 * payload of every record.
 * NOTE(review): several body lines are elided from this listing
 * (190, 224, 229, 241, 245, 248, 251, 257-260, 265, 272, 274-279, 283,
 * 291-292, 312) -- record init/allocate/discard calls presumably live
 * there; confirm against the full source.
 */
173 static void be_ut_log_multi_ut(int thread_nr, bool discard,
174  int lio_nr, m0_bcount_t lio_size)
175 {
176  struct m0_be_log log = {};
177  struct m0_mutex lock = {};
178  struct be_ut_log_thread_ctx *ctxs;
179  struct m0_be_log_record *record;
180  struct m0_be_log_record_iter *iters;
181  struct m0_bufvec *bvec;
182  struct m0_atomic64 atom;
183  m0_bcount_t reserve_size;
184  int *order;
185  int rc;
186  int c;
187  int i;
188  int j;
189 
191  be_ut_log_init(&log, &lock);
192 
193  M0_ALLOC_ARR(ctxs, thread_nr);
194  M0_UT_ASSERT(ctxs != NULL);
195  M0_ALLOC_ARR(order, thread_nr);
196  M0_UT_ASSERT(order != NULL);
197  m0_atomic64_set(&atom, 0);
 /* reservations are made in whole log blocks */
198  reserve_size = m0_round_up(lio_size, 1ULL << m0_be_log_bshift(&log));
199 
200  /* Preallocation */
201 
202  for (i = 0; i < thread_nr; ++i) {
203  ctxs[i] = (struct be_ut_log_thread_ctx){
204  .bult_index = i,
205  .bult_order = order,
206  .bult_atom = &atom,
207  .bult_log = &log,
208  .bult_lock = &lock,
209  .bult_discard = discard,
210  .bult_lio_nr = lio_nr,
211  .bult_lio_size = lio_size,
212  .bult_reserve_size = reserve_size * lio_nr,
213  };
214  m0_be_op_init(&ctxs[i].bult_op);
215  ctxs[i].bult_data = m0_alloc(lio_size);
216  M0_UT_ASSERT(ctxs[i].bult_data != NULL);
 /* map thread index to a distinct printable byte: 0-9, a-z, A-Z */
217  c = i % 62;
218  c = c < 10 ? c + '0' :
219  c < 36 ? c - 10 + 'a' :
220  c - 36 + 'A';
221  memset(ctxs[i].bult_data, c, lio_size);
222 
223  record = &ctxs[i].bult_record;
225  for (j = 0; j < lio_nr; ++j) {
226  rc = m0_be_log_record_io_create(record, lio_size);
227  M0_UT_ASSERT(rc == 0);
228  }
230  M0_UT_ASSERT(rc == 0);
231  }
232 
233  /* Start writing */
234 
235  M0_UT_THREADS_START(be_ut_log, thread_nr, ctxs);
236  M0_UT_THREADS_STOP(be_ut_log);
237 
238  /* With delayed discard all records must be discarded in order
239  * of writing to log. Therefore, discard all records here.
240  */
242  for (i = 0; i < thread_nr; ++i) {
243  record = &ctxs[i].bult_record;
244  if (discard) {
246  record->lgr_size);
247  } else {
249  }
250  }
252 
253  /* Check written records */
254 
 /* one extra iterator to prove there is no (thread_nr+1)-th record */
255  M0_ALLOC_ARR(iters, thread_nr + 1);
256  M0_UT_ASSERT(iters != NULL);
259 
261  for (i = 0; i < lio_nr; ++i) {
262  rc = m0_be_log_record_io_create(record, lio_size);
263  M0_UT_ASSERT(rc == 0);
264  }
266  M0_UT_ASSERT(rc == 0);
267  for (i = 0; i < thread_nr; ++i) {
268  m0_be_log_record_iter_init(&iters[i]);
269  rc = i == 0 ? m0_be_log_record_initial(&log, &iters[i]) :
270  m0_be_log_record_next(&log, &iters[i-1], &iters[i]);
271  M0_UT_ASSERT(rc == 0);
273  m0_be_log_record_assign(record, &iters[i], false);
277 
280  bo_sm.sm_rc);
281  M0_UT_ASSERT(rc == 0);
 /* payload of the i-th record must match the pattern of the
  * thread that claimed slot i in the write order */
282  for (j = 0; j < lio_nr; ++j) {
284  M0_UT_ASSERT(bvec->ov_vec.v_nr == 1 &&
285  bvec->ov_vec.v_count[0] == lio_size);
286  M0_UT_ASSERT(memcmp(ctxs[order[i]].bult_data,
287  bvec->ov_buf[0],
288  lio_size) == 0);
289  }
290  }
293  m0_free(record);
294  /* log must contain exactly thread_nr records */
295  rc = m0_be_log_record_next(&log, &iters[thread_nr-1], &iters[thread_nr]);
296  M0_UT_ASSERT(rc != 0);
297 
298  /* Finalisation */
299 
300  for (i = 0; i < thread_nr; ++i) {
301  m0_be_log_record_iter_fini(&iters[i]);
302  m0_free(ctxs[i].bult_data);
303  m0_be_op_fini(&ctxs[i].bult_op);
304  m0_be_log_record_deallocate(&ctxs[i].bult_record);
305  m0_be_log_record_fini(&ctxs[i].bult_record);
306  }
307  m0_free(iters);
308  m0_free(order);
309  m0_free(ctxs);
310 
311  be_ut_log_fini(&log);
313 }
314 
/*
 * NOTE(review): the first signature line (original line 315) is elided
 * from this listing; the cross-reference index shows this function as
 * be_ut_log_record_init_write_one(struct m0_be_log_record *record,
 * struct m0_be_log *log, struct m0_mutex *lock, struct m0_be_op *op).
 * It initialises a single record, reserves log space for it and launches
 * the write asynchronously (completion is signalled through *op).
 * Body lines 321, 324, 326, 328, 330, 332, 335-337, 339 are elided as
 * well -- confirm against the full source.
 */
316  struct m0_be_log *log,
317  struct m0_mutex *lock,
318  struct m0_be_op *op)
319 {
320  m0_bcount_t reserve_size;
322  int rc;
323 
325  reserve_size = m0_be_log_reserved_size(log, &size, 1);
327  m0_be_op_init(op);
329  M0_UT_ASSERT(rc == 0);
331  M0_UT_ASSERT(rc == 0);
333  rc = m0_be_log_reserve(log, reserve_size);
334  M0_UT_ASSERT(rc == 0);
338 
340 }
341 
/*
 * NOTE(review): the first signature line (original line 342) is elided
 * from this listing; the cross-reference index shows this function as
 * be_ut_log_record_wait_fini_one(struct m0_be_log_record *record,
 * struct m0_mutex *lock, struct m0_be_op *op, bool discard).
 * It waits for a previously launched write to complete, then either
 * discards the record or skips the discard, and finalises record and op.
 */
343  struct m0_mutex *lock,
344  struct m0_be_op *op,
345  bool discard)
346 {
347  m0_be_op_wait(op);
349  if (discard)
350  m0_be_log_record_discard(record->lgr_log, record->lgr_size);
351  else
355  /* record can be non-discarded here, in this case finalisation must be
356  * protected with external lock.
357  */
361  m0_be_op_fini(op);
362 }
363 
364 /* Writes a record to the log and returns position/size of the record. */
 /* NOTE(review): the parameter line for 'm0_bindex_t *index' (original
  * line 367) and the write itself (lines 373, 377) are elided from this
  * listing -- the cross-reference index confirms the signature takes
  * (log, lock, index, size).  Confirm against the full source. */
365 static void be_ut_log_record_write_sync(struct m0_be_log *log,
366  struct m0_mutex *lock,
368  m0_bcount_t *size)
369 {
370  struct m0_be_log_record record = {};
371  struct m0_be_op op = {};
372 
374  /* read-only access to record's fields when it's scheduled */
375  *index = record.lgr_position;
376  *size = record.lgr_size;
378 }
379 
/*
 * Verifies that the first valid record in the log starts at position
 * 'pos': walks an iterator to the initial record and compares its header
 * position.
 * NOTE(review): original lines 386 (presumably iterator init assigning
 * rc) and 391 (presumably iterator fini) are elided from this listing --
 * confirm against the full source.
 */
380 static void be_ut_log_curr_pos_check(struct m0_be_log *log,
381  m0_bindex_t pos)
382 {
383  struct m0_be_log_record_iter iter = {};
384  int rc;
385 
387  M0_UT_ASSERT(rc == 0);
388  rc = m0_be_log_record_initial(log, &iter);
389  M0_UT_ASSERT(rc == 0);
390  M0_UT_ASSERT(iter.lri_header.lrh_pos == pos);
392 }
393 
394 static void be_ut_log_header_repair_test(int header_nr,
395  int new_nr,
396  m0_bindex_t lsn_old,
397  m0_bcount_t size_old,
398  m0_bindex_t lsn_new,
399  m0_bcount_t size_new,
400  int valid_index)
401 {
402  struct m0_be_fmt_log_header **hdrs;
403  struct m0_be_fmt_log_header valid = {};
404  bool need_repair;
405  int rc;
406  int i;
407 
408  M0_PRE(new_nr <= header_nr);
409  M0_PRE(valid_index < header_nr);
410 
411  M0_ALLOC_ARR(hdrs, header_nr);
412  M0_UT_ASSERT(hdrs != NULL);
413  for (i = 0; i < header_nr; ++i) {
414  M0_ALLOC_PTR(hdrs[i]);
415  M0_UT_ASSERT(hdrs[i] != NULL);
416  rc = m0_be_fmt_log_header_init(hdrs[i], NULL);
417  M0_UT_ASSERT(rc == 0);
418  if (i < new_nr)
419  m0_be_log_header__set(hdrs[i], 0, lsn_new, size_new);
420  else
421  m0_be_log_header__set(hdrs[i], 0, lsn_old, size_old);
422  }
423  rc = m0_be_fmt_log_header_init(&valid, NULL);
424  M0_UT_ASSERT(rc == 0);
425 
426  need_repair = m0_be_log_header__repair(hdrs, header_nr, &valid);
427  M0_UT_ASSERT(equi(new_nr % header_nr == 0, !need_repair));
428  M0_UT_ASSERT(m0_be_log_header__is_eq(&valid, hdrs[valid_index]));
429 
430  for (i = 0; i < header_nr; ++i) {
431  m0_be_fmt_log_header_fini(hdrs[i]);
432  m0_free(hdrs[i]);
433  }
434  m0_free(hdrs);
435  m0_be_fmt_log_header_fini(&valid);
436 }
437 
/*
 * UT entry point for the parallel-writers scenarios.
 * NOTE(review): the signature line (original line 438,
 * void m0_be_ut_log_multi(void) per the cross-reference index) and the
 * two be_ut_log_multi_ut() invocations (lines 443 and 446) are elided
 * from this listing -- confirm against the full source.
 */
439 {
440  /* Write records in parallel. Records consume size less than log
441  * capacity.
442  */
444 
445  /* Write records in parallel, but don't discard them. */
447 }
448 
/*
 * Replays the log after a reopen: iterates over the records reported by
 * the recovery API, reads each one back and discards it.
 * NOTE(review): many body lines are elided from this listing (457-458,
 * 460, 462, 464, 467-471, 474, 476-477, 479-481) -- presumably record
 * init/allocate, the recovery-availability loop condition, the read
 * launch/wait and finalisation live there; confirm against the full
 * source.
 */
450 static void be_ut_log_recover_and_discard(struct m0_be_log *log,
451  struct m0_mutex *lock)
452 {
453  struct m0_be_log_record_iter iter = {};
454  struct m0_be_log_record record = {};
455  int rc;
456 
459  M0_UT_ASSERT(rc == 0);
461  M0_UT_ASSERT(rc == 0);
463  M0_UT_ASSERT(rc == 0);
465  m0_be_log_recovery_record_get(log, &iter);
466  m0_be_log_record_assign(&record, &iter, true);
472  bo_sm.sm_rc);
473  M0_UT_ASSERT(rc == 0);
475  m0_be_log_record_discard(log, record.lgr_size);
478  }
482 }
483 
484 /*
485  * Check m0_be_log_close() in case when at least 1 record is finalised
486  * without discarding.
487  *
488  * XXX temporary changed due to recovery integration.
489  * Please change back after log_discard become a part of log.
490  */
/*
 * Checks m0_be_log_close() when some records are finalised without being
 * discarded, in three patterns (last / middle / all non-discarded), and
 * verifies after each reopen that the first valid record is the expected
 * one.  See the comment above: the interleaved true/false patterns are
 * temporarily replaced until log_discard becomes part of the log.
 * NOTE(review): the signature line (original line 491,
 * void m0_be_ut_log_unplaced(void) per the cross-reference index) and
 * lines 500, 526, 552, 570, 573 are elided from this listing -- confirm
 * against the full source.
 */
492 {
493  struct m0_be_log log = {};
494  struct m0_mutex lock = {};
495  struct m0_be_log_record records[4];
496  struct m0_be_op ops[4];
497  m0_bindex_t pos;
498  int i;
499 
501  be_ut_log_init(&log, &lock);
502 
503  /* 1th non-discarded */
504 
505  memset(records, 0, sizeof(records));
506  memset(ops, 0, sizeof(ops));
507  for (i = 0; i < 4; ++i) {
508  be_ut_log_record_init_write_one(&records[i], &log,
509  &lock, &ops[i]);
510  }
511  pos = records[3].lgr_position;
512  /*
513  be_ut_log_record_wait_fini_one(&records[0], &lock, &ops[0], true);
514  be_ut_log_record_wait_fini_one(&records[1], &lock, &ops[1], false);
515  be_ut_log_record_wait_fini_one(&records[2], &lock, &ops[2], true);
516  be_ut_log_record_wait_fini_one(&records[3], &lock, &ops[3], false);
517  */
518  be_ut_log_record_wait_fini_one(&records[0], &lock, &ops[0], true);
519  be_ut_log_record_wait_fini_one(&records[1], &lock, &ops[1], true);
520  be_ut_log_record_wait_fini_one(&records[2], &lock, &ops[2], true);
521  be_ut_log_record_wait_fini_one(&records[3], &lock, &ops[3], false);
522 
523  m0_be_log_close(&log);
524  be_ut_log_open(&log, &lock);
525  be_ut_log_curr_pos_check(&log, pos);
527 
528  /* 0th non-discarded */
529 
530  memset(records, 0, sizeof(records));
531  memset(ops, 0, sizeof(ops));
532  for (i = 0; i < 3; ++i) {
533  be_ut_log_record_init_write_one(&records[i], &log,
534  &lock, &ops[i]);
535  }
536  pos = records[1].lgr_position;
537  /*
538  * After log_discard is part of log it can be reverted back.
539  */
540  /*
541  be_ut_log_record_wait_fini_one(&records[0], &lock, &ops[0], false);
542  be_ut_log_record_wait_fini_one(&records[1], &lock, &ops[1], true);
543  be_ut_log_record_wait_fini_one(&records[2], &lock, &ops[2], false);
544  */
545  be_ut_log_record_wait_fini_one(&records[0], &lock, &ops[0], true);
546  be_ut_log_record_wait_fini_one(&records[1], &lock, &ops[1], false);
547  be_ut_log_record_wait_fini_one(&records[2], &lock, &ops[2], false);
548 
549  m0_be_log_close(&log);
550  be_ut_log_open(&log, &lock);
551  be_ut_log_curr_pos_check(&log, pos);
553 
554  /* all 3 non-discarded */
555 
556  memset(records, 0, sizeof(records));
557  memset(ops, 0, sizeof(ops));
558  for (i = 0; i < 3; ++i) {
559  be_ut_log_record_init_write_one(&records[i], &log,
560  &lock, &ops[i]);
561  }
562  pos = records[0].lgr_position;
563  be_ut_log_record_wait_fini_one(&records[0], &lock, &ops[0], false);
564  be_ut_log_record_wait_fini_one(&records[1], &lock, &ops[1], false);
565  be_ut_log_record_wait_fini_one(&records[2], &lock, &ops[2], false);
566 
567  m0_be_log_close(&log);
568  be_ut_log_open(&log, &lock);
569  be_ut_log_curr_pos_check(&log, pos);
571 
572  be_ut_log_fini(&log);
574 }
575 
/*
 * Checks log-header persistence: a fresh log has a zeroed header, the
 * header tracks the first valid record after writes and reopen, and
 * m0_be_log_header__repair() resolves partially-updated redundant
 * headers.
 * NOTE(review): the signature line (original line 576,
 * void m0_be_ut_log_header(void) per the cross-reference index), the
 * declarations of 'index'/'size' (lines 581-582, used below), line 586,
 * the per-iteration be_ut_log_record_write_sync() call (line 599) and
 * line 634 are elided from this listing -- confirm against the full
 * source.
 */
577 {
578  struct m0_be_fmt_log_header header = {};
579  struct m0_be_log log = {};
580  struct m0_mutex lock = {};
583  int i;
584  int rc;
585 
587  be_ut_log_init(&log, &lock);
588  rc = m0_be_fmt_log_header_init(&header, NULL);
589  M0_UT_ASSERT(rc == 0);
590 
591  /* Check of correct work. */
592 
593  rc = m0_be_log_header_read(&log, &header);
594  M0_UT_ASSERT(rc == 0);
595  M0_UT_ASSERT(header.flh_group_lsn == 0 &&
596  header.flh_group_size == 0);
597 
598  for (i = 0; i < BE_UT_LOG_THREAD_NR; ++i) {
600  if (i == 0) {
601  /* log header must contain position of a valid record
602  * after first write.
603  */
604  rc = m0_be_log_header_read(&log, &header);
605  M0_UT_ASSERT(rc == 0);
606  M0_UT_ASSERT(header.flh_group_lsn == 0 &&
607  header.flh_group_size == size);
608  }
609  }
610 
611  /* log writes header during closing */
612  m0_be_log_close(&log);
613  rc = be_ut_log_open(&log, &lock);
614  M0_UT_ASSERT(rc == 0);
615 
616  rc = m0_be_log_header_read(&log, &header);
617  M0_UT_ASSERT(rc == 0);
618  M0_UT_ASSERT(header.flh_group_lsn == index &&
619  header.flh_group_size == size);
620 
621  /* m0_be_log_header__repair() check */
622 
623  be_ut_log_header_repair_test(3, 0, index + 4096, size + 4096,
624  index, size, 0);
625  be_ut_log_header_repair_test(3, 3, index + 4096, size + 4096,
626  index, size, 0);
627  be_ut_log_header_repair_test(3, 1, index + 4096, size + 4096,
628  index, size, 2);
629  be_ut_log_header_repair_test(3, 2, index + 4096, size + 4096,
630  index, size, 0);
631 
632  m0_be_fmt_log_header_fini(&header);
633  be_ut_log_fini(&log);
635 }
636 
637 /* Check guarantees of api that ain't checked by the rest tests. */
/*
 * Checks API guarantees not covered by the other tests:
 * m0_be_log_record_initial() returns -ENOENT on an empty log, and
 * reserve/unreserve calls balance in any interleaving.
 * NOTE(review): the signature line (original line 638,
 * void m0_be_ut_log_api(void) per the cross-reference index) and lines
 * 646, 651, 655, 660, 667, 676 are elided from this listing -- confirm
 * against the full source.
 */
639 {
640  struct m0_be_log log = {};
641  struct m0_mutex lock = {};
642  struct m0_be_log_record_iter iter = {};
643  m0_bcount_t unit;
644  int rc;
645 
647  be_ut_log_init(&log, &lock);
648 
649  /* m0_be_log_record_initial() on empty log */
650 
652  M0_UT_ASSERT(rc == 0);
653  rc = m0_be_log_record_initial(&log, &iter);
654  M0_UT_ASSERT(rc == -ENOENT);
656 
657  /* m0_be_log_unreserve() check */
658 
 /* one reservation unit == one log block */
659  unit = 1 << m0_be_log_bshift(&log);
661  m0_be_log_reserve(&log, unit);
662  m0_be_log_reserve(&log, unit);
663  m0_be_log_unreserve(&log, 2 * unit);
664  m0_be_log_reserve(&log, 2 * unit);
665  m0_be_log_unreserve(&log, unit);
666  m0_be_log_unreserve(&log, unit);
668  /* log checks pointers during finalisation */
669  m0_be_log_close(&log);
670  rc = be_ut_log_open(&log, &lock);
671  M0_UT_ASSERT(rc == 0);
672 
673  /* end of tests */
674 
675  be_ut_log_fini(&log);
677 }
678 
679 /* Simple UT shows example of log usage. */
/*
 * End-to-end example of log usage: writes a record with two log IOs
 * ("lio1" and "lio2"), discards it, then reads it back through the
 * iterator API and verifies both payloads.
 * NOTE(review): the signature line (original line 680,
 * void m0_be_ut_log_user(void) per the cross-reference index) and many
 * body lines (691, 701-712, 714-718, 721, 723, 725, 727, 730-731, 735,
 * 737-739, 744-752, 758-763, 767, 769, 771, 773, 776-778, 781) are
 * elided from this listing -- record init, IO create/allocate, reserve,
 * launch/wait and finalisation presumably live there; confirm against
 * the full source.
 */
681 {
682  struct m0_be_log_record_iter iter = {};
683  struct m0_be_log_record record = {};
684  struct m0_be_log log = {};
685  struct m0_mutex lock = {};
686  struct m0_bufvec *bvec;
687  m0_bcount_t size[2];
688  m0_bcount_t reserved_size;
689  int rc;
690 
692  be_ut_log_init(&log, &lock);
693 
694  /* Check that actual record's size is less than we will reserve */
695  size[0] = size[1] = BE_UT_LOG_LIO_SIZE;
696  reserved_size = m0_be_log_reserved_size(&log, size, 2);
697  M0_UT_ASSERT(reserved_size < BE_UT_LOG_RESERVE_SIZE);
698 
699  /* Write */
700 
703  M0_UT_ASSERT(rc == 0);
705  M0_UT_ASSERT(rc == 0);
707  M0_UT_ASSERT(rc == 0);
708 
711  M0_UT_ASSERT(rc == 0);
713 
719 
720  /* fill bufvec */
722  M0_UT_ASSERT(bvec->ov_vec.v_nr == 1 &&
724  strncpy(bvec->ov_buf[0], "lio1", BE_UT_LOG_LIO_SIZE);
726  M0_UT_ASSERT(bvec->ov_vec.v_nr == 1 &&
728  strncpy(bvec->ov_buf[0], "lio2", BE_UT_LOG_LIO_SIZE);
729 
732  bo_sm.sm_rc);
733  M0_UT_ASSERT(rc == 0);
734 
736  m0_be_log_record_discard(&log, record.lgr_size);
740 
741  /* Read */
742 
743  record = (struct m0_be_log_record){};
746  M0_UT_ASSERT(rc == 0);
748  M0_UT_ASSERT(rc == 0);
750  M0_UT_ASSERT(rc == 0);
751 
753  M0_UT_ASSERT(rc == 0);
754  rc = m0_be_log_record_initial(&log, &iter);
755  M0_UT_ASSERT(rc == 0);
756  m0_be_log_record_assign(&record, &iter, false);
757 
761 
764  bo_sm.sm_rc);
765  M0_UT_ASSERT(rc == 0);
766 
768  M0_UT_ASSERT(bvec->ov_vec.v_nr == 1 &&
770  M0_UT_ASSERT(memcmp("lio1", bvec->ov_buf[0], strlen("lio1")) == 0);
772  M0_UT_ASSERT(bvec->ov_vec.v_nr == 1 &&
774  M0_UT_ASSERT(memcmp("lio2", bvec->ov_buf[0], strlen("lio2")) == 0);
775 
779 
780  be_ut_log_fini(&log);
782 }
783 
784 /*
785  * Local variables:
786  * c-indentation-style: "K&R"
787  * c-basic-offset: 8
788  * tab-width: 8
789  * fill-column: 80
790  * scroll-step: 1
791  * End:
792  */
793 /*
794  * vim: tabstop=8 shiftwidth=8 noexpandtab textwidth=80 nowrap
795  */
m0_bindex_t lrh_pos
Definition: fmt.h:259
M0_INTERNAL void m0_be_log_record_io_size_set(struct m0_be_log_record *record, int index, m0_bcount_t size)
Definition: log.c:636
static struct m0_mutex lock
Definition: transmit.c:326
M0_INTERNAL struct m0_bufvec * m0_be_log_record_io_bufvec(struct m0_be_log_record *record, int index)
Definition: log.c:830
#define M0_PRE(cond)
static void be_ut_log_record_init_write_one(struct m0_be_log_record *record, struct m0_be_log *log, struct m0_mutex *lock, struct m0_be_op *op)
Definition: log.c:315
#define M0_ALLOC_ARR(arr, nr)
Definition: memory.h:84
struct m0_be_fmt_log_record_header lri_header
Definition: log.h:509
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
#define NULL
Definition: misc.h:38
Definition: io.h:230
static void be_ut_log_cfg_set(struct m0_be_log_cfg *log_cfg, struct m0_mutex *lock)
Definition: log.c:55
M0_INTERNAL int m0_be_log_record_iter_init(struct m0_be_log_record_iter *iter)
Definition: log.c:1151
char * lsc_stob_domain_location
Definition: log_store.h:144
M0_INTERNAL bool m0_be_log_recovery_record_available(struct m0_be_log *log)
Definition: log.c:1214
M0_INTERNAL int m0_be_log_record_allocate(struct m0_be_log_record *record)
Definition: log.c:615
M0_INTERNAL int m0_be_log_record_initial(struct m0_be_log *log, struct m0_be_log_record_iter *curr)
Definition: log.c:1167
M0_INTERNAL void m0_be_log_record_io_launch(struct m0_be_log_record *record, struct m0_be_op *op)
Definition: log.c:795
M0_INTERNAL const struct m0_fid * m0_stob_domain_id_get(const struct m0_stob_domain *dom)
Definition: domain.c:300
m0_bcount_t bult_reserve_size
Definition: log.c:124
M0_INTERNAL int m0_stob_domain_destroy(struct m0_stob_domain *dom)
Definition: domain.c:227
struct m0_vec ov_vec
Definition: vec.h:147
void m0_be_ut_log_header(void)
Definition: log.c:576
M0_INTERNAL int m0_be_log_open(struct m0_be_log *log, struct m0_be_log_cfg *log_cfg)
Definition: log.c:281
M0_INTERNAL m0_bcount_t m0_be_log_reserved_size(struct m0_be_log *log, m0_bcount_t *lio_size, int lio_nr)
Definition: log.c:358
M0_INTERNAL void m0_be_log_record_io_prepare(struct m0_be_log_record *record, enum m0_stob_io_opcode opcode, m0_bcount_t size_reserved)
Definition: log.c:660
uint64_t m0_bindex_t
Definition: types.h:80
struct m0_be_log_record bult_record
Definition: log.c:119
uint64_t m0_bcount_t
Definition: types.h:77
static int be_ut_log_open(struct m0_be_log *log, struct m0_mutex *lock)
Definition: log.c:103
static void be_ut_log_curr_pos_check(struct m0_be_log *log, m0_bindex_t pos)
Definition: log.c:380
m0_bcount_t bult_lio_size
Definition: log.c:123
#define M0_SET0(obj)
Definition: misc.h:64
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
static void be_ut_log_fini(struct m0_be_log *log)
Definition: log.c:94
void ** ov_buf
Definition: vec.h:149
void m0_be_ut_log_api(void)
Definition: log.c:638
void m0_be_ut_log_multi(void)
Definition: log.c:438
M0_INTERNAL uint64_t m0_round_up(uint64_t val, uint64_t size)
Definition: misc.c:181
M0_INTERNAL int m0_be_log_record_io_create(struct m0_be_log_record *record, m0_bcount_t size_max)
Definition: log.c:558
op
Definition: libdemo.c:64
#define equi(a, b)
Definition: misc.h:297
M0_INTERNAL bool m0_be_log_header__repair(struct m0_be_fmt_log_header **hdrs, int nr, struct m0_be_fmt_log_header *out)
Definition: log.c:953
char * bult_data
Definition: log.c:115
M0_INTERNAL int m0_be_log_record_next(struct m0_be_log *log, const struct m0_be_log_record_iter *curr, struct m0_be_log_record_iter *next)
Definition: log.c:1191
int i
Definition: dir.c:1033
struct m0_be_log_store_cfg lc_store_cfg
Definition: log.h:250
M0_INTERNAL int m0_stob_domain_create(const char *location, const char *str_cfg_init, uint64_t dom_key, const char *str_cfg_create, struct m0_stob_domain **out)
Definition: domain.c:217
M0_INTERNAL void m0_be_log_record_assign(struct m0_be_log_record *record, struct m0_be_log_record_iter *iter, bool need_discard)
Definition: log.c:467
const char * be_ut_log_sdom_init_cfg
Definition: log.c:45
M0_INTERNAL void m0_be_log_close(struct m0_be_log *log)
Definition: log.c:287
#define M0_BE_OP_SYNC_RET(op_obj, action, member)
Definition: op.h:243
static struct m0_addb2_callback c
Definition: consumer.c:41
struct m0_mutex * bult_lock
Definition: log.c:117
struct m0_be_op bult_op
Definition: log.c:120
static struct m0_bufvec bvec
Definition: xcode.c:169
bool bult_discard
Definition: log.c:121
M0_INTERNAL void m0_be_log_record_fini(struct m0_be_log_record *record)
Definition: log.c:410
M0_INTERNAL void m0_stob_id_make(uint64_t container, uint64_t key, const struct m0_fid *dom_id, struct m0_stob_id *stob_id)
Definition: stob.c:343
static void be_ut_log_record_wait_fini_one(struct m0_be_log_record *record, struct m0_mutex *lock, struct m0_be_op *op, bool discard)
Definition: log.c:342
void * m0_alloc(size_t size)
Definition: memory.c:126
M0_INTERNAL int m0_be_log_create(struct m0_be_log *log, struct m0_be_log_cfg *log_cfg)
Definition: log.c:292
M0_INTERNAL void m0_mutex_init(struct m0_mutex *mutex)
Definition: mutex.c:35
static void be_ut_log_init(struct m0_be_log *log, struct m0_mutex *lock)
Definition: log.c:78
M0_INTERNAL void m0_be_log_header__set(struct m0_be_fmt_log_header *hdr, m0_bindex_t discarded, m0_bindex_t lsn, m0_bcount_t size)
Definition: log.c:888
M0_INTERNAL void m0_be_log_record_discard(struct m0_be_log *log, m0_bcount_t size)
Definition: log.c:523
uint32_t v_nr
Definition: vec.h:51
m0_bcount_t * v_count
Definition: vec.h:53
M0_UT_THREADS_DEFINE(be_ut_log, be_ut_log_multi_thread)
M0_INTERNAL void m0_be_log_record_reset(struct m0_be_log_record *record)
Definition: log.c:433
M0_INTERNAL int m0_be_log_reserve(struct m0_be_log *log, m0_bcount_t size)
Definition: log.c:850
M0_INTERNAL void m0_be_log_recovery_record_get(struct m0_be_log *log, struct m0_be_log_record_iter *iter)
Definition: log.c:1221
static void be_ut_log_record_write_sync(struct m0_be_log *log, struct m0_mutex *lock, m0_bindex_t *index, m0_bcount_t *size)
Definition: log.c:365
static void be_ut_log_multi_ut(int thread_nr, bool discard, int lio_nr, m0_bcount_t lio_size)
Definition: log.c:173
int * bult_order
Definition: log.c:114
#define M0_ALLOC_PTR(ptr)
Definition: memory.h:86
M0_INTERNAL bool m0_be_log_header__is_eq(struct m0_be_fmt_log_header *hdr1, struct m0_be_fmt_log_header *hdr2)
Definition: log.c:902
M0_INTERNAL void m0_be_log_destroy(struct m0_be_log *log)
Definition: log.c:298
void m0_be_ut_log_unplaced(void)
Definition: log.c:491
void m0_be_ut_log_user(void)
Definition: log.c:680
M0_INTERNAL int m0_be_log_header_read(struct m0_be_log *log, struct m0_be_fmt_log_header *log_hdr)
Definition: log.c:973
m0_bcount_t size
Definition: di.c:39
M0_INTERNAL void m0_be_op_fini(struct m0_be_op *op)
Definition: stubs.c:92
M0_INTERNAL void m0_mutex_fini(struct m0_mutex *mutex)
Definition: mutex.c:42
const char * be_ut_log_sdom_location
Definition: log.c:44
Definition: record.py:1
#define M0_UT_THREADS_STOP(name)
Definition: threads.h:55
static void be_ut_log_recover_and_discard(struct m0_be_log *log, struct m0_mutex *lock)
Definition: log.c:450
M0_INTERNAL void m0_be_log_record_iter_fini(struct m0_be_log_record_iter *iter)
Definition: log.c:1156
struct m0_atomic64 * bult_atom
Definition: log.c:116
const char * be_ut_log_sdom_create_cfg
Definition: log.c:46
Definition: nucleus.c:42
Definition: log.h:261
static void be_ut_log_multi_thread(struct be_ut_log_thread_ctx *ctx)
Definition: log.c:127
#define M0_UT_THREADS_START(name, thread_nr, param_array)
Definition: threads.h:51
struct m0_be_log * bult_log
Definition: log.c:118
M0_INTERNAL void m0_be_log_record_deallocate(struct m0_be_log_record *record)
Definition: log.c:630
Definition: io.h:229
struct m0_fom_ops ops
Definition: io_foms.c:623
static void be_ut_log_got_space_cb(struct m0_be_log *log)
Definition: log.c:50
Definition: op.h:74
struct m0_stob_domain * be_ut_log_stob_domain
Definition: log.c:48
M0_INTERNAL void m0_be_log_record_skip_discard(struct m0_be_log_record *record)
Definition: log.c:505
static void be_ut_log_header_repair_test(int header_nr, int new_nr, m0_bindex_t lsn_old, m0_bcount_t size_old, m0_bindex_t lsn_new, m0_bcount_t size_new, int valid_index)
Definition: log.c:394
M0_INTERNAL void m0_be_op_init(struct m0_be_op *op)
Definition: stubs.c:87
void m0_free(void *data)
Definition: memory.c:146
Definition: mutex.h:47
m0_bindex_t lgr_position
Definition: log.h:334
M0_INTERNAL void m0_be_log_record_init(struct m0_be_log_record *record, struct m0_be_log *log)
Definition: log.c:390
int32_t rc
Definition: trigger_fop.h:47
M0_INTERNAL uint32_t m0_be_log_bshift(struct m0_be_log *log)
Definition: log.c:883
#define M0_UT_ASSERT(a)
Definition: ut.h:46
static int64_t m0_atomic64_add_return(struct m0_atomic64 *a, int64_t d)
M0_INTERNAL void m0_be_op_wait(struct m0_be_op *op)
Definition: stubs.c:96
struct m0_stob_id lsc_stob_id
Definition: log_store.h:135
Definition: vec.h:145
static void m0_atomic64_set(struct m0_atomic64 *a, int64_t num)
M0_INTERNAL void m0_be_log_unreserve(struct m0_be_log *log, m0_bcount_t size)
Definition: log.c:868