/* -*- C -*- */
/*
 * Copyright (c) 2013-2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */


#include "lib/bob.h"          /* m0_bob_type */
#include "lib/memory.h"       /* M0_ALLOC_ARR */
#include "lib/misc.h"         /* m0_forall */
#include "lib/errno.h"        /* Includes appropriate errno header. */
#include "lib/types.h"        /* Includes appropriate types header. */
#include "lib/string.h"       /* strcmp() */
#include "lib/finject.h"      /* M0_FI_ENABLED() */
#include "lib/varr.h"         /* m0_varr */
#include "lib/varr_private.h" /* m0_varr_buf_alloc(), m0_varr_buf_free() */
#ifndef __KERNEL__
#include <limits.h>           /* CHAR_BIT */
#else
#include <linux/pagemap.h>    /* PAGE_SIZE */
#include <linux/limits.h>
#endif

#define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_LIB
#include "lib/trace.h"

M0_INTERNAL const struct m0_bob_type varr_bobtype;

M0_BOB_DEFINE(M0_INTERNAL, &varr_bobtype, m0_varr);

M0_INTERNAL const struct m0_bob_type varr_bobtype = {
        .bt_name         = "virtual_array",
        .bt_magix_offset = offsetof(struct m0_varr, va_magic),
        .bt_magix        = M0_LIB_GENARRAY_MAGIC,
        .bt_check        = NULL,
};

M0_INTERNAL bool varr_invariant(const struct m0_varr *arr);
M0_INTERNAL int varr_buffers_alloc(struct m0_varr *arr);
M0_INTERNAL void varr_buffers_dealloc(struct m0_varr *arr);
M0_INTERNAL uint32_t depth_find(const struct m0_varr *arr, uint64_t buff_nr);
M0_INTERNAL uint32_t index_within_level(const struct m0_varr *arr,
                                        uint64_t target_idx, uint32_t depth);
M0_INTERNAL uint32_t children_of_level(const struct m0_varr *arr,
                                       uint32_t level);
M0_INTERNAL void *cache_fetch(const struct m0_varr *arr, uint64_t index);
M0_INTERNAL void cache_update(struct m0_varr *arr, void *holder,
                              uint64_t start_index);
M0_INTERNAL unsigned long varr_obj_nr_in_buff(const struct m0_varr *arr);
M0_INTERNAL uint64_t total_leaf_buffers(unsigned long nr,
                                        unsigned long obj_nr_in_1_cont,
                                        uint8_t obj_nr_shift);
M0_INTERNAL uint64_t max_idx_within_level(const struct m0_varr_cursor *cursor,
                                          uint32_t depth);
M0_INTERNAL uint32_t inc_to_idx_xlate(const struct m0_varr_cursor *cursor,
                                      uint64_t carry, uint32_t depth);
M0_INTERNAL uint64_t inc_for_next_level(const struct m0_varr_cursor *cursor,
                                        uint64_t carry, uint32_t depth);

M0_INTERNAL uint8_t log_radix(const struct m0_varr *arr, uint32_t level);
M0_INTERNAL uint8_t nearest_power_of_two(size_t num);
M0_INTERNAL uint64_t last_nbits_set(uint8_t n);
M0_INTERNAL void *buff_incr(const struct m0_varr *arr, uint32_t depth,
                            void *buff, uint32_t inc);

#define safe_bitshift(num, shift, operator)                \
({                                                         \
        uint8_t     __shift = (shift);                     \
        typeof(num) __num   = (num);                       \
        M0_ASSERT(__shift < CHAR_BIT * sizeof __num);      \
        __num operator __shift;                            \
})

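/*
 * Illustrative sketch (not part of the original source): safe_bitshift()
 * is a GCC statement-expression that guards against shifting by the
 * operand's full bit-width or more, which is undefined behaviour in C.
 * For example:
 *
 *     size_t bufsize = safe_bitshift((size_t) 1, 12, <<);    // == 4096
 *     size_t bufidx  = safe_bitshift((size_t) 4096, 12, >>); // == 1
 *     // safe_bitshift((uint8_t) 1, 8, <<) would trip M0_ASSERT(),
 *     // because 8 == CHAR_BIT * sizeof(uint8_t).
 */
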
M0_INTERNAL int m0_varr_init(struct m0_varr *arr, uint64_t nr, size_t size,
                             size_t bufsize)
{
        int rc = 0;

        M0_PRE(arr != NULL);
        M0_PRE(nr > 0 && size > 0 && bufsize > 0);
        M0_PRE(size <= bufsize);
#ifdef __KERNEL__
        M0_PRE(bufsize <= PAGE_SIZE);
#endif

        arr->va_nr = nr;
        /* Can result in padding if object and buffer sizes are not integer
         * powers of two. */
        arr->va_obj_shift = nearest_power_of_two(size);
        arr->va_obj_size  = safe_bitshift((size_t) 1, arr->va_obj_shift,
                                          <<);
        arr->va_buf_shift = nearest_power_of_two(bufsize);
        arr->va_bufsize   = safe_bitshift((size_t) 1, arr->va_buf_shift,
                                          <<);
        arr->va_bufptr_nr_shift = arr->va_buf_shift -
                                  nearest_power_of_two(M0_VA_TNODEPTR_SIZE);
        arr->va_bufptr_nr = safe_bitshift((uint64_t) 1,
                                          arr->va_bufptr_nr_shift, <<);
        m0_varr_bob_init(arr);
        arr->va_failure_depth = 0;
        M0_ALLOC_PTR(arr->va_cache);
        if (arr->va_cache != NULL) {
                arr->va_buff_nr = total_leaf_buffers(arr->va_nr,
                                                     varr_obj_nr_in_buff(arr),
                                                     arr->va_buf_shift -
                                                     arr->va_obj_shift);
                arr->va_depth = depth_find(arr, arr->va_buff_nr);
                rc = varr_buffers_alloc(arr);
        } else
                rc = -ENOMEM;
        if (rc != 0)
                m0_varr_fini(arr);
        M0_POST_EX(ergo(rc == 0, varr_invariant(arr)));
        return M0_RC(rc);
}

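/*
 * Usage sketch (hypothetical, compiled out by default): a typical caller
 * creates the array, addresses elements through m0_varr_ele_get(), and
 * finalises it. The guard macro and the sizes below are chosen for
 * illustration only.
 */
#ifdef VARR_USAGE_EXAMPLE
static int varr_usage_example(void)
{
        struct m0_varr  va;
        uint64_t       *slot;
        int             rc;

        /* 1000 objects of 8 bytes each, packed into 4KB buffers. */
        rc = m0_varr_init(&va, 1000, sizeof(uint64_t), 4096);
        if (rc != 0)
                return rc;
        slot  = m0_varr_ele_get(&va, 42); /* address of the 43rd element */
        *slot = 0xdeadbeef;
        m0_varr_fini(&va);
        return 0;
}
#endif
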
M0_INTERNAL uint8_t nearest_power_of_two(size_t num)
{
        size_t  aligned_num   = 1;
        uint8_t aligned_shift = 0;

        M0_PRE(num > 0);

        while (num > aligned_num) {
                aligned_num = safe_bitshift(aligned_num, 1, <<);
                ++aligned_shift;
        }
        return aligned_shift;
}

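/*
 * Note (illustrative, not in the original source): despite its name, this
 * returns the shift of the next power of two that is >= num, i.e.
 * ceil(log2(num)):
 *
 *     nearest_power_of_two(4096) == 12
 *     nearest_power_of_two(5000) == 13   // rounds up, hence the padding
 */
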
M0_INTERNAL unsigned long varr_obj_nr_in_buff(const struct m0_varr *arr)
{
        M0_PRE(arr != NULL);
        return safe_bitshift((unsigned long) 1,
                             (arr->va_buf_shift - arr->va_obj_shift), <<);
}

M0_INTERNAL uint64_t total_leaf_buffers(unsigned long nr,
                                        unsigned long obj_nr_in_1_cont,
                                        uint8_t obj_nr_shift)
{
        uint64_t buff_nr;

        M0_PRE(obj_nr_in_1_cont > 0);

        buff_nr  = safe_bitshift(nr, obj_nr_shift, >>);
        buff_nr += (nr & (obj_nr_in_1_cont - 1)) == 0 ? 0 : 1;
        return buff_nr;
}

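/*
 * Worked example (illustrative): total_leaf_buffers() is a bit-twiddled
 * ceil(nr / obj_nr_in_1_cont). With 4096-byte buffers and 8-byte objects,
 * obj_nr_in_1_cont == 512 and obj_nr_shift == 9, so for nr == 1000:
 *
 *     1000 >> 9 == 1, and 1000 & 511 != 0, hence 1 + 1 == 2 buffers.
 */
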
/* All trees that hold objects have the same depth. This depth is a
 * many-to-one function of the total number of objects to be stored in the
 * array. For example, suppose one buffer can hold k objects; then an array of
 * k objects fits into a single leaf node of a tree. In order to store an
 * array with k + 1 objects, instead of using a tree with depth 2, we use two
 * trees each having depth one. Thus, if the total number of available trees
 * is M0_VA_TNODE_NR, then for *all* arrays with total objects less than or
 * equal to k * M0_VA_TNODE_NR, the depth of the trees holding object(s) is
 * one. When the total objects in an array exceed k * M0_VA_TNODE_NR, we
 * increase the depth by one. If buf_size represents the size of a buffer,
 * ptr_size the size of a pointer, and obj_size the size of an object, then
 * the following table summarizes the mapping between the total number of
 * objects and the depth of the trees holding objects.
 * @verbatim
 ______________________________________________________________________
 | Max. number of objects                                      | Depth |
 |_____________________________________________________________|_______|
 | M0_VA_TNODE_NR * (buf_size/obj_size)                        |   1   |
 |_____________________________________________________________|_______|
 | M0_VA_TNODE_NR * (buf_size/ptr_size)   * (buf_size/obj_size)|   2   |
 |_____________________________________________________________|_______|
 | M0_VA_TNODE_NR * (buf_size/ptr_size)^2 * (buf_size/obj_size)|   3   |
 |_____________________________________________________________|_______|
 * @endverbatim
 * The current implementation treats the virtual array not as a collection of
 * trees with the same depth, but as a single tree encompassing the entire
 * data structure. The following function returns the depth of this tree; for
 * each case in the table above, this tree is one level deeper than the depth
 * mentioned in the table.
 */
M0_INTERNAL uint32_t depth_find(const struct m0_varr *arr,
                                uint64_t total_leaves)
{
        uint32_t level;

        M0_PRE(arr != NULL);
        M0_PRE(total_leaves > 0);

        for (level = 1;; ++level)
                if (total_leaves <= safe_bitshift((uint64_t) 1,
                                                  arr->va_bufptr_nr_shift *
                                                  (level - 1) +
                                                  M0_VA_TNODE_NR_SHIFT, <<))
                        break;
        return level + 1;
}

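/*
 * Worked example (illustrative; assumes va_bufptr_nr_shift == 9, i.e.
 * 4096-byte buffers holding 8-byte pointers, and M0_VA_TNODE_NR_SHIFT == 6
 * purely for the arithmetic): one leaf level then covers up to 1 << 6 == 64
 * leaves, two levels cover 1 << 15 leaves, and so on. For total_leaves == 2
 * the loop stops at level == 1 and depth_find() returns 2: one level of root
 * slots plus one level of leaf buffers.
 */
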
M0_INTERNAL int varr_buffers_alloc(struct m0_varr *arr)
{
        struct m0_varr_cursor cursor;
        int                   rc = 0;
        void                 *holder;
        uint32_t              i;

        for (i = 1; i < arr->va_depth; ++i) {
                rc = m0_varr_cursor_init(&cursor, arr, i);
                if (rc != 0)
                        goto end;
                do {
                        holder = m0_varr_buf_alloc(arr->va_bufsize);
                        if (holder == NULL) {
                                rc = -ENOMEM;
                                arr->va_failure_depth = cursor.vc_done == 0 ?
                                        cursor.vc_depth : cursor.vc_depth + 1;
                                goto end;
                        }
                        *(void **)m0_varr_cursor_get(&cursor) = holder;
                } while (m0_varr_cursor_next(&cursor));
        }
end:
        return M0_RC(rc);
}

M0_INTERNAL int m0_varr_cursor_init(struct m0_varr_cursor *cursor,
                                    const struct m0_varr *arr,
                                    uint32_t depth)
{
        struct m0_varr_path_element *pe;
        void                        *buf;
        void                        *root;

        M0_PRE(cursor != NULL);
        M0_PRE(arr != NULL);
        M0_PRE(depth <= arr->va_depth);

        cursor->vc_arr   = (struct m0_varr *)arr;
        cursor->vc_depth = 0;
        cursor->vc_done  = 0;
        pe               = &cursor->vc_path[0];
        pe->vp_idx       = 0;
        root             = (void *)arr->va_tree;
        /* Note that we never dereference pe->vp_buf at depth == 0 outside
         * the scope of this function. */
        pe->vp_buf       = (void *)&root;
        pe->vp_width     = 1;

        while (cursor->vc_depth < depth) {
                buf = pe->vp_buf;
                if (buf != NULL) {
                        ++pe;
                        ++cursor->vc_depth;
                        pe->vp_buf   = *(void **)buf;
                        pe->vp_idx   = 0;
                        pe->vp_width = children_of_level(arr,
                                                         cursor->vc_depth);
                } else
                        return M0_ERR(-EINVAL);
        }
        return 0;
}

M0_INTERNAL uint32_t children_of_level(const struct m0_varr *arr,
                                       uint32_t level)
{
        M0_PRE(arr != NULL);
        M0_PRE(level <= arr->va_depth);

        if (level <= 1)
                return level == 1 ? M0_VA_TNODE_NR : 1;
        else
                return level == arr->va_depth ?
                        safe_bitshift((uint32_t) 1,
                                      arr->va_buf_shift - arr->va_obj_shift,
                                      <<) : arr->va_bufptr_nr;
}

M0_INTERNAL void *m0_varr_cursor_get(struct m0_varr_cursor *cursor)
{
        M0_PRE(cursor != NULL);
        return cursor->vc_path[cursor->vc_depth].vp_buf;
}

M0_INTERNAL int m0_varr_cursor_next(struct m0_varr_cursor *cursor)
{
        return m0_varr_cursor_move(cursor, 1);
}

M0_INTERNAL int m0_varr_cursor_move(struct m0_varr_cursor *cursor,
                                    uint64_t inc)
{
        void                        *buf;
        struct m0_varr_path_element *pe;
        uint32_t                     d = cursor->vc_depth;
        uint64_t                     target_idx;
        uint64_t                     max_idx_in_level;
        uint64_t                     idx_in_level;

        M0_PRE(cursor != NULL);
        M0_PRE(d <= cursor->vc_arr->va_depth);

        pe               = &cursor->vc_path[d];
        max_idx_in_level = max_idx_within_level(cursor, d);
        target_idx       = cursor->vc_done + inc;
        if (target_idx > max_idx_in_level)
                goto end;
        else if (target_idx == cursor->vc_done)
                goto next;
        idx_in_level = pe->vp_idx + inc;
        while (d > 0 && idx_in_level >= pe->vp_width) {
                inc        = inc_for_next_level(cursor, idx_in_level, d);
                pe->vp_idx = inc_to_idx_xlate(cursor, idx_in_level, d);
                --pe;
                --d;
                idx_in_level = pe->vp_idx + inc;
        }
        pe->vp_buf = buff_incr(cursor->vc_arr, d, pe->vp_buf, inc);
        pe->vp_idx = idx_in_level;
        while (d < cursor->vc_depth) {
                buf = pe->vp_buf;
                ++pe;
                ++d;
                pe->vp_buf = *(void **)buf;
                pe->vp_buf = buff_incr(cursor->vc_arr, d, pe->vp_buf,
                                       pe->vp_idx);
        }
        cursor->vc_done = target_idx;
next:
        return 1;
end:
        return 0;
}

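/*
 * Illustrative note (not part of the original source): the cursor moves
 * like an odometer. The index at the cursor's level is advanced by `inc`;
 * any overflow past vp_width is carried towards the root, and the path
 * below the first non-overflowing level is re-resolved by dereferencing
 * buffer pointers on the way back down. E.g., with 512 objects per leaf
 * buffer, advancing a leaf-level cursor from index 511 by 1 carries one
 * slot at the parent and re-enters the next leaf buffer at offset 0.
 */
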
M0_INTERNAL uint64_t max_idx_within_level(const struct m0_varr_cursor *cursor,
                                          uint32_t depth)
{
        uint64_t shift;

        shift = depth == cursor->vc_arr->va_depth ? 0 :
                cursor->vc_arr->va_buf_shift - cursor->vc_arr->va_obj_shift +
                (cursor->vc_arr->va_depth - depth - 1) *
                cursor->vc_arr->va_bufptr_nr_shift;
        return safe_bitshift(cursor->vc_arr->va_nr - 1, shift, >>);
}

M0_INTERNAL uint32_t inc_to_idx_xlate(const struct m0_varr_cursor *cursor,
                                      uint64_t carry, uint32_t depth)
{
        M0_PRE(cursor != NULL);
        M0_PRE(depth <= cursor->vc_arr->va_depth);
        return carry & (cursor->vc_path[depth].vp_width - 1);
}

M0_INTERNAL uint64_t inc_for_next_level(const struct m0_varr_cursor *cursor,
                                        uint64_t carry, uint32_t depth)
{
        M0_PRE(cursor != NULL);
        M0_PRE(depth <= cursor->vc_arr->va_depth);
        return safe_bitshift(carry, log_radix(cursor->vc_arr, depth), >>);
}

M0_INTERNAL uint8_t log_radix(const struct m0_varr *arr, uint32_t level)
{
        M0_PRE(arr != NULL);
        M0_PRE(level <= arr->va_depth);

        if (level <= 1)
                return level == 1 ? M0_VA_TNODE_NR_SHIFT : 0;
        else
                return level == arr->va_depth ?
                        arr->va_buf_shift - arr->va_obj_shift :
                        arr->va_bufptr_nr_shift;
}

M0_INTERNAL uint32_t index_within_level(const struct m0_varr *arr,
                                        uint64_t target_idx, uint32_t depth)
{
        uint64_t shift;
        uint64_t mask_bits;

        M0_PRE(arr != NULL);
        M0_PRE(depth <= arr->va_depth);

        shift = depth == arr->va_depth ? 0 :
                arr->va_buf_shift - arr->va_obj_shift +
                (arr->va_depth - depth - 1) * arr->va_bufptr_nr_shift;
        mask_bits = depth == arr->va_depth ?
                arr->va_buf_shift - arr->va_obj_shift :
                log_radix(arr, depth);
        target_idx  = safe_bitshift(target_idx, shift, >>);
        target_idx &= last_nbits_set(mask_bits);
        return target_idx;
}

M0_INTERNAL uint64_t last_nbits_set(uint8_t n)
{
        M0_PRE(n <= 64);
        return n < 64 ? ~safe_bitshift(~(uint64_t) 0, n, <<) :
                ~(uint64_t) 0;
}

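/*
 * Example (illustrative): last_nbits_set(3) == 0x7, i.e. a mask with the
 * lowest three bits set; last_nbits_set(64) returns an all-ones word,
 * since shifting a 64-bit value by 64 would be undefined behaviour.
 */
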
M0_INTERNAL void *buff_incr(const struct m0_varr *arr, uint32_t depth,
                            void *buff, uint32_t inc)
{
        size_t inc_unit;

        M0_PRE(arr != NULL && buff != NULL);

        if (depth == arr->va_depth)
                inc_unit = arr->va_obj_size;
        else
                inc_unit = M0_VA_TNODEPTR_SIZE;
        buff += inc * inc_unit;
        return buff;
}

M0_INTERNAL void varr_buffers_dealloc(struct m0_varr *arr)
{
        struct m0_varr_cursor cursor;
        int                   rc;
        void                 *holder;
        int32_t               i;
        uint32_t              depth;

        depth = arr->va_failure_depth == 0 ? arr->va_depth :
                arr->va_failure_depth;

        for (i = depth - 1; i > 0; --i) {
                rc = m0_varr_cursor_init(&cursor, arr, i);
                M0_ASSERT(rc == 0);
                do {
                        holder = *(void **)m0_varr_cursor_get(&cursor);
                        /* This condition fails when varr_buffers_alloc()
                         * was terminated part-way through. */
                        if (holder != NULL)
                                m0_varr_buf_free(holder, arr->va_bufsize);
                        else
                                break;
                } while (m0_varr_cursor_next(&cursor));
        }
}

M0_INTERNAL void m0_varr_fini(struct m0_varr *arr)
{
        M0_PRE(arr != NULL);
        M0_PRE_EX(ergo(arr->va_failure_depth == 0, varr_invariant(arr)));

        varr_buffers_dealloc(arr);
        m0_free(arr->va_cache);
        m0_varr_bob_fini(arr);
        arr->va_nr = arr->va_bufsize = 0;
        arr->va_depth = 0;
}

M0_INTERNAL bool varr_invariant(const struct m0_varr *arr)
{
        return m0_varr_bob_check(arr) &&
               arr->va_nr > 0 &&
               arr->va_buf_shift >= arr->va_obj_shift;
}

M0_INTERNAL void *m0_varr_ele_get(struct m0_varr *arr, uint64_t index)
{
        uint32_t  level;
        void     *holder;

        M0_PRE(arr != NULL);
        M0_PRE(index < arr->va_nr);

        holder = cache_fetch(arr, index);
        if (holder != NULL)
                goto end;
        holder = (void *)arr->va_tree;
        for (level = 1; level < arr->va_depth; ++level) {
                holder = buff_incr(arr, level, holder,
                                   index_within_level((const struct m0_varr *)
                                                      arr, index, level));
                /* Dereferences the buffer pointer at the given offset. */
                holder = *(void **)holder;
                M0_ASSERT(holder != NULL);
        }
        cache_update(arr, holder, index);
end:
        /* Adds to holder the offset of the required object within a buffer. */
        return buff_incr(arr, arr->va_depth, holder,
                         index_within_level((const struct m0_varr *) arr,
                                            index, arr->va_depth));
}

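/*
 * Lookup sketch (illustrative): m0_varr_ele_get() walks one buffer per
 * level, using index_within_level() to pick the slot at each node, and
 * memoises the resolved leaf buffer in the single-entry cache so that
 * nearby accesses touch the tree only once per leaf. For instance, with
 * 512 objects per leaf buffer and an array holding at least that many,
 * a lookup of index 0 leaves indices 0..511 served straight from
 * arr->va_cache.
 */
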
M0_INTERNAL void *cache_fetch(const struct m0_varr *arr, uint64_t index)
{
        struct varr_cache *cache;

        M0_PRE(arr != NULL);
        cache = arr->va_cache;
        return cache->vc_buff != NULL &&
               cache->vc_first_index <= index &&
               index <= cache->vc_last_index ?
               cache->vc_buff : NULL;
}

M0_INTERNAL void cache_update(struct m0_varr *arr, void *holder,
                              uint64_t index)
{
        uint64_t           index_in_level;
        struct varr_cache *cache;

        M0_PRE(arr != NULL);
        M0_PRE(index < arr->va_nr);

        cache = arr->va_cache;
        index_in_level = index_within_level((const struct m0_varr *)arr,
                                            index, arr->va_depth);
        cache->vc_buff        = holder;
        cache->vc_first_index = index - index_in_level;
        cache->vc_last_index  = min64u(cache->vc_first_index +
                                       varr_obj_nr_in_buff((const struct
                                                            m0_varr *) arr) -
                                       1, arr->va_nr - 1);
}

M0_INTERNAL uint64_t m0_varr_size(const struct m0_varr *arr)
{
        M0_PRE(arr != NULL);
        return arr->va_nr;
}

#undef M0_TRACE_SUBSYSTEM

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 80
 * scroll-step: 1
 * End:
 */