/* -*- C -*- */
/*
 * Copyright (c) 2013-2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */


#include "lib/vec.h"
#include "lib/memory.h"
#include "lib/misc.h"         /* M0_SET0(), M0_BITS() */
#include "lib/arith.h"        /* m0_rnd() */
#include "lib/ub.h"
#include "ut/ut.h"
#include "lib/time.h"
#include "lib/varr.h"
#include "lib/varr_private.h" /* varr_cache */
#include "lib/finject.h"      /* m0_fi_enable_off_n_on_m() */
#ifndef __KERNEL__
#include <limits.h>           /* CHAR_BIT */
#else
#include <linux/limits.h>
#endif

/* Types of objects stored in the arrays under test. */
enum data_types {
        DT_ATOMIC_8,
        DT_ATOMIC_64,
        DT_POWTWO,
        DT_NON_POWTWO,
};

/*
 * Object-size shifts: each shift is log2 of the (power-of-two rounded)
 * object size, and BUFF_SHIFT is log2(BUFF_SIZE); the values follow from
 * the M0_BASSERTs below.
 */
enum sizes_and_shifts {
        BUFF_SIZE           = 32,
        BUFF_SHIFT          = 5,
        DT_ATOMIC_8_SHIFT   = 0,
        DT_ATOMIC_64_SHIFT  = 3,
        DT_POWTWO_SHIFT     = 4,
        DT_NON_POWTWO_SHIFT = 5,
};

M0_BASSERT(sizeof (uint64_t) == M0_BITS(DT_ATOMIC_64_SHIFT));
M0_BASSERT(sizeof (uint8_t) == M0_BITS(DT_ATOMIC_8_SHIFT));

enum misc_params {
/*
 * The following expression has been used to compute the maximum allowable
 * tree depth under the given constraints. In the actual computation,
 * SYSTEM_MEMORY has been assumed to be 1GB.
 *
 *   max_depth = log(SYSTEM_MEMORY *
 *                   (BUFF_SIZE - M0_VA_TNODEPTR_SIZE) /
 *                   (BUFF_SIZE * M0_VA_TNODEPTR_SIZE * M0_VA_TNODE_NR) + 1) /
 *               log(BUFF_SIZE / M0_VA_TNODEPTR_SIZE) + 1;
 */
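/*
 * Worked example (illustrative only; M0_VA_TNODEPTR_SIZE == 8 and
 * M0_VA_TNODE_NR == 64 are assumptions here, not values taken from varr.h):
 * with SYSTEM_MEMORY = 2^30 and BUFF_SIZE = 32, the argument of the outer
 * log is 2^30 * 24 / (32 * 8 * 64) + 1 = 24 * 2^16 + 1, roughly 1.57e6, so
 * max_depth is about log(1.57e6) / log(4) + 1, i.e. 10.3 + 1, which
 * truncates to the MAX_DEPTH of 11 used below.
 */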
        MAX_DEPTH   = 11,
        MAX_OBJ_NR  = 26000,
        MAX_BUFFERS = 4096,
};

M0_BASSERT((int)M0_0VEC_ALIGN > (int)BUFF_SIZE);
M0_BASSERT(!(M0_0VEC_ALIGN & (M0_0VEC_ALIGN - 1)));

/* sizeof (struct po2) is an integer power of two. */
struct po2 {
        uint64_t p_x;
        uint64_t p_y;
};
M0_BASSERT(sizeof (struct po2) == M0_BITS(DT_POWTWO_SHIFT));

/* sizeof (struct non_po2) is not an integer power of two. */
struct non_po2 {
        uint16_t np_chksum;
        uint8_t  np_arr[15];
};
/* Tests the case where the object size and the buffer size are the same. */
M0_BASSERT(sizeof (struct non_po2) < BUFF_SIZE &&
           sizeof (struct non_po2) > M0_BITS(BUFF_SHIFT - 1));
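
/*
 * Sizing note (assuming no unusual structure packing): struct non_po2 has
 * 2 + 15 = 17 payload bytes, typically padded to 18, which lies strictly
 * between M0_BITS(BUFF_SHIFT - 1) == 16 and BUFF_SIZE == 32. Since m0_varr
 * rounds the object size up to the next power of two (see test_size()),
 * such an object ends up occupying a whole 32-byte buffer.
 */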

/*
 * Initialises every element of an array of 'dt'-typed objects spanning
 * 'buff_nr' leaf buffers, verifies the contents with m0_varr_for, and then
 * re-verifies them with m0_varr_iter using every step size from 2 up to
 * the array length.
 */
#define test_iterate(varr, ele, dt, buff_nr) \
({ \
        struct m0_varr *__varr_ = (varr); \
        typeof(ele)     __ele; \
        uint64_t        __buff_nr = (buff_nr); \
        uint64_t        __nr; \
        int             __rc; \
        uint64_t        __step; \
        uint32_t        __dt = dt; \
 \
        M0_ASSERT(size_get(__dt) == sizeof __ele); \
        M0_SET0(__varr_); \
        __nr = array_len_compute(__buff_nr, __dt); \
        __rc = m0_varr_init(__varr_, __nr, size_get(__dt), BUFF_SIZE); \
        M0_ASSERT(__rc == 0); \
        m0_varr_iter(__varr_, typeof(__ele), i, obj, 0, \
                     m0_varr_size(__varr_), 1) { \
                obj_init((void *)obj, i, __dt); \
        } m0_varr_enditer; \
        m0_varr_for(__varr_, typeof(__ele), i, obj) { \
                obj_sanity_check(obj, i, __dt); \
        } m0_varr_endfor; \
        for (__step = 2; __step <= __nr; ++__step) { \
                m0_varr_iter(__varr_, typeof(__ele), i, obj, 0, \
                             m0_varr_size(__varr_), __step) { \
                        obj_sanity_check((void *)obj, i, __dt); \
                } m0_varr_enditer; \
        } \
        m0_varr_fini(__varr_); \
})
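
/*
 * Illustrative use (hypothetical values): given a pointer 'varr' to a
 * struct m0_varr and a uint64_t variable 'sample',
 * test_iterate(varr, sample, DT_ATOMIC_64, 8) builds an array spanning
 * 8 leaf buffers, i.e. 8 * M0_BITS(BUFF_SHIFT - DT_ATOMIC_64_SHIFT) == 32
 * elements, and checks it with every step size from 2 to 32. The actual
 * invocations are in test_ut_iterate() below.
 */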

/* Tests init and fini APIs for m0_varr. */
static void test_init(void);
/* Tests sanity of the object and buffer sizes that get stored within
 * m0_varr. */
static void test_size(void);
/* Tests tree construction for complete trees of various depths, the maximum
 * depth being max_depth. */
static void test_depth(uint32_t max_depth);
/* Tests contents of the cache present in m0_varr. */
static void test_cache(void);
/* Iterates over the array for input objects of various types. */
static void test_ut_iterate(uint64_t nr);
static void obj_init(void *obj, uint64_t data, enum data_types dt);
static void obj_sanity_check(const void *obj, uint64_t data,
                             enum data_types dt);
static size_t size_get(enum data_types dt);
static uint16_t int_summation(uint8_t n);
uint64_t array_len_compute(uint64_t buff_nr, enum data_types dt);
uint32_t shift_get(enum data_types dt);
static void tree_sanity_check(const struct m0_varr *varr, uint32_t depth);

void test_varr(void)
{
        test_init();
        test_size();
        test_depth(MAX_DEPTH);
        test_cache();
        test_ut_iterate(MAX_BUFFERS);
}

static void test_init(void)
{
        struct m0_varr varr;
        int            rc;
        uint64_t       n;
        m0_time_t      seed;

        M0_SET0(&varr);
        rc = m0_varr_init(&varr, MAX_OBJ_NR, sizeof (uint64_t),
                          BUFF_SIZE);
        M0_UT_ASSERT(rc == 0);
        m0_varr_fini(&varr);

        /* Test fault injection. */
        seed = m0_time_now();
        n = m0_rnd(MAX_BUFFERS, &seed);
        m0_fi_enable_off_n_on_m("m0_alloc", "fail_allocation", n, 1);
        M0_SET0(&varr);
        rc = m0_varr_init(&varr, MAX_OBJ_NR, sizeof (uint64_t),
                          BUFF_SIZE);
        if (rc == 0)
                m0_varr_fini(&varr);
        m0_fi_disable("m0_alloc", "fail_allocation");
}

static void test_size(void)
{
        struct m0_varr  varr;
        int             rc;
        enum data_types dt;
        size_t          obj_size;

        for (dt = DT_ATOMIC_8; dt <= DT_NON_POWTWO; ++dt) {
                obj_size = size_get(dt);
                M0_SET0(&varr);
                /* Note that the input buffer size has been deliberately
                 * given as a non-power-of-two value. */
                rc = m0_varr_init(&varr, M0_VA_TNODE_NR, obj_size,
                                  M0_0VEC_ALIGN - 1);
                M0_UT_ASSERT(rc == 0);
                M0_UT_ASSERT(!(varr.va_obj_size & (varr.va_obj_size - 1)) &&
                             obj_size <= varr.va_obj_size &&
                             2 * obj_size > varr.va_obj_size);
                M0_UT_ASSERT(varr.va_bufsize == M0_0VEC_ALIGN);
                tree_sanity_check(&varr, 2);
                m0_varr_fini(&varr);
        }
}

static size_t size_get(enum data_types dt)
{
        switch (dt) {
        case DT_ATOMIC_8:
                return sizeof (uint8_t);
        case DT_ATOMIC_64:
                return sizeof (uint64_t);
        case DT_POWTWO:
                return sizeof (struct po2);
        case DT_NON_POWTWO:
                return sizeof (struct non_po2);
        }
        return 0;
}

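/*
 * Checks that the tree shape recorded in 'varr' is consistent with 'depth'.
 * Illustrative arithmetic (the fan-out of 4 assumes a 32-byte buffer holding
 * 8-byte node pointers, i.e. va_bufptr_nr_shift == 2; the pointer size is an
 * assumption, not something fixed by this file): with num_trees root nodes
 * in use, a tree of depth d references at most num_trees * 4^(d - 2) leaf
 * buffers, each holding M0_BITS(va_buf_shift - va_obj_shift) objects.
 */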
static void tree_sanity_check(const struct m0_varr *varr, uint32_t depth)
{
        uint32_t num_trees;
        uint64_t buff_nr;
        uint64_t num_accommodated;

        for (num_trees = 0; num_trees < M0_VA_TNODE_NR &&
             varr->va_tree[num_trees] != NULL; ++num_trees)
                /* do nothing */;

        /* Maximum possible number of leaf buffers for the given depth. */
        buff_nr  = M0_BITS(varr->va_bufptr_nr_shift * (varr->va_depth - 2));
        buff_nr *= num_trees;
        M0_UT_ASSERT(varr->va_buff_nr <= buff_nr);
        /* Maximum number of objects that can be accommodated in the given
         * number of leaf buffers. */
        num_accommodated = M0_BITS(varr->va_buf_shift - varr->va_obj_shift) *
                           varr->va_buff_nr;
        M0_UT_ASSERT(num_accommodated >= varr->va_nr);
        M0_UT_ASSERT(num_accommodated - varr->va_nr <
                     M0_BITS(varr->va_buf_shift - varr->va_obj_shift));
        M0_UT_ASSERT(varr->va_depth == depth);
}

static void test_depth(uint32_t max_depth)
{
        struct m0_varr varr;
        uint64_t       nr;
        int            rc;
        uint32_t       depth = 2;
        uint64_t       buff_nr;

        M0_SET0(&varr);
        M0_UT_ASSERT(max_depth > 1);

        /* Test complete trees with various depths, the maximum depth being
         * max_depth. */
        for (buff_nr = M0_VA_TNODE_NR; depth <= max_depth;
             buff_nr *= (BUFF_SIZE / M0_VA_TNODEPTR_SIZE), ++depth) {
                nr = buff_nr * M0_BITS(BUFF_SHIFT - DT_POWTWO_SHIFT);
                M0_SET0(&varr);
                rc = m0_varr_init(&varr, nr, sizeof (struct po2),
                                  BUFF_SIZE);
                M0_UT_ASSERT(rc == 0);
                tree_sanity_check(&varr, depth);
                m0_varr_fini(&varr);
        }
}

static void test_cache(void)
{
        struct m0_varr varr;
        uint32_t       i;
        uint32_t       obj_per_buff;
        int            rc;
        void          *arr_ele;

        M0_SET0(&varr);
        rc = m0_varr_init(&varr, MAX_OBJ_NR, sizeof (uint64_t), BUFF_SIZE);
        M0_UT_ASSERT(rc == 0);
        tree_sanity_check(&varr, 6);
        obj_per_buff = varr.va_bufsize / varr.va_obj_size;
        for (i = 0; i < m0_varr_size(&varr); ++i) {
                arr_ele = m0_varr_ele_get(&varr, i);
                M0_UT_ASSERT(arr_ele == (void *)varr.va_cache->vc_buff +
                             (i % obj_per_buff) * varr.va_obj_size);
        }
        m0_varr_fini(&varr);
}

static uint16_t int_summation(uint8_t n)
{
        return (n + 1) * n / 2;
}

static void test_ut_iterate(uint64_t buff_nr)
{
        struct m0_varr *varr;
        struct po2      obj_po2;
        struct non_po2  obj_non_po2;
        uint8_t         atomic8_obj;
        uint64_t        atomic64_obj;

        M0_ALLOC_PTR(varr);
        M0_UT_ASSERT(varr != NULL);

        test_iterate(varr, obj_po2, DT_POWTWO, buff_nr);
        test_iterate(varr, obj_non_po2, DT_NON_POWTWO, buff_nr);
        test_iterate(varr, atomic8_obj, DT_ATOMIC_8, buff_nr);
        test_iterate(varr, atomic64_obj, DT_ATOMIC_64, buff_nr);

        m0_free(varr);
}

void test_ub_iterate(void)
{
        uint32_t  depth = 2;
        uint64_t  buff_nr;
        m0_time_t seed;
        int       i;

        /* Test with randomly chosen numbers of leaf buffers. */
        for (i = 0; i < 100; ++i) {
                seed = m0_time_now();
                buff_nr = m0_rnd(MAX_BUFFERS, &seed);
                test_ut_iterate(buff_nr);
        }

        /* Test complete trees. */
        for (buff_nr = M0_VA_TNODE_NR; depth <= MAX_DEPTH - 1;
             buff_nr *= (BUFF_SIZE / M0_VA_TNODEPTR_SIZE), ++depth) {
                test_ut_iterate(buff_nr);
        }
}

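/*
 * Returns the number of objects of type 'dt' that fill 'buff_nr' leaf
 * buffers. For example, given BUFF_SIZE == 32 and sizeof (struct po2) == 16,
 * each buffer holds M0_BITS(BUFF_SHIFT - DT_POWTWO_SHIFT) == 2 objects, so
 * array_len_compute(100, DT_POWTWO) == 200.
 */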
uint64_t array_len_compute(uint64_t buff_nr, enum data_types dt)
{
        uint32_t shift = shift_get(dt);

        return buff_nr * M0_BITS(BUFF_SHIFT - shift);
}

uint32_t shift_get(enum data_types dt)
{
        switch (dt) {
        case DT_ATOMIC_8:
                return DT_ATOMIC_8_SHIFT;
        case DT_ATOMIC_64:
                return DT_ATOMIC_64_SHIFT;
        case DT_POWTWO:
                return DT_POWTWO_SHIFT;
        case DT_NON_POWTWO:
                return DT_NON_POWTWO_SHIFT;
        }
        return 0;
}

static void obj_init(void *obj, uint64_t data, enum data_types dt)
{
        int             i;
        struct po2     *obj_po2;
        struct non_po2 *obj_non_po2;

        switch (dt) {
        case DT_ATOMIC_8:
                *(uint8_t *)obj = data % UINT8_MAX;
                break;
        case DT_ATOMIC_64:
                *(uint64_t *)obj = data;
                break;
        case DT_POWTWO:
                obj_po2 = (struct po2 *)obj;
                obj_po2->p_x = data;
                obj_po2->p_y = data;
                break;
        case DT_NON_POWTWO:
                obj_non_po2 = (struct non_po2 *)obj;
                obj_non_po2->np_chksum = 0;
                for (i = 0; i < ARRAY_SIZE(obj_non_po2->np_arr); ++i) {
                        obj_non_po2->np_arr[i]  = (data % UINT8_MAX) * i;
                        obj_non_po2->np_chksum += (data % UINT8_MAX) * i;
                }
                break;
        }
}

static void obj_sanity_check(const void *obj, uint64_t data,
                             enum data_types dt)
{
        struct po2     obj_po2;
        struct non_po2 obj_non_po2;

        switch (dt) {
        case DT_ATOMIC_8:
                M0_ASSERT(*(uint8_t *)obj == data % UINT8_MAX);
                break;
        case DT_ATOMIC_64:
                M0_ASSERT(*(uint64_t *)obj == data);
                break;
        case DT_POWTWO:
                obj_po2 = *(struct po2 *)obj;
                M0_ASSERT(obj_po2.p_x == data);
                M0_ASSERT(obj_po2.p_y == data);
                break;
        case DT_NON_POWTWO:
                obj_non_po2 = *(struct non_po2 *)obj;
                M0_ASSERT(obj_non_po2.np_chksum == (data % UINT8_MAX) *
                          int_summation(ARRAY_SIZE(obj_non_po2.np_arr) - 1));
                break;
        }
}

enum {
        UB_ITER = 1,
};

static void varr_ub(int i)
{
        test_ub_iterate();
}

struct m0_ub_set m0_varr_ub = {
        .us_name = "varr-ub",
        .us_init = NULL,
        .us_fini = NULL,
        .us_run  = {
                { .ub_name  = "varr",
                  .ub_iter  = UB_ITER,
                  .ub_round = varr_ub },
                { .ub_name  = NULL }
        }
};