/* -*- C -*- */
/*
 * Copyright (c) 2013-2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 */


#define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_BE
#include "lib/trace.h"

#include "be/alloc.h"
#include "be/alloc_internal.h"
#include "be/seg_internal.h"    /* m0_be_seg_hdr */
#include "be/tx.h"              /* M0_BE_TX_CAPTURE_PTR */
#include "be/op.h"              /* m0_be_op */
#include "lib/memory.h"         /* m0_addr_is_aligned */
#include "lib/errno.h"          /* ENOSPC */
#include "lib/misc.h"           /* memset, M0_BITS, m0_forall */
#include "motr/magic.h"
#include "be/domain.h"          /* m0_be_domain */

/*
 * @addtogroup be
 * @todo make a doxygen page
 *
 * Overview
 *
 * The allocator provides an API to allocate and free memory in a BE segment.
 *
 * Definitions
 *
 * - allocator segment - the BE segment which is used as memory for
 *   allocations;
 * - allocator space - the part of memory inside the allocator segment in
 *   which all allocations take place. It may be smaller than the segment
 *   itself;
 * - chunk - a memory structure that contains allocator data and user data.
 *   User data follows allocator data in memory. There is no free space in
 *   memory between the allocator data and the user data of the same chunk;
 * - "chunk B is located just after chunk A" - there is no free space in
 *   memory after the end of user data in chunk A and before the allocator
 *   data in chunk B;
 * - used chunk - a chunk whose user data address was returned to the user
 *   from m0_be_alloc() and for which m0_be_free() wasn't called yet;
 * - free chunk - a chunk that is not used;
 * - adjacent chunks - A and B are adjacent chunks iff chunk A is located
 *   just after B or vice versa.
 *
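 * A chunk layout sketch (field names as used later in this file; the exact
 * struct definition lives in be/alloc_internal.h):
 *
 * @verbatim
 * +------------------------+ <- struct be_alloc_chunk *c
 * |     allocator data     |    (bac_magic0, bac_linkage, bac_size,
 * |                        |     bac_free, bac_zone, bac_magic1)
 * +------------------------+ <- &c->bac_mem[0], the address returned to user
 * |        user data       |
 * |    (bac_size bytes)    |
 * +------------------------+ <- the chunk located just after, if any
 * @endverbatim
 *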
 * API
 *
 * - m0_be_alloc() and m0_be_alloc_aligned(): allocate memory;
 * - m0_be_free() and m0_be_free_aligned(): free memory allocated with
 *   m0_be_alloc() and m0_be_alloc_aligned() respectively;
 * - m0_be_alloc_stats(): provide some allocator statistics.
 *
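 * A minimal usage sketch (it assumes the M0_BE_OP_SYNC helper from be/op.h
 * and a transaction that already carries enough credits; error handling is
 * elided):
 *
 * @code
 * void *ptr;
 *
 * M0_BE_OP_SYNC(op, m0_be_alloc(a, tx, &op, &ptr, size));
 * ...
 * M0_BE_OP_SYNC(op, m0_be_free(a, tx, &op, ptr));
 * @endcode
 *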
 * Algorithm
 *
 * The allocator has:
 * - a list of all chunks;
 * - a data structure (m0_be_fl) to keep account of free chunks.
 *
 * m0_be_alloc_aligned()
 * - uses m0_be_fl to pick a free chunk that meets the alignment and size
 *   requirements;
 * - splits the free chunk so that as little space as possible is wasted.
 *
 * m0_be_free_aligned()
 * - merges the freed chunk with its adjacent chunks if they are free, so at
 *   most 2 merge operations take place.
 *
 * m0_be_alloc(), m0_be_free()
 * - they are calls to m0_be_alloc_aligned() and m0_be_free_aligned() with
 *   the M0_BE_ALLOC_SHIFT_MIN alignment shift.
 *
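 * For example, be_alloc_chunk_trysplit() places user memory at a 2^shift
 * boundary inside a free chunk by rounding the address up (the alignment is
 * a power of two; this is the arithmetic used in the code below):
 *
 * @code
 * addr_mem  = addr_start + sizeof(struct be_alloc_chunk) + alignment - 1;
 * addr_mem &= ~(alignment - 1);
 * @endcode
 *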
 * Allocator space invariants:
 * - each byte of the allocator space belongs to a chunk. There is one
 *   exception: if there is no space for a chunk with at least 1 byte of user
 *   data between the beginning of the allocator space and another chunk,
 *   then this space is temporarily unused.
 *
 * Chunk invariants:
 * - all chunks are mutually disjoint;
 * - each chunk is entirely inside the allocator space;
 * - each chunk is either free or used;
 * - user data in a chunk is located just after the allocator data.
 *
 * List of all chunks invariants:
 * - all chunks in the list are ordered by address;
 * - every chunk is in the list;
 * - chunks that are adjacent in the list are adjacent in the allocator space.
 *
 * Special cases
 *
 * Chunk split in be_alloc_chunk_split()
 *
 * @verbatim
 * |         |     |          |     |          |     |          |
 * +---------+     +----------+     +----------+     |          |
 * |  prev   |     |   prev   |     |          |     |          |
 * +---------+     +----------+     |   prev   |     |          | <- start0
 * |         |     |  chunk0  |     |          |     |          |
 * |         |     +----------+     +----------+     +----------+ <- start_new
 * |    c    |     |          |     |          |     |          |
 * |         |     |   new    |     |   new    |     |   new    |
 * |         |     |          |     |          |     |          |
 * |         |     +----------+     +----------+     |          | <- start1
 * |         |     |  chunk1  |     |  chunk1  |     |          |
 * +---------+     +----------+     +----------+     +----------+ <- start_next
 * |  next   |     |   next   |     |   next   |     |   next   |
 * +---------+     +----------+     +----------+     +----------+
 * |         |     |          |     |          |     |          |
 *
 *   initial        after split      no space         no space
 *   position                        for chunk0       for chunk1
 * @endverbatim
 *
 * Free chunks merge, if it is possible, in be_alloc_chunk_trymerge()
 *
 * @verbatim
 * |          |      |          |
 * +----------+      +----------+
 * |          |      |          |
 * |    a     |      |          |
 * |          |      |          |
 * |          |      |          |
 * +----------+      |          |
 * |          |      |    a     |
 * |          |      |          |
 * |          |      |          |
 * |    b     |      |          |
 * |          |      |          |
 * |          |      |          |
 * |          |      |          |
 * +----------+      +----------+
 * |          |      |          |
 * @endverbatim
 *
 * Time and I/O complexity
 * - m0_be_alloc_aligned() has the same time and I/O complexity as
 *   m0_be_fl_pick();
 * - m0_be_free_aligned() has O(1) time and I/O complexity -
 *   the same as m0_be_fl_add() and m0_be_fl_del();
 * - m0_be_alloc_aligned() and m0_be_free_aligned() have optimisations for
 *   the M0_BE_ALLOC_SHIFT_MIN alignment shift, so if there are no special
 *   alignment requirements it's better to use m0_be_alloc() and
 *   m0_be_free().
 *
 * Limitations
 * - the allocator can use only one BE segment;
 * - the allocator can use only a contiguous allocator space.
 *
 * Known issues
 * - all allocator functions are fully synchronous now, despite the fact that
 *   they have an m0_be_op parameter;
 * - m0_be_allocator_stats are unused;
 * - the allocator credit includes 2 * size requested for alignment shifts
 *   greater than M0_BE_ALLOC_SHIFT_MIN;
 * - it is not a truly O(1) allocator; see the m0_be_fl documentation for an
 *   explanation;
 * - there is one big allocator lock that protects all allocations and
 *   deallocations.
 *
 * Locks
 * The allocator lock (m0_mutex) is used to protect all allocator data.
 *
 * Space reservation for DIX recovery
 * ----------------------------------
 *
 * Space is reserved so that -ENOSPC is not returned during DIX repair.
 *
 * A special allocator zone M0_BAP_REPAIR is introduced for memory allocated
 * during DIX repair. This zone is specified in the zone mask in
 * m0_be_alloc_aligned() and its callers, up to the functions which may be
 * called during repair (from m0_dix_cm_cp_write()). Functions/macros which
 * are never called during repair always pass M0_BITS(M0_BAP_NORMAL) as the
 * zone mask.
 *
 * In fact, each zone is implemented as an independent space in the BE
 * segment, represented by its own m0_be_allocator_header.
 *
 * Repair uses all available space in the allocator, while a normal
 * allocation fails if the free space is less than the reserved space.
 * Repair uses the M0_BAP_REPAIR zone by default, but if there is no space
 * in M0_BAP_REPAIR, then memory is allocated in the M0_BAP_NORMAL zone.
 *
 * The space is reserved for the repair zone during m0mkfs in
 * m0_be_allocator_create(). The percentage of free space is passed to
 * m0_be_allocator_create() via the 'zone_percent' argument, which is
 * assigned in cs_be_init(). The space is reserved in terms of bytes, not as
 * a memory region, so fragmentation can prevent a successful allocation from
 * the reserved space if there is no contiguous memory block of the requested
 * size.
 *
 * The repair zone is not accounted in df output.
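 *
 * A zone-mask sketch (hedged example, not code from this file; with both
 * bits set the repair zone is tried first and the allocator falls back to
 * the normal zone when the repair zone is exhausted):
 *
 * @code
 * void *ptr;
 *
 * M0_BE_OP_SYNC(op, m0_be_alloc_aligned(a, tx, &op, &ptr, size,
 *                                       M0_BE_ALLOC_SHIFT_MIN,
 *                                       M0_BITS(M0_BAP_REPAIR,
 *                                               M0_BAP_NORMAL)));
 * @endcode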
 */

/*
 * @addtogroup be
 *
 * @{
 */

enum {
	/*
	 * Values below are a reconstruction: the original listing elided the
	 * enumerators; the names are taken from their uses later in this file.
	 */
	M0_BE_ALLOCATOR_STATS_PRINT_INTERVAL = 0x10000,
	M0_BE_ALLOCATOR_STATS_BOUNDARY       = 0x1000,
};

M0_BE_LIST_DESCR_DEFINE(chunks_all, "list of all chunks in m0_be_allocator",
			static, struct be_alloc_chunk, bac_linkage, bac_magic,
			M0_BE_ALLOC_ALL_LINK_MAGIC, M0_BE_ALLOC_ALL_MAGIC);
M0_BE_LIST_DEFINE(chunks_all, static, struct be_alloc_chunk);

static const char *be_alloc_zone_name(enum m0_be_alloc_zone_type type)
{
	static const char *zone_names[] = {
		[M0_BAP_REPAIR] = "repair",
		[M0_BAP_NORMAL] = "normal"
	};

	M0_CASSERT(ARRAY_SIZE(zone_names) == M0_BAP_NR);
	M0_PRE(type < M0_BAP_NR);
	return zone_names[type];
}

static void
be_allocator_call_stat_init(struct m0_be_allocator_call_stat *cstat)
{
	*cstat = (struct m0_be_allocator_call_stat){
		.bcs_nr   = 0,
		.bcs_size = 0,
	};
}

static void be_allocator_call_stats_init(struct m0_be_allocator_call_stats *cs)
{
	be_allocator_call_stat_init(&cs->bacs_alloc_success);
	be_allocator_call_stat_init(&cs->bacs_alloc_failure);
	be_allocator_call_stat_init(&cs->bacs_free);
}

static void be_allocator_stats_init(struct m0_be_allocator_stats  *stats,
				    struct m0_be_allocator_header *h)
{
	*stats = (struct m0_be_allocator_stats){
		.bas_chunk_overhead = sizeof(struct be_alloc_chunk),
		.bas_stat0_boundary = M0_BE_ALLOCATOR_STATS_BOUNDARY,
		.bas_print_interval = M0_BE_ALLOCATOR_STATS_PRINT_INTERVAL,
		.bas_print_index    = 0,
		.bas_space_total    = h->bah_size,
		.bas_space_free     = h->bah_size,
		.bas_space_used     = 0,
	};
	be_allocator_call_stats_init(&stats->bas_total);
	be_allocator_call_stats_init(&stats->bas_stat0);
	be_allocator_call_stats_init(&stats->bas_stat1);
}

static void
be_allocator_call_stat_update(struct m0_be_allocator_call_stat *cstat,
			      unsigned long                     nr,
			      m0_bcount_t                       size)
{
	cstat->bcs_nr   += nr;
	cstat->bcs_size += size;
}

static void
be_allocator_call_stats_update(struct m0_be_allocator_call_stats *cs,
			       m0_bcount_t                        size,
			       bool                               alloc,
			       bool                               failed)
{
	struct m0_be_allocator_call_stat *cstat;

	if (alloc && failed) {
		cstat = &cs->bacs_alloc_failure;
	} else if (alloc) {
		cstat = &cs->bacs_alloc_success;
	} else {
		cstat = &cs->bacs_free;
	}
	be_allocator_call_stat_update(cstat, 1, size);
}

static void
be_allocator_call_stats_print(struct m0_be_allocator_call_stats *cs,
			      const char                        *descr)
{
#define P_ACS(acs) (acs)->bcs_nr, (acs)->bcs_size
	M0_LOG(M0_DEBUG, "%s (nr, size): alloc_success=(%lu, %" PRIu64 "), "
	       "free=(%lu, %" PRIu64 "), alloc_failure=(%lu, %" PRIu64 ")",
	       descr, P_ACS(&cs->bacs_alloc_success), P_ACS(&cs->bacs_free),
	       P_ACS(&cs->bacs_alloc_failure));
#undef P_ACS
}

static void be_allocator_stats_print(struct m0_be_allocator_stats *stats)
{
	M0_LOG(M0_DEBUG, "stats=%p chunk_overhead=%" PRIu64 " boundary=%" PRIu64
	       " print_interval=%lu print_index=%lu",
	       stats, stats->bas_chunk_overhead, stats->bas_stat0_boundary,
	       stats->bas_print_interval, stats->bas_print_index);
	M0_LOG(M0_DEBUG, "chunks=%" PRIu64 " free_chunks=%" PRIu64,
	       stats->bas_chunks_nr, stats->bas_free_chunks_nr);
	be_allocator_call_stats_print(&stats->bas_total, "           total");
	be_allocator_call_stats_print(&stats->bas_stat0, "size <= boundary");
	be_allocator_call_stats_print(&stats->bas_stat1, "size >  boundary");
}

static void be_allocator_stats_update(struct m0_be_allocator_stats *stats,
				      m0_bcount_t                   size,
				      bool                          alloc,
				      bool                          failed)
{
	unsigned long space_change;
	long          multiplier;

	M0_PRE(ergo(failed, alloc));

	multiplier = failed ? 0 : alloc ? 1 : -1;
	space_change = size + stats->bas_chunk_overhead;
	stats->bas_space_used += multiplier * space_change;
	stats->bas_space_free -= multiplier * space_change;
	be_allocator_call_stats_update(&stats->bas_total, size, alloc, failed);
	be_allocator_call_stats_update(size <= stats->bas_stat0_boundary ?
				       &stats->bas_stat0 : &stats->bas_stat1,
				       size, alloc, failed);
	if (stats->bas_print_index++ == stats->bas_print_interval) {
		be_allocator_stats_print(stats);
		stats->bas_print_index = 0;
	}
}

static void be_allocator_stats_capture(struct m0_be_allocator     *a,
				       enum m0_be_alloc_zone_type  ztype,
				       struct m0_be_tx            *tx)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];

	if (tx != NULL)
		M0_BE_TX_CAPTURE_PTR(a->ba_seg, tx, &h->bah_stats);
}

static void be_alloc_chunk_capture(struct m0_be_allocator *a,
				   struct m0_be_tx        *tx,
				   struct be_alloc_chunk  *c)
{
	if (tx != NULL && c != NULL)
		M0_BE_TX_CAPTURE_PTR(a->ba_seg, tx, c);
}

static void be_alloc_free_flag_capture(const struct m0_be_allocator *a,
				       struct m0_be_tx              *tx,
				       struct be_alloc_chunk        *c)
{
	if (tx != NULL)
		M0_BE_TX_CAPTURE_PTR(a->ba_seg, tx, &c->bac_free);
}

static void be_alloc_size_capture(const struct m0_be_allocator *a,
				  struct m0_be_tx              *tx,
				  struct be_alloc_chunk        *c)
{
	if (tx != NULL)
		M0_BE_TX_CAPTURE_PTR(a->ba_seg, tx, &c->bac_size);
}

static bool be_alloc_mem_is_in(const struct m0_be_allocator *a,
			       enum m0_be_alloc_zone_type    ztype,
			       const void *ptr, m0_bcount_t  size)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];

	return ptr >= h->bah_addr &&
	       ptr + size <= h->bah_addr + h->bah_size;
}

static bool be_alloc_chunk_is_in(const struct m0_be_allocator *a,
				 enum m0_be_alloc_zone_type    ztype,
				 const struct be_alloc_chunk  *c)
{
	return be_alloc_mem_is_in(a, ztype, c, sizeof *c + c->bac_size);
}

static bool be_alloc_chunk_is_not_overlapping(const struct be_alloc_chunk *a,
					      const struct be_alloc_chunk *b)
{
#if 0
	return a == NULL || b == NULL ||
	       (a < b && &a->bac_mem[a->bac_size] <= (char *) b);
#else
	return a == NULL || b == NULL ||
	       (a < b && &a->bac_mem[a->bac_size] <= (char *) b) ||
	       (b < a && &b->bac_mem[b->bac_size] <= (char *) a);
#endif
}

static bool be_alloc_chunk_invariant(struct m0_be_allocator      *a,
				     const struct be_alloc_chunk *c)
{
	enum m0_be_alloc_zone_type     ztype = c->bac_zone;
	struct m0_be_allocator_header *h;
	struct be_alloc_chunk         *cprev;
	struct be_alloc_chunk         *cnext;

	M0_PRE(ztype < M0_BAP_NR);

	h     = a->ba_h[ztype];
	cprev = chunks_all_be_list_prev(&h->bah_chunks, c);
	cnext = chunks_all_be_list_next(&h->bah_chunks, c);

	return _0C(c != NULL) &&
	       _0C(be_alloc_chunk_is_in(a, ztype, c)) &&
	       _0C(m0_addr_is_aligned(&c->bac_mem, M0_BE_ALLOC_SHIFT_MIN)) &&
	       _0C(ergo(cnext != NULL,
			be_alloc_chunk_is_in(a, ztype, cnext))) &&
	       _0C(ergo(cprev != NULL,
			be_alloc_chunk_is_in(a, ztype, cprev))) &&
	       _0C(c->bac_magic0 == M0_BE_ALLOC_MAGIC0) &&
	       _0C(c->bac_magic1 == M0_BE_ALLOC_MAGIC1) &&
	       _0C(be_alloc_chunk_is_not_overlapping(cprev, c)) &&
	       _0C(be_alloc_chunk_is_not_overlapping(c, cnext));
}

static void be_alloc_chunk_init(struct m0_be_allocator     *a,
				enum m0_be_alloc_zone_type  ztype,
				struct m0_be_tx            *tx,
				struct be_alloc_chunk      *c,
				m0_bcount_t size, bool free)
{
	*c = (struct be_alloc_chunk) {
		.bac_magic0 = M0_BE_ALLOC_MAGIC0,
		.bac_size   = size,
		.bac_free   = free,
		.bac_zone   = ztype,
		.bac_magic1 = M0_BE_ALLOC_MAGIC1,
	};
	chunks_all_be_tlink_create(c, tx);
	/*
	 * Move this right before m0_be_tlink_create() to optimize capturing
	 * size. Chunk capturing at the end of the function will help with
	 * debugging credit calculation errors with the current regmap
	 * implementation.
	 */
	be_alloc_chunk_capture(a, tx, c);
}

static void be_alloc_chunk_del_fini(struct m0_be_allocator     *a,
				    enum m0_be_alloc_zone_type  ztype,
				    struct m0_be_tx            *tx,
				    struct be_alloc_chunk      *c)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];

	M0_PRE_EX(be_alloc_chunk_invariant(a, c));
	M0_PRE(c->bac_zone == ztype);

	m0_be_fl_del(&h->bah_fl, tx, c);

	chunks_all_be_list_del(&h->bah_chunks, tx, c);
	chunks_all_be_tlink_destroy(c, tx);
}

static struct be_alloc_chunk *be_alloc_chunk_addr(void *ptr)
{
	return container_of(ptr, struct be_alloc_chunk, bac_mem);
}

static struct be_alloc_chunk *
be_alloc_chunk_prev(struct m0_be_allocator     *a,
		    enum m0_be_alloc_zone_type  ztype,
		    struct be_alloc_chunk      *c)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];
	struct be_alloc_chunk         *r;

	M0_PRE(c->bac_zone == ztype);

	r = chunks_all_be_list_prev(&h->bah_chunks, c);
	M0_ASSERT_EX(ergo(r != NULL, be_alloc_chunk_invariant(a, r)));
	return r;
}

static struct be_alloc_chunk *
be_alloc_chunk_next(struct m0_be_allocator     *a,
		    enum m0_be_alloc_zone_type  ztype,
		    struct be_alloc_chunk      *c)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];
	struct be_alloc_chunk         *r;

	M0_PRE(c->bac_zone == ztype);

	r = chunks_all_be_list_next(&h->bah_chunks, c);
	M0_ASSERT_EX(ergo(r != NULL, be_alloc_chunk_invariant(a, r)));
	return r;
}

static void be_alloc_chunk_mark_free(struct m0_be_allocator     *a,
				     enum m0_be_alloc_zone_type  ztype,
				     struct m0_be_tx            *tx,
				     struct be_alloc_chunk      *c)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];

	M0_PRE_EX(be_alloc_chunk_invariant(a, c));
	M0_PRE(!c->bac_free);
	M0_PRE(c->bac_zone == ztype);

	m0_be_fl_add(&h->bah_fl, tx, c);
	c->bac_free = true;
	be_alloc_free_flag_capture(a, tx, c);

	M0_POST(c->bac_free);
	M0_POST_EX(be_alloc_chunk_invariant(a, c));
}

static uintptr_t be_alloc_chunk_after(struct m0_be_allocator     *a,
				      enum m0_be_alloc_zone_type  ztype,
				      struct be_alloc_chunk      *c)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];

	M0_PRE(ergo(c != NULL, c->bac_zone == ztype));

	return c == NULL ? (uintptr_t) h->bah_addr :
			   (uintptr_t) &c->bac_mem[c->bac_size];
}

static struct be_alloc_chunk *
be_alloc_chunk_add_after(struct m0_be_allocator     *a,
			 enum m0_be_alloc_zone_type  ztype,
			 struct m0_be_tx            *tx,
			 struct be_alloc_chunk      *c,
			 uintptr_t                   offset,
			 m0_bcount_t                 size_total,
			 bool                        free)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];
	struct be_alloc_chunk         *new;

	M0_PRE(size_total > sizeof *new);
	M0_PRE(ergo(c != NULL, c->bac_zone == ztype));

	new = c == NULL ? (struct be_alloc_chunk *)
			  ((uintptr_t) h->bah_addr + offset) :
			  (struct be_alloc_chunk *)
			  be_alloc_chunk_after(a, ztype, c);
	be_alloc_chunk_init(a, ztype, tx, new, size_total - sizeof *new, free);

	if (c != NULL)
		chunks_all_be_list_add_after(&h->bah_chunks, tx, c, new);
	else
		chunks_all_be_list_add(&h->bah_chunks, tx, new);

	if (free)
		m0_be_fl_add(&h->bah_fl, tx, new);

	M0_POST_EX(be_alloc_chunk_invariant(a, new));
	return new;
}

static void be_alloc_chunk_resize(struct m0_be_allocator     *a,
				  enum m0_be_alloc_zone_type  ztype,
				  struct m0_be_tx            *tx,
				  struct be_alloc_chunk      *c,
				  m0_bcount_t                 new_size)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];

	M0_PRE(c->bac_zone == ztype);

	if (c->bac_free)
		m0_be_fl_del(&h->bah_fl, tx, c);
	c->bac_size = new_size;
	if (c->bac_free)
		m0_be_fl_add(&h->bah_fl, tx, c);
	be_alloc_size_capture(a, tx, c);
}

static struct be_alloc_chunk *
be_alloc_chunk_tryadd_free_after(struct m0_be_allocator     *a,
				 enum m0_be_alloc_zone_type  ztype,
				 struct m0_be_tx            *tx,
				 struct be_alloc_chunk      *c,
				 uintptr_t                   offset,
				 m0_bcount_t                 size_total)
{
	if (size_total <= sizeof *c) {
		if (c != NULL) {
			be_alloc_chunk_resize(a, ztype, tx, c,
					      c->bac_size + size_total);
		} else
			; /* space before the first chunk is temporarily lost */
	} else {
		c = be_alloc_chunk_add_after(a, ztype, tx, c,
					     offset, size_total, true);
	}
	return c;
}

static struct be_alloc_chunk *
be_alloc_chunk_split(struct m0_be_allocator     *a,
		     enum m0_be_alloc_zone_type  ztype,
		     struct m0_be_tx            *tx,
		     struct be_alloc_chunk      *c,
		     uintptr_t                   start_new,
		     m0_bcount_t                 size)
{
	struct be_alloc_chunk *prev;
	struct be_alloc_chunk *new;
	uintptr_t              start0;
	uintptr_t              start1;
	uintptr_t              start_next;
	m0_bcount_t            size_min_aligned;
	m0_bcount_t            chunk0_size;
	m0_bcount_t            chunk1_size;

	M0_PRE_EX(be_alloc_chunk_invariant(a, c));
	M0_PRE(c->bac_free);

	size_min_aligned = m0_align(size, 1UL << M0_BE_ALLOC_SHIFT_MIN);

	prev = be_alloc_chunk_prev(a, ztype, c);

	start0      = be_alloc_chunk_after(a, ztype, prev);
	start1      = start_new + sizeof *new + size_min_aligned;
	start_next  = be_alloc_chunk_after(a, ztype, c);
	chunk0_size = start_new - start0;
	chunk1_size = start_next - start1;
	M0_ASSERT(start0    <= start_new);
	M0_ASSERT(start_new <= start1);
	M0_ASSERT(start1    <= start_next);

	be_alloc_chunk_del_fini(a, ztype, tx, c);
	/* c is not a valid chunk now */

	prev = be_alloc_chunk_tryadd_free_after(a, ztype, tx, prev, 0,
						chunk0_size);
	new = be_alloc_chunk_add_after(a, ztype, tx, prev,
				       prev == NULL ? chunk0_size : 0,
				       sizeof *new + size_min_aligned, false);
	M0_ASSERT(new != NULL);
	be_alloc_chunk_tryadd_free_after(a, ztype, tx, new, 0, chunk1_size);

	M0_POST(!new->bac_free);
	M0_POST(new->bac_size >= size);
	M0_POST_EX(be_alloc_chunk_invariant(a, new));
	return new;
}

static struct be_alloc_chunk *
be_alloc_chunk_trysplit(struct m0_be_allocator     *a,
			enum m0_be_alloc_zone_type  ztype,
			struct m0_be_tx            *tx,
			struct be_alloc_chunk      *c,
			m0_bcount_t size, unsigned shift)
{
	struct be_alloc_chunk *result = NULL;
	uintptr_t              alignment = 1UL << shift;
	uintptr_t              addr_mem;
	uintptr_t              addr_start;
	uintptr_t              addr_end;

	M0_PRE_EX(be_alloc_chunk_invariant(a, c));
	M0_PRE(alignment != 0);
	if (c->bac_free) {
		addr_start = (uintptr_t) c;
		addr_end   = (uintptr_t) &c->bac_mem[c->bac_size];
		/* find the aligned address for the memory block */
		addr_mem   = addr_start + sizeof *c + alignment - 1;
		addr_mem  &= ~(alignment - 1);
		/* if the block fits inside the free chunk */
		result = addr_mem + size <= addr_end ?
			 be_alloc_chunk_split(a, ztype, tx, c,
					      addr_mem - sizeof *c, size) :
			 NULL;
	}
	M0_POST(ergo(result != NULL, be_alloc_chunk_invariant(a, result)));
	return result;
}

static bool be_alloc_chunk_trymerge(struct m0_be_allocator     *a,
				    enum m0_be_alloc_zone_type  ztype,
				    struct m0_be_tx            *tx,
				    struct be_alloc_chunk      *x,
				    struct be_alloc_chunk      *y)
{
	m0_bcount_t y_size_total;
	bool        chunks_were_merged = false;

	M0_PRE_EX(ergo(x != NULL, be_alloc_chunk_invariant(a, x)));
	M0_PRE_EX(ergo(y != NULL, be_alloc_chunk_invariant(a, y)));
	M0_PRE(ergo(x != NULL && y != NULL, (char *) x < (char *) y));
	M0_PRE(ergo(x != NULL, x->bac_free) || ergo(y != NULL, y->bac_free));
	if (x != NULL && y != NULL && x->bac_free && y->bac_free) {
		y_size_total = sizeof *y + y->bac_size;
		be_alloc_chunk_del_fini(a, ztype, tx, y);
		be_alloc_chunk_resize(a, ztype, tx, x,
				      x->bac_size + y_size_total);
		chunks_were_merged = true;
	}
	M0_POST_EX(ergo(x != NULL, be_alloc_chunk_invariant(a, x)));
	M0_POST(ergo(y != NULL && !chunks_were_merged,
		     be_alloc_chunk_invariant(a, y)));
	return chunks_were_merged;
}

M0_INTERNAL int m0_be_allocator_init(struct m0_be_allocator *a,
				     struct m0_be_seg       *seg)
{
	struct m0_be_seg_hdr *seg_hdr;
	int                   i;

	M0_ENTRY("a=%p seg=%p seg->bs_addr=%p seg->bs_size=%" PRId64,
		 a, seg, seg->bs_addr, seg->bs_size);

	/* See comment in m0_be_btree_init(). */
	M0_SET0(&a->ba_lock);
	m0_mutex_init(&a->ba_lock);

	a->ba_seg = seg;
	seg_hdr = (struct m0_be_seg_hdr *)seg->bs_addr;
	for (i = 0; i < M0_BAP_NR; ++i) {
		a->ba_h[i] = &seg_hdr->bh_alloc[i];
	}

	return 0;
}

M0_INTERNAL void m0_be_allocator_fini(struct m0_be_allocator *a)
{
	int i;

	M0_ENTRY("a=%p", a);

	for (i = 0; i < M0_BAP_NR; ++i)
		a->ba_h[i] = NULL;
	m0_mutex_fini(&a->ba_lock);

	M0_LEAVE();
}

M0_INTERNAL bool m0_be_allocator__invariant(struct m0_be_allocator *a)
{
	return m0_mutex_is_locked(&a->ba_lock) &&
	       (true || /* XXX Disabled as it's too slow. */
		m0_forall(z, M0_BAP_NR,
			  m0_be_list_forall(chunks_all, iter,
					    &a->ba_h[z]->bah_chunks,
					    be_alloc_chunk_invariant(a,
								     iter))));
}

static int be_allocator_header_create(struct m0_be_allocator     *a,
				      enum m0_be_alloc_zone_type  ztype,
				      struct m0_be_tx            *tx,
				      uintptr_t                   offset,
				      m0_bcount_t                 size)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];
	struct be_alloc_chunk         *c;

	M0_PRE(ztype < M0_BAP_NR);

	if (size != 0 && size < sizeof *c + 1)
		return M0_ERR(-ENOSPC);

	h->bah_addr = (void *)offset;
	h->bah_size = size;
	M0_BE_TX_CAPTURE_PTR(a->ba_seg, tx, &h->bah_addr);
	M0_BE_TX_CAPTURE_PTR(a->ba_seg, tx, &h->bah_size);

	chunks_all_be_list_create(&h->bah_chunks, tx);
	m0_be_fl_create(&h->bah_fl, tx, a->ba_seg);
	be_allocator_stats_init(&h->bah_stats, h);
	be_allocator_stats_capture(a, ztype, tx);

	/* init the main chunk */
	if (size != 0) {
		c = be_alloc_chunk_add_after(a, ztype, tx, NULL, 0, size, true);
		M0_ASSERT(c != NULL);
	}
	return 0;
}

static void be_allocator_header_destroy(struct m0_be_allocator     *a,
					enum m0_be_alloc_zone_type  ztype,
					struct m0_be_tx            *tx)
{
	struct m0_be_allocator_header *h = a->ba_h[ztype];
	struct be_alloc_chunk         *c;

	/*
	 * We destroy the allocator when all objects are de-allocated.
	 * Therefore, bah_chunks contains only 1 element. The list is empty
	 * for an unused zone (bah_size == 0).
	 */
	c = chunks_all_be_list_head(&h->bah_chunks);
	M0_ASSERT(equi(c == NULL, h->bah_size == 0));
	if (c != NULL)
		be_alloc_chunk_del_fini(a, ztype, tx, c);

	m0_be_fl_destroy(&h->bah_fl, tx);
	chunks_all_be_list_destroy(&h->bah_chunks, tx);
}

M0_INTERNAL int m0_be_allocator_create(struct m0_be_allocator *a,
				       struct m0_be_tx        *tx,
				       uint32_t               *zone_percent,
				       uint32_t                zones_nr)
{
	m0_bcount_t reserved;
	m0_bcount_t free_space;
	m0_bcount_t remain;
	m0_bcount_t size;
	uintptr_t   offset;
	int         i;
	int         z;
	int         rc;

	M0_ENTRY("a=%p tx=%p", a, tx);
	M0_PRE(zones_nr <= M0_BAP_NR);
	M0_PRE(m0_reduce(i, zones_nr, 0ULL, + zone_percent[i]) == 100);

	reserved   = m0_be_seg_reserved(a->ba_seg);
	free_space = a->ba_seg->bs_size - reserved;
	offset     = (uintptr_t)a->ba_seg->bs_addr + reserved;

	m0_mutex_lock(&a->ba_lock);

	remain = free_space;
	for (i = 0; i < zones_nr; ++i) {
		if (i < zones_nr - 1) {
			size = free_space * zone_percent[i] / 100;
		} else
			size = remain;
		M0_ASSERT(size <= remain);
		rc = be_allocator_header_create(a, i, tx, offset, size);
		if (rc != 0) {
			for (z = 0; z < i; ++z)
				be_allocator_header_destroy(a, z, tx);
			m0_mutex_unlock(&a->ba_lock);
			return M0_RC(rc);
		}
		remain -= size;
		offset += size;
	}
	M0_ASSERT(remain == 0);

	/* Create the rest of the zones as empty/unused. */
	for (i = zones_nr; i < M0_BAP_NR; ++i) {
		rc = be_allocator_header_create(a, i, tx, 0, 0);
		M0_ASSERT(rc == 0);
	}

	M0_LOG(M0_DEBUG, "free_space=%" PRIu64, free_space);
	for (i = 0; i < zones_nr; ++i)
		M0_LOG(M0_DEBUG, "%s zone size=%" PRIu64,
		       be_alloc_zone_name(i), a->ba_h[i]->bah_size);

	m0_mutex_unlock(&a->ba_lock);

	M0_LEAVE();
	return 0;
}

M0_INTERNAL void m0_be_allocator_destroy(struct m0_be_allocator *a,
					 struct m0_be_tx        *tx)
{
	int z;

	M0_ENTRY("a=%p tx=%p", a, tx);

	m0_mutex_lock(&a->ba_lock);

	for (z = 0; z < M0_BAP_NR; ++z)
		be_allocator_header_destroy(a, z, tx);

	m0_mutex_unlock(&a->ba_lock);
	M0_LEAVE();
}

M0_INTERNAL void m0_be_allocator_credit(struct m0_be_allocator *a,
					enum m0_be_allocator_op optype,
					m0_bcount_t             size,
					unsigned                shift,
					struct m0_be_tx_credit *accum)
{
	/*
	 * XXX `a' can be NULL, &(struct m0_be_seg)(0)->bs_allocator or an
	 * uninitialised value. Use h == NULL to avoid dereferencing in the
	 * above cases.
	 */
	struct m0_be_allocator_header *h = NULL;
	struct m0_be_tx_credit  cred_list_create      = {};
	struct m0_be_tx_credit  cred_list_destroy     = {};
	struct m0_be_tx_credit  chunk_add_after_credit;
	struct m0_be_tx_credit  chunk_del_fini_credit;
	struct m0_be_tx_credit  chunk_trymerge_credit = {};
	struct m0_be_tx_credit  chunk_resize_credit   = {};
	struct m0_be_tx_credit  tryadd_free_after_credit;
	struct m0_be_tx_credit  cred_mark_free        = {};
	struct m0_be_tx_credit  cred_split            = {};
	struct m0_be_tx_credit  mem_zero_credit;
	struct m0_be_tx_credit  chunk_credit;
	struct m0_be_tx_credit  cred_allocator        = {};
	struct m0_be_tx_credit  cred_free_flag;
	struct m0_be_tx_credit  cred_chunk_size;
	struct m0_be_tx_credit  stats_credit;
	struct m0_be_tx_credit  tmp;
	struct be_alloc_chunk   chunk;

	chunk_credit    = M0_BE_TX_CREDIT_TYPE(struct be_alloc_chunk);
	cred_free_flag  = M0_BE_TX_CREDIT_PTR(&chunk.bac_free);
	cred_chunk_size = M0_BE_TX_CREDIT_PTR(&chunk.bac_size);
	stats_credit    = M0_BE_TX_CREDIT_PTR(&h->bah_stats);

	m0_be_tx_credit_add(&cred_allocator,
			    &M0_BE_TX_CREDIT_PTR(&h->bah_addr));
	m0_be_tx_credit_add(&cred_allocator,
			    &M0_BE_TX_CREDIT_PTR(&h->bah_size));

	shift = max_check(shift, (unsigned) M0_BE_ALLOC_SHIFT_MIN);
	mem_zero_credit = M0_BE_TX_CREDIT(1, size);

	chunks_all_be_list_credit(M0_BLO_CREATE,  1, &cred_list_create);
	chunks_all_be_list_credit(M0_BLO_DESTROY, 1, &cred_list_destroy);

	tmp = M0_BE_TX_CREDIT(0, 0);
	chunks_all_be_list_credit(M0_BLO_TLINK_CREATE, 1, &tmp);
	m0_be_tx_credit_add(&tmp, &chunk_credit);
	chunks_all_be_list_credit(M0_BLO_ADD, 1, &tmp);
	m0_be_fl_credit(&h->bah_fl, M0_BFL_ADD, &tmp);
	chunk_add_after_credit = tmp;

	tmp = M0_BE_TX_CREDIT(0, 0);
	chunks_all_be_list_credit(M0_BLO_DEL, 1, &tmp);
	chunks_all_be_list_credit(M0_BLO_TLINK_DESTROY, 1, &tmp);
	m0_be_fl_credit(&h->bah_fl, M0_BFL_DEL, &tmp);
	chunk_del_fini_credit = tmp;

	m0_be_fl_credit(&h->bah_fl, M0_BFL_DEL, &chunk_resize_credit);
	m0_be_fl_credit(&h->bah_fl, M0_BFL_ADD, &chunk_resize_credit);
	m0_be_tx_credit_add(&chunk_resize_credit, &cred_chunk_size);

	m0_be_tx_credit_add(&chunk_trymerge_credit, &chunk_del_fini_credit);
	m0_be_tx_credit_add(&chunk_trymerge_credit, &chunk_resize_credit);

	m0_be_tx_credit_max(&tryadd_free_after_credit,
			    &chunk_resize_credit,
			    &chunk_add_after_credit);

	m0_be_tx_credit_add(&cred_split, &chunk_del_fini_credit);
	m0_be_tx_credit_mac(&cred_split, &tryadd_free_after_credit, 2);
	m0_be_tx_credit_mac(&cred_split, &chunk_add_after_credit, 1);

	m0_be_tx_credit_add(&cred_mark_free, &cred_free_flag);
	m0_be_fl_credit(&h->bah_fl, M0_BFL_ADD, &cred_mark_free);

	switch (optype) {
	case M0_BAO_CREATE:
		tmp = M0_BE_TX_CREDIT(0, 0);
		m0_be_tx_credit_mac(&tmp, &cred_list_create, 2);
		m0_be_fl_credit(&h->bah_fl, M0_BFL_CREATE, &tmp);
		m0_be_tx_credit_add(&tmp, &chunk_add_after_credit);
		m0_be_tx_credit_add(&tmp, &cred_allocator);
		m0_be_tx_credit_add(&tmp, &stats_credit);
		m0_be_tx_credit_mac(accum, &tmp, M0_BAP_NR);
		break;
	case M0_BAO_DESTROY:
		tmp = M0_BE_TX_CREDIT(0, 0);
		m0_be_fl_credit(&h->bah_fl, M0_BFL_DESTROY, &tmp);
		m0_be_tx_credit_add(&tmp, &chunk_del_fini_credit);
		m0_be_tx_credit_mac(&tmp, &cred_list_destroy, 2);
		m0_be_tx_credit_mac(accum, &tmp, M0_BAP_NR);
		break;
	case M0_BAO_ALLOC_ALIGNED:
		m0_be_tx_credit_add(accum, &cred_split);
		m0_be_tx_credit_add(accum, &mem_zero_credit);
		m0_be_tx_credit_add(accum, &stats_credit);
		break;
	case M0_BAO_ALLOC:
		m0_be_allocator_credit(a, M0_BAO_ALLOC_ALIGNED, size,
				       M0_BE_ALLOC_SHIFT_MIN, accum);
		break;
	case M0_BAO_FREE_ALIGNED:
		m0_be_tx_credit_add(accum, &cred_mark_free);
		m0_be_tx_credit_mac(accum, &chunk_trymerge_credit, 2);
		m0_be_tx_credit_add(accum, &stats_credit);
		break;
	case M0_BAO_FREE:
		m0_be_allocator_credit(a, M0_BAO_FREE_ALIGNED, size,
				       shift, accum);
		break;
	default:
		M0_IMPOSSIBLE("Invalid allocator operation type");
	}
	/* XXX FIXME ASAP workaround for allocator/btree/etc. credit bugs */
	/* 640 bytes ought to be enough for anybody */
	m0_be_tx_credit_add(accum, &M0_BE_TX_CREDIT(40, 640));
}
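
/*
 * A typical pairing of the credit calculation with the operation itself
 * (a sketch, not code from this file; it assumes the transaction is opened
 * with the accumulated credit and uses the M0_BE_OP_SYNC helper from
 * be/op.h):
 *
 * @code
 * struct m0_be_tx_credit cred = {};
 * void                  *ptr;
 *
 * m0_be_allocator_credit(a, M0_BAO_ALLOC, size, 0, &cred);
 * m0_be_tx_prep(tx, &cred);
 * // ... open the transaction and wait until it becomes active ...
 * M0_BE_OP_SYNC(op, m0_be_alloc(a, tx, &op, &ptr, size));
 * @endcode
 */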

M0_INTERNAL void m0_be_alloc_aligned(struct m0_be_allocator *a,
				     struct m0_be_tx        *tx,
				     struct m0_be_op        *op,
				     void                  **ptr,
				     m0_bcount_t             size,
				     unsigned                shift,
				     uint64_t                zonemask)
{
	enum m0_be_alloc_zone_type  ztype;
	struct be_alloc_chunk      *c = NULL;
	m0_bcount_t                 size_to_pick;
	int                         z;

	shift = max_check(shift, (unsigned) M0_BE_ALLOC_SHIFT_MIN);
	M0_ASSERT_INFO(size <= (M0_BCOUNT_MAX - (1UL << shift)) / 2,
		       "size=%" PRIu64, size);

	m0_be_op_active(op);

	m0_mutex_lock(&a->ba_lock);
	M0_PRE_EX(m0_be_allocator__invariant(a));

	/* the algorithm starts here */
	size_to_pick = (1UL << shift) - (1UL << M0_BE_ALLOC_SHIFT_MIN) +
		       m0_align(size, 1UL << M0_BE_ALLOC_SHIFT_MIN);
	for (z = 0; z < M0_BAP_NR; ++z) {
		if ((zonemask & M0_BITS(z)) != 0)
			c = m0_be_fl_pick(&a->ba_h[z]->bah_fl, size_to_pick);
		if (c != NULL)
			break;
	}
	/* XXX If allocation fails then stats are updated for the normal zone. */
	ztype = c != NULL ? z : M0_BAP_NORMAL;
	if (c != NULL) {
		c = be_alloc_chunk_trysplit(a, ztype, tx, c, size, shift);
		M0_ASSERT(c != NULL);
		M0_ASSERT(c->bac_zone == ztype);
		memset(&c->bac_mem, 0, size);
		m0_be_tx_capture(tx, &M0_BE_REG(a->ba_seg, size, &c->bac_mem));
	}
	*ptr = c == NULL ? NULL : &c->bac_mem;
	be_allocator_stats_update(&a->ba_h[ztype]->bah_stats,
				  c == NULL ? size : c->bac_size, true,
				  c == NULL);
	be_allocator_stats_capture(a, ztype, tx);
	/* and ends here */

	M0_LOG(M0_DEBUG, "allocator=%p size=%" PRIu64 " shift=%u "
	       "c=%p c->bac_size=%" PRIu64 " ptr=%p", a, size, shift, c,
	       c == NULL ? 0 : c->bac_size, *ptr);
	if (*ptr == NULL) {
		be_allocator_stats_print(&a->ba_h[ztype]->bah_stats);
	}

	if (c != NULL) {
		M0_POST(!c->bac_free);
		M0_POST(c->bac_size >= size);
		M0_POST(m0_addr_is_aligned(&c->bac_mem, shift));
		M0_POST(be_alloc_chunk_is_in(a, ztype, c));
	}
	/*
	 * Unlock the mutex after the post-conditions, which are using the
	 * allocator internals.
	 */
	M0_POST_EX(m0_be_allocator__invariant(a));
	m0_mutex_unlock(&a->ba_lock);

	/* set the op state after the post-conditions because they use op */
	m0_be_op_done(op);
}

M0_INTERNAL void m0_be_alloc(struct m0_be_allocator *a,
			     struct m0_be_tx        *tx,
			     struct m0_be_op        *op,
			     void                  **ptr,
			     m0_bcount_t             size)
{
	m0_be_alloc_aligned(a, tx, op, ptr, size, M0_BE_ALLOC_SHIFT_MIN,
			    M0_BITS(M0_BAP_NORMAL));
}

M0_INTERNAL void m0_be_free_aligned(struct m0_be_allocator *a,
				    struct m0_be_tx        *tx,
				    struct m0_be_op        *op,
				    void                   *ptr)
{
	enum m0_be_alloc_zone_type  ztype;
	struct be_alloc_chunk      *c;
	struct be_alloc_chunk      *prev;
	struct be_alloc_chunk      *next;
	bool                        chunks_were_merged;

	M0_PRE(ptr != NULL);
	M0_PRE(m0_reduce(z, M0_BAP_NR, 0,
			 +(int)be_alloc_mem_is_in(a, z, ptr, 1)) == 1);

	m0_be_op_active(op);

	m0_mutex_lock(&a->ba_lock);
	M0_PRE_EX(m0_be_allocator__invariant(a));

	c = be_alloc_chunk_addr(ptr);
	M0_PRE_EX(be_alloc_chunk_invariant(a, c));
	M0_PRE(!c->bac_free);
	ztype = c->bac_zone;
	M0_LOG(M0_DEBUG, "allocator=%p c=%p c->bac_size=%" PRIu64 " zone=%d "
	       "data=%p", a, c, c->bac_size, c->bac_zone, &c->bac_mem);
	/* the algorithm starts here */
	be_alloc_chunk_mark_free(a, ztype, tx, c);
	/* update stats before c->bac_size gets modified due to merge */
	be_allocator_stats_update(&a->ba_h[ztype]->bah_stats,
				  c->bac_size, false, false);
	prev = be_alloc_chunk_prev(a, ztype, c);
	next = be_alloc_chunk_next(a, ztype, c);
	chunks_were_merged = be_alloc_chunk_trymerge(a, ztype, tx,
						     prev, c);
	if (chunks_were_merged)
		c = prev;
	be_alloc_chunk_trymerge(a, ztype, tx, c, next);
	be_allocator_stats_capture(a, ztype, tx);
	/* and ends here */
	M0_POST(c->bac_free);
	M0_POST(c->bac_size > 0);
	M0_POST_EX(be_alloc_chunk_invariant(a, c));

	M0_POST_EX(m0_be_allocator__invariant(a));
	m0_mutex_unlock(&a->ba_lock);

	m0_be_op_done(op);
}

M0_INTERNAL void m0_be_free(struct m0_be_allocator *a,
			    struct m0_be_tx        *tx,
			    struct m0_be_op        *op,
			    void                   *ptr)
{
	m0_be_free_aligned(a, tx, op, ptr);
}

M0_INTERNAL void m0_be_alloc_stats(struct m0_be_allocator       *a,
				   struct m0_be_allocator_stats *out)
{
	m0_mutex_lock(&a->ba_lock);
	M0_PRE_EX(m0_be_allocator__invariant(a));
	*out = a->ba_h[M0_BAP_NORMAL]->bah_stats;
	m0_mutex_unlock(&a->ba_lock);
}

M0_INTERNAL void m0_be_alloc_stats_credit(struct m0_be_allocator *a,
					  struct m0_be_tx_credit *accum)
{
	m0_be_tx_credit_add(accum,
		&M0_BE_TX_CREDIT_PTR(&a->ba_h[M0_BAP_NORMAL]->bah_stats));
}

M0_INTERNAL void m0_be_alloc_stats_capture(struct m0_be_allocator *a,
					   struct m0_be_tx        *tx)
{
	if (tx != NULL) {
		m0_be_tx_capture(tx, &M0_BE_REG_PTR(a->ba_seg,
					&a->ba_h[M0_BAP_NORMAL]->bah_stats));
	}
}

/** @} end of be group */
#undef M0_TRACE_SUBSYSTEM

/*
 *  Local variables:
 *  c-indentation-style: "K&R"
 *  c-basic-offset: 8
 *  tab-width: 8
 *  fill-column: 80
 *  scroll-step: 1
 *  End:
 */
/*
 * vim: tabstop=8 shiftwidth=8 noexpandtab textwidth=80 nowrap
 */