Motr M0
cache.c
/*
 * Copyright (c) 2011-2020 Seagate Technology LLC and/or its Affiliates
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For any questions about this software or licensing,
 * please email opensource@seagate.com or cortx-questions@seagate.com.
 *
 */


#define M0_TRACE_SUBSYSTEM M0_TRACE_SUBSYS_STOB
#include "lib/trace.h"

#include "stob/cache.h"

#include "motr/magic.h"

#include "stob/stob.h" /* m0_stob */

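/**
 * Stob cache.
 *
 * The cache keeps two lists of m0_stob objects: sc_busy for stobs that are
 * currently in use and sc_idle for unused stobs that are retained for
 * possible reuse.  The idle list is bounded by sc_idle_size: when it
 * overflows, the stob at the tail of the list is removed and passed to the
 * user-supplied eviction callback.  Lookup, add and idle operations do not
 * take sc_lock themselves; callers are expected to bracket them with
 * m0_stob_cache_lock() and m0_stob_cache_unlock().
 */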
M0_TL_DESCR_DEFINE(stob_cache, "cached stobs", static, struct m0_stob,
                   so_cache_linkage, so_cache_magic,
                   M0_STOB_CACHE_MAGIC, M0_STOB_CACHE_HEAD_MAGIC);
M0_TL_DEFINE(stob_cache, static, struct m0_stob);

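/**
 * Initialises an empty cache with the given idle list capacity and eviction
 * callback; statistics counters start at zero.
 */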
M0_INTERNAL int m0_stob_cache_init(struct m0_stob_cache *cache,
                                   uint64_t idle_size,
                                   m0_stob_cache_eviction_cb_t eviction_cb)
{
        *cache = (struct m0_stob_cache){
                .sc_idle_size   = idle_size,
                .sc_idle_used   = 0,
                .sc_eviction_cb = eviction_cb,
                .sc_busy_hits   = 0,
                .sc_idle_hits   = 0,
                .sc_misses      = 0,
                .sc_evictions   = 0,
        };
        m0_mutex_init(&cache->sc_lock);
        stob_cache_tlist_init(&cache->sc_busy);
        stob_cache_tlist_init(&cache->sc_idle);
        return 0;
}

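/**
 * Finalises the cache.  The idle list is purged first; any stob still found
 * on the busy or idle list after that is logged at M0_FATAL level, as it is
 * still cached at finalisation time.
 */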
M0_INTERNAL void m0_stob_cache_fini(struct m0_stob_cache *cache)
{
        struct m0_stob *zombie;

        m0_stob_cache_purge(cache, cache->sc_idle_size);
        m0_tl_for(stob_cache, &cache->sc_busy, zombie) {
                M0_LOG(M0_FATAL, "Still busy "FID_F,
                       FID_P(m0_stob_fid_get(zombie)));
        } m0_tl_endfor;
        m0_tl_for(stob_cache, &cache->sc_idle, zombie) {
                M0_LOG(M0_FATAL, "Still idle "FID_F,
                       FID_P(m0_stob_fid_get(zombie)));
        } m0_tl_endfor;
        stob_cache_tlist_fini(&cache->sc_idle);
        stob_cache_tlist_fini(&cache->sc_busy);
        m0_mutex_fini(&cache->sc_lock);
}

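/**
 * Cache invariant: the cache is locked and the idle list accounting is
 * consistent (sc_idle_used never exceeds sc_idle_size and matches the actual
 * idle list length).
 */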
M0_INTERNAL bool m0_stob_cache__invariant(const struct m0_stob_cache *cache)
{
        return _0C(m0_stob_cache_is_locked(cache)) &&
               _0C(cache->sc_idle_size >= cache->sc_idle_used) &&
               M0_CHECK_EX(_0C(stob_cache_tlist_length(&cache->sc_idle) ==
                               cache->sc_idle_used));

}

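/* Passes the stob to the eviction callback and accounts the eviction. */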
static void stob_cache_evict(struct m0_stob_cache *cache,
                             struct m0_stob *stob)
{
        cache->sc_eviction_cb(cache, stob);
        ++cache->sc_evictions;
}

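/* Unlinks the stob from the idle list and decrements the idle list usage. */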
static void stob_cache_idle_del(struct m0_stob_cache *cache,
                                struct m0_stob *stob)
{
        M0_ENTRY("stob %p, stob_fid "FID_F, stob,
                 FID_P(m0_stob_fid_get(stob)));
        stob_cache_tlink_del_fini(stob);
        --cache->sc_idle_used;
}

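/*
 * Moves the stob to the idle list.  If this pushes the list over
 * sc_idle_size, the stob at the tail of the idle list is deleted and evicted.
 */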
static void stob_cache_idle_moveto(struct m0_stob_cache *cache,
                                   struct m0_stob *stob)
{
        struct m0_stob *evicted;

        stob_cache_tlist_move(&cache->sc_idle, stob);
        ++cache->sc_idle_used;
        if (cache->sc_idle_used > cache->sc_idle_size) {
                evicted = stob_cache_tlist_tail(&cache->sc_idle);
                stob_cache_idle_del(cache, evicted);
                stob_cache_evict(cache, evicted);
        }
}

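/** Adds a new stob to the busy list of the cache. */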
M0_INTERNAL void m0_stob_cache_add(struct m0_stob_cache *cache,
                                   struct m0_stob *stob)
{

        stob_cache_tlink_init_at(stob, &cache->sc_busy);
}

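/**
 * Marks a stob as no longer in use: it is moved to the idle list and becomes
 * a candidate for eviction.
 */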
M0_INTERNAL void m0_stob_cache_idle(struct m0_stob_cache *cache,
                                    struct m0_stob *stob)
{

        stob_cache_idle_moveto(cache, stob);
}

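/**
 * Looks up a stob by its fid.  A hit on the busy list returns the stob as
 * is; a hit on the idle list moves the stob back to the busy list before
 * returning it.  Returns NULL on a miss.  Hit and miss counters are updated
 * accordingly.
 */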
M0_INTERNAL struct m0_stob *m0_stob_cache_lookup(struct m0_stob_cache *cache,
                                                 const struct m0_fid *stob_fid)
{
        struct m0_stob *stob;

        m0_tl_for(stob_cache, &cache->sc_busy, stob) {
                if (m0_fid_cmp(stob_fid, m0_stob_fid_get(stob)) == 0) {
                        ++cache->sc_busy_hits;
                        return stob;
                }
        } m0_tl_endfor;

        m0_tl_for(stob_cache, &cache->sc_idle, stob) {
                if (m0_fid_cmp(stob_fid, m0_stob_fid_get(stob)) == 0) {
                        ++cache->sc_idle_hits;
                        stob_cache_idle_del(cache, stob);
                        stob_cache_tlink_init_at(stob, &cache->sc_busy);
                        return stob;
                }
        } m0_tl_endfor;

        ++cache->sc_misses;
        return NULL;
}

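/** Evicts up to "nr" stobs, starting from the tail of the idle list. */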
M0_INTERNAL void m0_stob_cache_purge(struct m0_stob_cache *cache, int nr)
{
        struct m0_stob *stob;
        struct m0_stob *prev;

        m0_stob_cache_lock(cache);
        M0_PRE_EX(m0_stob_cache__invariant(cache));

        stob = stob_cache_tlist_tail(&cache->sc_idle);
        for (; stob != NULL && nr > 0; --nr) {
                prev = stob_cache_tlist_prev(&cache->sc_idle, stob);
                stob_cache_idle_del(cache, stob);
                stob_cache_evict(cache, stob);
                stob = prev;
        }

        M0_POST(m0_stob_cache__invariant(cache));
        m0_stob_cache_unlock(cache);
}

M0_INTERNAL void m0_stob_cache_lock(struct m0_stob_cache *cache)
{
        m0_mutex_lock(&cache->sc_lock);
}

M0_INTERNAL void m0_stob_cache_unlock(struct m0_stob_cache *cache)
{
        m0_mutex_unlock(&cache->sc_lock);
}

M0_INTERNAL bool m0_stob_cache_is_locked(const struct m0_stob_cache *cache)
{
        return m0_mutex_is_locked(&cache->sc_lock);
}

M0_INTERNAL bool m0_stob_cache_is_not_locked(const struct m0_stob_cache *cache)
{
        return m0_mutex_is_not_locked(&cache->sc_lock);
}

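/** Dumps cache statistics and the contents of both lists to the trace log. */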
M0_INTERNAL void m0_stob_cache__print(struct m0_stob_cache *cache)
{
#define LEVEL M0_DEBUG
        struct m0_stob *stob;
        int i;

        M0_LOG(LEVEL, "m0_stob_cache %p: "
               "sc_busy_hits = %" PRIu64 ", sc_idle_hits = %" PRIu64 ", "
               "sc_misses = %" PRIu64 ", sc_evictions = %"PRIu64, cache,
               cache->sc_busy_hits, cache->sc_idle_hits,
               cache->sc_misses, cache->sc_evictions);
        M0_LOG(LEVEL, "m0_stob_cache %p: "
               "sc_idle_size = %" PRIu64 ", sc_idle_used = %" PRIu64 ", ",
               cache, cache->sc_idle_size, cache->sc_idle_used);
        M0_LOG(LEVEL, "m0_stob_cache %p: "
               "sc_busy length = %zu, sc_idle length = %zu", cache,
               stob_cache_tlist_length(&cache->sc_busy),
               stob_cache_tlist_length(&cache->sc_idle));


        M0_LOG(LEVEL, "m0_stob_cache %p: sc_busy list", cache);
        i = 0;
        m0_tl_for(stob_cache, &cache->sc_busy, stob) {
                M0_LOG(LEVEL, "%d: %p, stob_fid =" FID_F,
                       i, stob, FID_P(m0_stob_fid_get(stob)));
                ++i;
        } m0_tl_endfor;

        M0_LOG(LEVEL, "m0_stob_cache %p: sc_idle list", cache);
        i = 0;
        m0_tl_for(stob_cache, &cache->sc_idle, stob) {
                M0_LOG(LEVEL, "%d: %p, stob_key =" FID_F,
                       i, stob, FID_P(m0_stob_fid_get(stob)));
                ++i;
        } m0_tl_endfor;
        M0_LOG(LEVEL, "m0_stob_cache %p: end.", cache);
#undef LEVEL
}

#undef M0_TRACE_SUBSYSTEM

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 80
 * scroll-step: 1
 * End:
 */