Motr  M0
klnet_vec.c
Go to the documentation of this file.
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2012-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
/* This file is designed to be included in klnet_core.c.
   Logic in this file is liberally borrowed from Isaac Huang's
   ulamod_ubuf2kiov() subroutine in cortx-motr/lnet/lnet/ula/ulamod_mem.c
   (unreleased ULA code).
 */
28 
37 static bool nlx_kcore_kiov_invariant(const lnet_kiov_t *k, size_t len)
38 {
39  size_t i;
40 
41  if (k == NULL || len == 0 || len > LNET_MAX_IOV)
42  return false;
43  for (i = 0; i < len; ++i, ++k) {
44  if (k->kiov_page == NULL || k->kiov_len == 0 ||
45  k->kiov_len > PAGE_SIZE)
46  return false;
47  if (k->kiov_offset != 0 && i != 0)
48  return false; /* permitted only on first */
49  if (k->kiov_len != PAGE_SIZE && i != (len - 1) &&
50  k->kiov_offset == 0)
51  return false; /* permitted only on last entry */
52  if (k->kiov_offset != 0 &&
53  (k->kiov_offset + k->kiov_len) > PAGE_SIZE)
54  return false;
55  }
56  return true;
57 }
58 
65 static unsigned bufvec_seg_page_count(const struct m0_bufvec *bvec, unsigned n)
66 {
67  m0_bcount_t seg_len;
68  void *seg_addr;
69  unsigned npages;
70 
71  M0_ASSERT(n < bvec->ov_vec.v_nr);
72  seg_len = bvec->ov_vec.v_count[n];
73  seg_addr = bvec->ov_buf[n];
74 
75  /* npages = last_page_num - first_page_num + 1 */
76  npages = (uint64_t)(seg_addr + seg_len - 1) / PAGE_SIZE -
77  (uint64_t)seg_addr / PAGE_SIZE + 1;
78 
79  return npages;
80 }
81 
93 static unsigned bufvec_seg_kla_to_kiov(const struct m0_bufvec *bvec,
94  unsigned n,
95  lnet_kiov_t *kiov)
96 {
97  unsigned num_pages = bufvec_seg_page_count(bvec, n);
98  m0_bcount_t seg_len = bvec->ov_vec.v_count[n];
99  uint64_t seg_addr = (uint64_t) bvec->ov_buf[n];
100  int pnum;
101 
102  for (pnum = 0; pnum < num_pages; ++pnum) {
103  uint64_t offset = NLX_PAGE_OFFSET(seg_addr);
104  uint64_t len = PAGE_SIZE - offset;
105  struct page *pg;
106 
107  if (len > seg_len)
108  len = seg_len;
109 
110  pg = virt_to_page(seg_addr); /* KLA only! */
111  M0_ASSERT(pg != 0);
112 
113  kiov[pnum].kiov_page = pg;
114  kiov[pnum].kiov_len = len;
115  kiov[pnum].kiov_offset = offset;
116 
117  seg_addr += len;
118  seg_len -= len;
119  }
120  return num_pages;
121 }
122 
134  const struct m0_bufvec *bvec)
135 {
136  unsigned i;
137  unsigned num_pages;
138  unsigned knum;
139  int rc;
140 
142  M0_PRE(kb->kb_kiov == NULL && kb->kb_kiov_len == 0);
143 
144  /* compute the number of pages required */
145  num_pages = 0;
146  for (i = 0; i < bvec->ov_vec.v_nr; ++i)
147  num_pages += bufvec_seg_page_count(bvec, i);
148  M0_ASSERT(num_pages > 0);
149  if (num_pages > LNET_MAX_IOV) {
150  rc = M0_ERR(-EFBIG);
151  goto fail;
152  }
153 
154  /* allocate and fill in the kiov */
155  NLX_ALLOC_ARR(kb->kb_kiov, num_pages);
156  if (kb->kb_kiov == NULL) {
157  rc = M0_ERR(-ENOMEM);
158  goto fail;
159  }
160  kb->kb_kiov_len = num_pages;
161  knum = 0;
162  for (i = 0; i < bvec->ov_vec.v_nr; ++i)
163  knum += bufvec_seg_kla_to_kiov(bvec, i, &kb->kb_kiov[knum]);
164  M0_POST(knum == num_pages);
166 
167  return 0;
168 fail:
169  M0_ASSERT(rc != 0);
170  return M0_RC(rc);
171 }
172 
186  const struct m0_bufvec *bvec,
187  unsigned n,
188  lnet_kiov_t *kiov)
189 {
190  unsigned num_pages = bufvec_seg_page_count(bvec, n);
191  m0_bcount_t seg_len = bvec->ov_vec.v_count[n];
192  uint64_t seg_addr = (uint64_t) bvec->ov_buf[n];
193  int pnum;
194  int rc;
195 
196  for (pnum = 0; pnum < num_pages; ++pnum) {
197  uint64_t offset = NLX_PAGE_OFFSET(seg_addr);
198  uint64_t len = PAGE_SIZE - offset;
199  struct page *pg;
200 
201  if (len > seg_len)
202  len = seg_len;
203 
204  down_read(&current->mm->mmap_sem);
205  rc = WRITABLE_USER_PAGE_GET(seg_addr, pg);
206  up_read(&current->mm->mmap_sem);
207  if (rc < 0)
208  goto fail_page;
209  M0_ASSERT(rc == 1);
210 
211  kiov[pnum].kiov_page = pg;
212  kiov[pnum].kiov_len = len;
213  kiov[pnum].kiov_offset = offset;
214 
215  seg_addr += len;
216  seg_len -= len;
217  }
218  return num_pages;
219 fail_page:
220  while (pnum > 0)
221  WRITABLE_USER_PAGE_PUT(kiov[--pnum].kiov_page);
222  M0_ASSERT(rc < 0);
223  return M0_RC(rc);
224 }
225 
237  const struct m0_bufvec *bvec)
238 {
239  unsigned i;
240  unsigned num_pages;
241  unsigned knum;
242  int rc;
243 
245  M0_PRE(kb->kb_kiov == NULL && kb->kb_kiov_len == 0);
246 
247  /* compute the number of pages required */
248  num_pages = 0;
249  for (i = 0; i < bvec->ov_vec.v_nr; ++i)
250  num_pages += bufvec_seg_page_count(bvec, i);
251  if (num_pages == 0) {
252  rc = -EBADR;
253  goto fail;
254  }
255  if (num_pages > LNET_MAX_IOV) {
256  rc = -EFBIG;
257  goto fail;
258  }
259 
260  /* allocate and fill in the kiov */
261  NLX_ALLOC_ARR(kb->kb_kiov, num_pages);
262  if (kb->kb_kiov == NULL) {
263  rc = M0_ERR(-ENOMEM);
264  goto fail;
265  }
266  kb->kb_kiov_len = num_pages;
267  knum = 0;
268  for (i = 0; i < bvec->ov_vec.v_nr; ++i) {
269  rc = bufvec_seg_uva_to_kiov(kb, bvec, i, &kb->kb_kiov[knum]);
270  if (rc < 0)
271  goto fail_pages;
272  knum += rc;
273  }
274  M0_POST(knum == num_pages);
276  rc = -EBADR;
277  goto fail_pages;
278  }
279  return 0;
280 
281 fail_pages:
282  while (knum > 0)
283  WRITABLE_USER_PAGE_PUT(kb->kb_kiov[--knum].kiov_page);
284  m0_free0(&kb->kb_kiov);
285  kb->kb_kiov_len = 0;
286 fail:
287  M0_ASSERT(rc < 0);
288  return M0_RC(rc);
289 }
290 
302 static size_t nlx_kcore_num_kiov_entries_for_bytes(const lnet_kiov_t *kiov,
303  size_t kiov_len,
304  m0_bcount_t bytes,
305  unsigned *last_len)
306 {
308  size_t i;
309 
310  M0_PRE(kiov != NULL);
311  M0_PRE(kiov_len > 0);
312  M0_PRE(last_len != NULL);
313 
314  for (i = 0, count = 0; i < kiov_len && count < bytes; ++i, ++kiov)
315  count += kiov->kiov_len;
316 
317  M0_POST(i <= kiov_len);
318  --kiov;
319  *last_len = (kiov->kiov_len - count + bytes) % kiov->kiov_len;
320  if (*last_len == 0)
321  *last_len = kiov->kiov_len;
322 #if 0
323  NLXP("bytes=%lu n=%lu c=%ld k=%ld l=%u\n", (unsigned long) bytes,
324  (unsigned long) i, (unsigned long) count,
325  (unsigned long) kiov->kiov_len, *last_len);
326 #endif
327  return i;
328 }
329  /* KLNetCore */
331 
332 /*
333  * Local variables:
334  * c-indentation-style: "K&R"
335  * c-basic-offset: 8
336  * tab-width: 8
337  * fill-column: 79
338  * scroll-step: 1
339  * End:
340  */
size_t kb_kiov_len
Definition: klnet_core.h:191
static unsigned bufvec_seg_kla_to_kiov(const struct m0_bufvec *bvec, unsigned n, lnet_kiov_t *kiov)
Definition: klnet_vec.c:93
#define M0_PRE(cond)
#define NULL
Definition: misc.h:38
#define WRITABLE_USER_PAGE_GET(uaddr, pg)
Definition: klnet_drv.h:146
static size_t nlx_kcore_num_kiov_entries_for_bytes(const lnet_kiov_t *kiov, size_t kiov_len, m0_bcount_t bytes, unsigned *last_len)
Definition: klnet_vec.c:302
#define NLX_PAGE_OFFSET(addr)
Definition: klnet_core.h:404
struct m0_vec ov_vec
Definition: vec.h:147
uint64_t m0_bcount_t
Definition: types.h:77
#define PAGE_SIZE
Definition: lnet_ut.c:277
static int nlx_kcore_buffer_uva_to_kiov(struct nlx_kcore_buffer *kb, const struct m0_bufvec *bvec)
Definition: klnet_vec.c:236
#define NLXP(fmt,...)
Definition: lnet_main.c:876
void ** ov_buf
Definition: vec.h:149
static m0_bcount_t count
Definition: xcode.c:167
#define NLX_ALLOC_ARR(ptr, nr)
Definition: lnet_core.h:639
return M0_RC(rc)
static int nlx_kcore_buffer_kla_to_kiov(struct nlx_kcore_buffer *kb, const struct m0_bufvec *bvec)
Definition: klnet_vec.c:133
int i
Definition: dir.c:1033
return M0_ERR(-EOPNOTSUPP)
if(value==NULL)
Definition: dir.c:350
#define m0_free0(pptr)
Definition: memory.h:77
#define M0_ASSERT(cond)
static struct m0_bufvec bvec
Definition: xcode.c:169
static int bufvec_seg_uva_to_kiov(struct nlx_kcore_buffer *kb, const struct m0_bufvec *bvec, unsigned n, lnet_kiov_t *kiov)
Definition: klnet_vec.c:185
#define M0_POST(cond)
uint32_t v_nr
Definition: vec.h:51
static m0_bindex_t offset
Definition: dump.c:173
m0_bcount_t * v_count
Definition: vec.h:53
lnet_kiov_t * kb_kiov
Definition: klnet_core.h:188
static uint8_t fail[DATA_UNIT_COUNT_MAX+PARITY_UNIT_COUNT_MAX]
uint64_t n
Definition: fops.h:107
#define WRITABLE_USER_PAGE_PUT(pg)
Definition: klnet_drv.h:155
static unsigned bufvec_seg_page_count(const struct m0_bufvec *bvec, unsigned n)
Definition: klnet_vec.c:65
static bool nlx_kcore_buffer_invariant(const struct nlx_kcore_buffer *kcb)
Definition: klnet_core.c:908
int32_t rc
Definition: trigger_fop.h:47
Definition: vec.h:145
static bool nlx_kcore_kiov_invariant(const lnet_kiov_t *k, size_t len)
Definition: klnet_vec.c:37