Motr (M0) — klnet_drv.c: kernel LNet transport device driver (the ioctl
interface backing the user-space LNet transport).
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2012-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
824 #include <linux/miscdevice.h>
825 
826 M0_BASSERT(sizeof(struct nlx_xo_domain) < PAGE_SIZE);
827 M0_BASSERT(sizeof(struct nlx_xo_transfer_mc) < PAGE_SIZE);
828 M0_BASSERT(sizeof(struct nlx_xo_buffer) < PAGE_SIZE);
829 M0_BASSERT(sizeof(struct nlx_core_buffer_event) < PAGE_SIZE);
830 
831 /* LNET_NIDSTR_SIZE is only defined in the kernel */
832 M0_BASSERT(M0_NET_LNET_NIDSTR_SIZE == LNET_NIDSTR_SIZE);
833 
855 
856 {
857  struct page *pg;
858  struct nlx_core_domain *cd;
859  uint32_t off = NLX_PAGE_OFFSET((unsigned long) p->ddi_cd);
860  int rc;
861 
865  return M0_ERR(-EBADR);
866  }
867  if (off + sizeof *cd > PAGE_SIZE)
868  return M0_ERR(-EBADR);
869 
870  /* note: these calls can block */
871  down_read(&current->mm->mmap_sem);
872  rc = WRITABLE_USER_PAGE_GET(p->ddi_cd, pg);
873  up_read(&current->mm->mmap_sem);
874 
875  if (rc >= 0) {
876  M0_ASSERT(rc == 1);
877  nlx_core_kmem_loc_set(&kd->kd_cd_loc, pg, off);
878  cd = nlx_kcore_core_domain_map(kd);
879  rc = kd->kd_drv_ops->ko_dom_init(kd, cd);
880  if (rc == 0) {
881  p->ddi_max_buffer_size =
883  p->ddi_max_buffer_segment_size =
885  p->ddi_max_buffer_segments =
889  }
891  }
893  return M0_RC(rc);
894 }
895 
903 {
904  struct page *pg;
905  struct nlx_core_buffer *cb;
906  struct nlx_kcore_buffer *kb;
907  uint32_t off = NLX_PAGE_OFFSET((unsigned long) p->dbr_lcbuf);
908  m0_bcount_t sz;
909  void **buf;
911  int n = p->dbr_bvec.ov_vec.v_nr;
912  int rc;
913 
914  if (off + sizeof *cb > PAGE_SIZE)
915  return M0_ERR(-EBADR);
916  if (p->dbr_buffer_id == 0)
917  return M0_ERR(-EBADR);
918  NLX_ALLOC_ARR(buf, n);
919  if (buf == NULL)
920  return M0_ERR(-ENOMEM);
922  if (count == NULL) {
923  rc = M0_ERR(-ENOMEM);
924  goto fail_count;
925  }
926 
927  sz = n * sizeof *buf;
928  if (copy_from_user(buf, (void __user *) p->dbr_bvec.ov_buf, sz)) {
929  rc = M0_ERR(-EFAULT);
930  goto fail_copy;
931  }
932  sz = n * sizeof *count;
933  if (copy_from_user(count,
934  (void __user *) p->dbr_bvec.ov_vec.v_count, sz)) {
935  rc = M0_ERR(-EFAULT);
936  goto fail_copy;
937  }
938 
939  NLX_ALLOC_PTR(kb);
940  if (kb == NULL) {
941  rc = M0_ERR(-ENOMEM);
942  goto fail_copy;
943  }
945 
946  down_read(&current->mm->mmap_sem);
947  rc = WRITABLE_USER_PAGE_GET(p->dbr_lcbuf, pg);
948  up_read(&current->mm->mmap_sem);
949  if (rc < 0)
950  goto fail_page;
951  nlx_core_kmem_loc_set(&kb->kb_cb_loc, pg, off);
952  cb = nlx_kcore_core_buffer_map(kb);
953  if (cb->cb_magic != 0 || cb->cb_buffer_id != 0 || cb->cb_kpvt != NULL) {
954  rc = -EBADR;
955  goto fail_cb;
956  }
957  rc = kd->kd_drv_ops->ko_buf_register(kd, p->dbr_buffer_id, cb, kb);
958  if (rc != 0)
959  goto fail_cb;
960 
961  p->dbr_bvec.ov_buf = buf;
962  p->dbr_bvec.ov_vec.v_count = count;
963  rc = nlx_kcore_buffer_uva_to_kiov(kb, &p->dbr_bvec);
964  p->dbr_bvec.ov_buf = NULL;
965  p->dbr_bvec.ov_vec.v_count = NULL;
966  if (rc != 0)
967  goto fail_kiov;
968 
969  M0_ASSERT(kb->kb_kiov != NULL && kb->kb_kiov_len > 0);
973  drv_bufs_tlist_add(&kd->kd_drv_bufs, kb);
975  m0_free(count);
976  m0_free(buf);
977  return 0;
978 
979 fail_kiov:
980  kd->kd_drv_ops->ko_buf_deregister(cb, kb);
981 fail_cb:
984 fail_page:
985  kb->kb_magic = 0;
986  m0_free(kb);
987 fail_copy:
988  m0_free(count);
989 fail_count:
990  m0_free(buf);
991  M0_ASSERT(rc < 0);
992  return M0_RC(rc);
993 }
994 
999 static void nlx_dev_buf_pages_unpin(const struct nlx_kcore_buffer *kb)
1000 {
1001  size_t i;
1002 
1003  for (i = 0; i < kb->kb_kiov_len; ++i)
1004  WRITABLE_USER_PAGE_PUT(kb->kb_kiov[i].kiov_page);
1005 }
1006 
1013  struct nlx_kcore_buffer *kb)
1014 {
1015  struct nlx_core_buffer *cb;
1016 
1017  if (!nlx_kcore_buffer_invariant(kb))
1018  return M0_ERR(-EBADR);
1020  drv_bufs_tlist_del(kb);
1023  cb = nlx_kcore_core_buffer_map(kb);
1024  kd->kd_drv_ops->ko_buf_deregister(cb, kb);
1027  m0_free(kb);
1028  return 0;
1029 }
1030 
1038 {
1039  struct nlx_kcore_buffer *kb = p->dbd_kb;
1040 
1041  /* protect against user space passing invalid ptr */
1042  if (!virt_addr_valid(kb))
1043  return M0_ERR(-EBADR);
1044  return nlx_dev_buf_deregister(kd, kb);
1045 }
1046 
1053  const struct m0_lnet_dev_buf_queue_params *p,
1055 {
1056  struct nlx_kcore_transfer_mc *ktm = p->dbq_ktm;
1057  struct nlx_kcore_buffer *kb = p->dbq_kb;
1058  struct nlx_core_buffer *cb;
1059  int rc;
1060 
1061  M0_PRE(op != NULL);
1062  if (!virt_addr_valid(ktm) || !virt_addr_valid(kb))
1063  return M0_ERR(-EBADR);
1064  if (!nlx_kcore_tm_invariant(ktm))
1065  return M0_ERR(-EBADR);
1066  if (!nlx_kcore_buffer_invariant(kb))
1067  return M0_ERR(-EBADR);
1068  cb = nlx_kcore_core_buffer_map(kb);
1069  if (!nlx_core_buffer_invariant(cb))
1070  rc = -EBADR;
1071  else
1072  rc = op(ktm, cb, kb);
1074 
1075  return M0_RC(rc);
1076 }
1077 
1083 static int nlx_dev_ioctl_buf_del(const struct nlx_kcore_domain *kd,
1084  const struct m0_lnet_dev_buf_queue_params *p)
1085 {
1086  struct nlx_kcore_transfer_mc *ktm = p->dbq_ktm;
1087  struct nlx_kcore_buffer *kb = p->dbq_kb;
1088 
1089  if (!virt_addr_valid(ktm) || !virt_addr_valid(kb))
1090  return M0_ERR(-EBADR);
1091  if (!nlx_kcore_tm_invariant(ktm))
1092  return M0_ERR(-EBADR);
1093  if (!nlx_kcore_buffer_invariant(kb))
1094  return M0_ERR(-EBADR);
1095  return kd->kd_drv_ops->ko_buf_del(ktm, kb);
1096 }
1097 
1104  const struct m0_lnet_dev_buf_event_wait_params *p)
1105 {
1106  struct nlx_kcore_transfer_mc *ktm = p->dbw_ktm;
1107  struct nlx_core_transfer_mc *ctm;
1108  int rc;
1109 
1110  if (!virt_addr_valid(ktm))
1111  return M0_ERR(-EBADR);
1113  ctm = nlx_kcore_core_tm_map(ktm);
1114  if (!nlx_core_tm_invariant(ctm))
1115  rc = M0_ERR(-EBADR);
1116  else
1117  rc = kd->kd_drv_ops->ko_buf_event_wait(ctm, ktm,
1118  p->dbw_timeout);
1120 
1121  return M0_RC(rc);
1122 }
1123 
1130 {
1131  return nlx_kcore_nidstr_decode(p->dn_buf, &p->dn_nid);
1132 }
1133 
1140 {
1141  return nlx_kcore_nidstr_encode(p->dn_nid, p->dn_buf);
1142 }
1143 
1155 {
1156  char **nidstrs;
1157  int rc = nlx_core_nidstrs_get(NULL, &nidstrs);
1158  char *buf;
1159  m0_bcount_t sz;
1160  int i;
1161 
1162  if (rc != 0)
1163  return M0_RC(rc);
1164  for (i = 0, sz = 1; nidstrs[i] != NULL; ++i)
1165  sz += strlen(nidstrs[i]) + 1;
1166  if (sz > p->dng_size) {
1167  nlx_core_nidstrs_put(NULL, &nidstrs);
1168  return M0_ERR_INFO(-EFBIG, "sz=%" PRIu64 " p->dng_size=%"PRIu64,
1169  sz, p->dng_size);
1170  }
1171  NLX_ALLOC(buf, sz);
1172  if (buf == NULL) {
1173  nlx_core_nidstrs_put(NULL, &nidstrs);
1174  return M0_ERR(-ENOMEM);
1175  }
1176  for (i = 0, sz = 0; nidstrs[i] != NULL; ++i) {
1177  strcpy(&buf[sz], nidstrs[i]);
1178  sz += strlen(nidstrs[i]) + 1;
1179  }
1180  nlx_core_nidstrs_put(NULL, &nidstrs);
1181  if (copy_to_user((void __user *) p->dng_buf, buf, sz))
1182  rc = -EFAULT;
1183  else
1184  rc = i;
1185  m0_free(buf);
1186 
1187  return M0_RC(rc);
1188 }
1189 
1199 {
1200  struct page *pg;
1201  struct nlx_core_transfer_mc *ctm;
1202  struct nlx_kcore_transfer_mc *ktm;
1203  unsigned long utmp = (unsigned long) p->dts_ctm;
1204  uint32_t off = NLX_PAGE_OFFSET(utmp);
1205  int rc;
1206 
1207  if (off + sizeof *ctm > PAGE_SIZE)
1208  return M0_ERR(-EBADR);
1209  NLX_ALLOC_PTR(ktm);
1210  if (ktm == NULL)
1211  return M0_ERR(-ENOMEM);
1213 
1214  down_read(&current->mm->mmap_sem);
1215  rc = WRITABLE_USER_PAGE_GET(utmp, pg);
1216  up_read(&current->mm->mmap_sem);
1217  if (rc < 0) {
1218  M0_LOG(M0_ERROR, "WRITABLE_USER_PAGE_GET() failed: rc=%d", rc);
1219  goto fail_page;
1220  }
1221  nlx_core_kmem_loc_set(&ktm->ktm_ctm_loc, pg, off);
1222  ctm = nlx_kcore_core_tm_map(ktm);
1223  if (ctm->ctm_magic != 0 || ctm->ctm_mb_counter != 0 ||
1224  ctm->ctm_kpvt != NULL) {
1225  rc = M0_ERR(-EBADR);
1226  goto fail_ctm;
1227  }
1228  rc = kd->kd_drv_ops->ko_tm_start(kd, ctm, ktm);
1229  if (rc != 0)
1230  goto fail_ctm;
1233  drv_tms_tlist_add(&kd->kd_drv_tms, ktm);
1235  return M0_RC(0);
1236 
1237 fail_ctm:
1240 fail_page:
1241  ktm->ktm_magic = 0;
1242  m0_free(ktm);
1243  M0_ASSERT(rc != 0);
1244  return M0_ERR(rc);
1245 }
1246 
1254 static int nlx_dev_tm_cleanup(struct nlx_kcore_domain *kd,
1255  struct nlx_kcore_transfer_mc *ktm)
1256 {
1257  struct nlx_kcore_buffer_event *kbev;
1258  struct nlx_core_transfer_mc *ctm;
1259 
1260  if (!nlx_kcore_tm_invariant(ktm))
1261  return M0_ERR(-EBADR);
1262  m0_tl_for(drv_bevs, &ktm->ktm_drv_bevs, kbev) {
1269  drv_bevs_tlist_del(kbev);
1271  drv_bevs_tlink_fini(kbev);
1272  m0_free(kbev);
1273  } m0_tl_endfor;
1275  drv_tms_tlist_del(ktm);
1277 
1278  ctm = nlx_kcore_core_tm_map(ktm);
1279  kd->kd_drv_ops->ko_tm_stop(ctm, ktm);
1282  m0_free(ktm);
1283  return 0;
1284 }
1285 
1292  struct m0_lnet_dev_tm_stop_params *p)
1293 {
1294  struct nlx_kcore_transfer_mc *ktm = p->dts_ktm;
1295 
1296  /* protect against user space passing invalid ptr */
1297  if (!virt_addr_valid(ktm))
1298  return M0_ERR(-EBADR);
1299  return nlx_dev_tm_cleanup(kd, ktm);
1300 }
1301 
1312 {
1313  struct page *pg;
1314  struct nlx_kcore_transfer_mc *ktm = p->dbb_ktm;
1315  struct nlx_kcore_buffer_event *kbe;
1316  struct nlx_core_buffer_event *cbe;
1317  uint32_t off = NLX_PAGE_OFFSET((unsigned long) p->dbb_bev);
1318  int rc;
1319 
1320  if (!virt_addr_valid(ktm))
1321  return M0_ERR(-EBADR);
1322  if (!nlx_kcore_tm_invariant(ktm))
1323  return M0_ERR(-EBADR);
1324  if (off + sizeof *cbe > PAGE_SIZE)
1325  return M0_ERR(-EBADR);
1326 
1327  NLX_ALLOC_PTR(kbe);
1328  if (kbe == NULL)
1329  return M0_ERR(-ENOMEM);
1330  drv_bevs_tlink_init(kbe);
1331 
1332  down_read(&current->mm->mmap_sem);
1333  rc = WRITABLE_USER_PAGE_GET(p->dbb_bev, pg);
1334  up_read(&current->mm->mmap_sem);
1335  if (rc < 0)
1336  goto fail_page;
1337  nlx_core_kmem_loc_set(&kbe->kbe_bev_loc, pg, off);
1338  cbe = nlx_kcore_core_bev_map(kbe);
1339  if (cbe->cbe_kpvt != NULL ||
1341  rc = -EBADR;
1342  goto fail_cbe;
1343  }
1344  bev_link_bless(&cbe->cbe_tm_link, pg);
1345  cbe->cbe_kpvt = kbe;
1348  drv_bevs_tlist_add(&ktm->ktm_drv_bevs, kbe);
1350  return 0;
1351 
1352 fail_cbe:
1355 fail_page:
1356  drv_bevs_tlink_fini(kbe);
1357  kbe->kbe_magic = 0;
1358  m0_free(kbe);
1359  M0_ASSERT(rc != 0);
1360  return M0_RC(rc);
1361 }
1362 
1372 static long nlx_dev_ioctl(struct file *file,
1373  unsigned int cmd, unsigned long arg)
1374 {
1375  struct nlx_kcore_domain *kd = file->private_data;
1376  union {
1377  struct m0_lnet_dev_dom_init_params dip;
1378  struct m0_lnet_dev_tm_start_params tsp;
1379  struct m0_lnet_dev_tm_stop_params tpp;
1382  struct m0_lnet_dev_buf_queue_params bqp;
1384  struct m0_lnet_dev_nid_encdec_params nep;
1385  struct m0_lnet_dev_nidstrs_get_params ngp;
1386  struct m0_lnet_dev_bev_bless_params bbp;
1387  } p;
1388  unsigned sz = _IOC_SIZE(cmd);
1389  int rc;
1390 
1393 
1394  if (_IOC_TYPE(cmd) != M0_LNET_IOC_MAGIC ||
1395  _IOC_NR(cmd) < M0_LNET_IOC_MIN_NR ||
1396  _IOC_NR(cmd) > M0_LNET_IOC_MAX_NR || sz > sizeof p) {
1397  rc = M0_ERR(-ENOTTY);
1398  goto done;
1399  }
1400 
1401  if (_IOC_DIR(cmd) & _IOC_WRITE) {
1402  if (copy_from_user(&p, (void __user *) arg, sz)) {
1403  rc = M0_ERR(-EFAULT);
1404  goto done;
1405  }
1406  }
1407 
1408  switch (cmd) {
1409  case M0_LNET_DOM_INIT:
1410  rc = nlx_dev_ioctl_dom_init(kd, &p.dip);
1411  break;
1412  case M0_LNET_TM_START:
1413  rc = nlx_dev_ioctl_tm_start(kd, &p.tsp);
1414  break;
1415  case M0_LNET_TM_STOP:
1416  rc = nlx_dev_ioctl_tm_stop(kd, &p.tpp);
1417  break;
1418  case M0_LNET_BUF_REGISTER:
1419  rc = nlx_dev_ioctl_buf_register(kd, &p.brp);
1420  break;
1422  rc = nlx_dev_ioctl_buf_deregister(kd, &p.bdp);
1423  break;
1424  case M0_LNET_BUF_MSG_RECV:
1427  break;
1428  case M0_LNET_BUF_MSG_SEND:
1431  break;
1435  break;
1439  break;
1443  break;
1447  break;
1448  case M0_LNET_BUF_DEL:
1449  rc = nlx_dev_ioctl_buf_del(kd, &p.bqp);
1450  break;
1452  rc = nlx_dev_ioctl_buf_event_wait(kd, &p.bep);
1453  break;
1454  case M0_LNET_BEV_BLESS:
1455  rc = nlx_dev_ioctl_bev_bless(kd, &p.bbp);
1456  break;
1457  case M0_LNET_NIDSTR_DECODE:
1459  break;
1460  case M0_LNET_NIDSTR_ENCODE:
1462  break;
1463  case M0_LNET_NIDSTRS_GET:
1464  rc = nlx_dev_ioctl_nidstrs_get(kd, &p.ngp);
1465  break;
1466  default:
1467  rc = M0_ERR(-ENOTTY);
1468  break;
1469  }
1470 
1471  if (rc >= 0 && (_IOC_DIR(cmd) & _IOC_READ)) {
1472  if (copy_to_user((void __user *) arg, &p, sz))
1473  rc = M0_ERR(-EFAULT);
1474  }
1475 
1476 done:
1477  if (rc < 0 && ergo(cmd == M0_LNET_BUF_EVENT_WAIT,
1478  !M0_IN(rc, (-ETIMEDOUT, -ERESTARTSYS))))
1479  M0_LOG(M0_ERROR, "cmd=0x%x rc=%d", cmd, rc);
1480  if (rc == -ERESTARTSYS)
1481  rc = -EINTR;
1482  return M0_RC(rc);
1483 }
1484 
1494 static int nlx_dev_open(struct inode *inode, struct file *file)
1495 {
1496  struct nlx_kcore_domain *kd;
1497  int rc;
1499 
1500  if (!(file->f_flags & O_RDWR))
1501  return M0_ERR(-EACCES);
1502 
1503  NLX_ALLOC_PTR(kd);
1504  if (kd == NULL)
1505  return M0_ERR(-ENOMEM);
1507  if (rc != 0) {
1508  m0_free(kd);
1509  } else {
1510  file->private_data = kd;
1511  }
1512  return M0_RC(rc);
1513 }
1514 
1527 M0_INTERNAL int nlx_dev_close(struct inode *inode, struct file *file)
1528 {
1529  struct nlx_kcore_domain *kd =
1530  (struct nlx_kcore_domain *) file->private_data;
1531  struct nlx_core_domain *cd;
1532  struct nlx_kcore_transfer_mc *ktm;
1533  struct nlx_kcore_buffer *kb;
1534  bool cleanup = false;
1535  int rc;
1537 
1539  file->private_data = NULL;
1540 
1541  /*
1542  * user program may not unmap all areas, eg if it was killed.
1543  * 1. Cancel all outstanding buffer operations.
1544  * 2. Clean up (stop, et al) all running TMs, this can take a while.
1545  * 3. De-register all buffers.
1546  */
1547  m0_tl_for(drv_bufs, &kd->kd_drv_bufs, kb) {
1548  ktm = kb->kb_ktm;
1549  if (ktm != NULL) {
1550  /*
1551  * Only LNetMDUnlink() causes nlx_kcore_LNetMDUnlink()
1552  * failure. That can happen if the operation completes
1553  * concurrently with the execution of this loop.
1554  * Such failures are OK in this context.
1555  */
1556  nlx_kcore_LNetMDUnlink(ktm, kb);
1557  }
1558  } m0_tl_endfor;
1559  m0_tl_for(drv_tms, &kd->kd_drv_tms, ktm) {
1560  /*
1561  * Wait until no more buffers are associated with this TM and
1562  * the event callback is no longer using the ktm. Must be in
1563  * the ktm-based loop because it needs to synchronize use of the
1564  * kb_ktm with the LNet event callback using the spinlock. Only
1565  * holds spinlock for extremely short periods to avoid seriously
1566  * blocking event callback. Depends on:
1567  * 1. There can be no other threads down-ing ktm_sem or adding
1568  * new buffers to queues while in nlx_dev_close().
1569  * 2. kb_ktm is set to NULL in the spinlock and before ktm_sem
1570  * is up'd.
1571  * 3. The final reference to the ktm in nlx_kcore_eq_cb() is to
1572  * unlock the spinlock, after up-ing ktm_sem.
1573  * 4. LNet events involving unlink are not dropped.
1574  */
1575  m0_tlist_for(&drv_bufs_tl, &kd->kd_drv_bufs, kb) {
1576  spin_lock(&ktm->ktm_bevq_lock);
1577  while (kb->kb_ktm == ktm) {
1578  spin_unlock(&ktm->ktm_bevq_lock);
1579  wait_event(ktm->ktm_wq, kb->kb_ktm == NULL);
1580  spin_lock(&ktm->ktm_bevq_lock);
1581  }
1582  spin_unlock(&ktm->ktm_bevq_lock);
1583  } m0_tlist_endfor;
1584 
1585  rc = nlx_dev_tm_cleanup(kd, ktm);
1586  M0_ASSERT(rc == 0);
1587  cleanup = true;
1588  } m0_tl_endfor;
1589  m0_tl_for(drv_bufs, &kd->kd_drv_bufs, kb) {
1590  rc = nlx_dev_buf_deregister(kd, kb);
1591  M0_ASSERT(rc == 0);
1592  cleanup = true;
1593  } m0_tl_endfor;
1594 
1595  if (cleanup)
1596  M0_LOG(M0_NOTICE, "close cleanup");
1597 
1598  /* user program may not successfully perform M0_NET_DOM_INIT ioctl */
1600  cd = nlx_kcore_core_domain_map(kd);
1601  kd->kd_drv_ops->ko_dom_fini(kd, cd);
1605  }
1606 
1608  m0_free(kd);
1609  return M0_RC(0);
1610 }
1611 
1613 static struct file_operations nlx_dev_file_ops = {
1614  .owner = THIS_MODULE,
1615  .unlocked_ioctl = nlx_dev_ioctl,
1616  .open = nlx_dev_open,
1617  .release = nlx_dev_close
1618 };
1619 
1626 static struct miscdevice nlx_dev = {
1627  .minor = MISC_DYNAMIC_MINOR,
1628  .name = M0_LNET_DEV,
1629  .fops = &nlx_dev_file_ops
1630 };
1631 static bool nlx_dev_registered = false;
1632  /* LNetDevInternal */
1634 
1640 M0_INTERNAL int nlx_dev_init(void)
1641 {
1642  int rc;
1643 
1644  rc = misc_register(&nlx_dev);
1645  if (rc != 0)
1646  return M0_RC(rc);
1647  nlx_dev_registered = true;
1648  printk("Motr %s registered with minor %d\n",
1649  nlx_dev.name, nlx_dev.minor);
1650  return M0_RC(rc);
1651 }
1652 
1653 M0_INTERNAL void nlx_dev_fini(void)
1654 {
1655  if (nlx_dev_registered) {
1656  misc_deregister(&nlx_dev);
1657  nlx_dev_registered = false;
1658  printk("Motr %s deregistered\n", nlx_dev.name);
1659  }
1660 }
1661  /* LNetDev */
1663 
1664 /*
1665  * Local variables:
1666  * c-indentation-style: "K&R"
1667  * c-basic-offset: 8
1668  * tab-width: 8
1669  * fill-column: 80
1670  * scroll-step: 1
1671  * End:
1672  */
static void nlx_kcore_core_domain_unmap(struct nlx_kcore_domain *kd)
Definition: klnet_utils.c:603
struct nlx_core_kmem_loc kd_cd_loc
Definition: klnet_core.h:95
static void nlx_kcore_kcore_dom_fini(struct nlx_kcore_domain *kd)
Definition: klnet_core.c:2078
size_t kb_kiov_len
Definition: klnet_core.h:191
static struct m0_addb2_philter p
Definition: consumer.c:40
static int nlx_dev_ioctl_nidstr_encode(struct m0_lnet_dev_nid_encdec_params *p)
Definition: klnet_drv.c:1139
#define M0_PRE(cond)
static struct nlx_core_transfer_mc * nlx_kcore_core_tm_map(struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_utils.c:685
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
M0_INTERNAL int nlx_dev_init(void)
Definition: klnet_drv.c:1640
struct nlx_core_bev_link cbe_tm_link
#define NULL
Definition: misc.h:38
#define M0_LNET_BUF_ACTIVE_SEND
Definition: lnet_ioctl.h:183
int(* nlx_kcore_queue_op_t)(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.h:257
nlx_kcore_queue_op_t ko_buf_passive_recv
Definition: klnet_core.h:361
#define ergo(a, b)
Definition: misc.h:293
static int nlx_dev_buf_deregister(struct nlx_kcore_domain *kd, struct nlx_kcore_buffer *kb)
Definition: klnet_drv.c:1012
M0_INTERNAL m0_bcount_t nlx_core_get_max_buffer_segment_size(struct nlx_core_domain *lcdom)
Definition: klnet_core.c:1226
nlx_kcore_queue_op_t ko_buf_msg_send
Definition: klnet_core.h:343
#define WRITABLE_USER_PAGE_GET(uaddr, pg)
Definition: klnet_drv.h:146
struct m0_file file
Definition: di.c:36
struct page * kl_page
#define M0_LOG(level,...)
Definition: trace.h:167
static int nlx_kcore_kcore_dom_init(struct nlx_kcore_domain *kd)
Definition: klnet_core.c:2061
static bool nlx_kcore_tm_invariant(const struct nlx_kcore_transfer_mc *kctm)
Definition: klnet_core.c:929
static bool nlx_core_buffer_invariant(const struct nlx_core_buffer *lcb)
Definition: lnet_core.c:149
#define M0_LNET_TM_STOP
Definition: lnet_ioctl.h:203
#define M0_LNET_BUF_MSG_SEND
Definition: lnet_ioctl.h:179
#define NLX_PAGE_OFFSET(addr)
Definition: klnet_core.h:404
#define M0_LNET_BUF_REGISTER
Definition: lnet_ioctl.h:173
static int nlx_dev_ioctl_dom_init(struct nlx_kcore_domain *kd, struct m0_lnet_dev_dom_init_params *p)
Definition: klnet_drv.c:853
static struct file_operations nlx_dev_file_ops
Definition: klnet_drv.c:1613
static struct nlx_core_domain * nlx_kcore_core_domain_map(struct nlx_kcore_domain *kd)
Definition: klnet_utils.c:582
#define M0_LNET_NIDSTR_ENCODE
Definition: lnet_ioctl.h:196
#define NLX_ALLOC_PTR(ptr)
Definition: lnet_core.h:638
void(* ko_dom_fini)(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_core.h:283
uint64_t m0_bcount_t
Definition: types.h:77
static int nlx_dev_ioctl_buf_del(const struct nlx_kcore_domain *kd, const struct m0_lnet_dev_buf_queue_params *p)
Definition: klnet_drv.c:1083
#define PAGE_SIZE
Definition: lnet_ut.c:277
static int nlx_dev_ioctl_buf_deregister(struct nlx_kcore_domain *kd, struct m0_lnet_dev_buf_deregister_params *p)
Definition: klnet_drv.c:1036
static int void * buf
Definition: dir.c:1019
uint64_t kb_magic
Definition: klnet_core.h:160
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
static int nlx_kcore_buffer_uva_to_kiov(struct nlx_kcore_buffer *kb, const struct m0_bufvec *bvec)
Definition: klnet_vec.c:236
static struct nlx_core_buffer * nlx_kcore_core_buffer_map(struct nlx_kcore_buffer *kb)
Definition: klnet_utils.c:617
static int nlx_dev_ioctl_buf_event_wait(const struct nlx_kcore_domain *kd, const struct m0_lnet_dev_buf_event_wait_params *p)
Definition: klnet_drv.c:1103
int(* ko_buf_register)(struct nlx_kcore_domain *kd, nlx_core_opaque_ptr_t buffer_id, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.h:294
Definition: sock.c:887
static m0_bcount_t count
Definition: xcode.c:167
void(* ko_tm_stop)(struct nlx_core_transfer_mc *ctm, struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_core.h:330
struct inode * inode
Definition: dir.c:624
#define NLX_ALLOC_ARR(ptr, nr)
Definition: lnet_core.h:639
#define m0_tl_endfor
Definition: tlist.h:700
return M0_RC(rc)
op
Definition: libdemo.c:64
nlx_kcore_queue_op_t ko_buf_msg_recv
Definition: klnet_core.h:337
int(* ko_buf_event_wait)(struct nlx_core_transfer_mc *ctm, struct nlx_kcore_transfer_mc *ktm, m0_time_t timeout)
Definition: klnet_core.h:383
static int nlx_kcore_nidstr_decode(const char *nidstr, uint64_t *nid)
Definition: klnet_core.c:1702
static int nlx_kcore_nidstr_encode(uint64_t nid, char nidstr[M0_NET_LNET_NIDSTR_SIZE])
Definition: klnet_core.c:1721
#define M0_LNET_BUF_PASSIVE_SEND
Definition: lnet_ioctl.h:187
static int nlx_dev_ioctl_tm_start(struct nlx_kcore_domain *kd, struct m0_lnet_dev_tm_start_params *p)
Definition: klnet_drv.c:1197
int i
Definition: dir.c:1033
#define PRIu64
Definition: types.h:58
#define M0_LNET_BUF_DEREGISTER
Definition: lnet_ioctl.h:175
int(* ko_tm_start)(struct nlx_kcore_domain *kd, struct nlx_core_transfer_mc *ctm, struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_core.h:320
#define M0_ERR_INFO(rc, fmt,...)
Definition: trace.h:215
static int nlx_dev_ioctl_tm_stop(struct nlx_kcore_domain *kd, struct m0_lnet_dev_tm_stop_params *p)
Definition: klnet_drv.c:1291
return M0_ERR(-EOPNOTSUPP)
M0_INTERNAL void nlx_dev_fini(void)
Definition: klnet_drv.c:1653
#define M0_LNET_BUF_DEL
Definition: lnet_ioctl.h:189
#define M0_ASSERT(cond)
static int nlx_dev_ioctl_buf_register(struct nlx_kcore_domain *kd, struct m0_lnet_dev_buf_register_params *p)
Definition: klnet_drv.c:901
M0_INTERNAL int nlx_dev_close(struct inode *inode, struct file *file)
Definition: klnet_drv.c:1527
static struct miscdevice nlx_dev
Definition: klnet_drv.c:1626
void(* ko_buf_deregister)(struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.h:304
M0_THREAD_ENTER
Definition: dir.c:336
nlx_kcore_queue_op_t ko_buf_passive_send
Definition: klnet_core.h:367
#define M0_LNET_NIDSTR_DECODE
Definition: lnet_ioctl.h:194
#define NLX_ALLOC(ptr, len)
Definition: lnet_core.h:637
static int nlx_dev_ioctl_nidstr_decode(struct m0_lnet_dev_nid_encdec_params *p)
Definition: klnet_drv.c:1129
M0_INTERNAL m0_bcount_t nlx_core_get_max_buffer_size(struct nlx_core_domain *lcdom)
Definition: klnet_core.c:1220
static void nlx_kcore_core_bev_unmap(struct nlx_kcore_buffer_event *kbe)
Definition: klnet_utils.c:671
M0_INTERNAL int32_t nlx_core_get_max_buffer_segments(struct nlx_core_domain *lcdom)
Definition: klnet_core.c:1237
#define M0_LNET_DEV
Definition: lnet_ioctl.h:164
#define M0_POST(cond)
nlx_kcore_queue_op_t ko_buf_active_send
Definition: klnet_core.h:355
static void nlx_core_kmem_loc_set(struct nlx_core_kmem_loc *loc, struct page *pg, uint32_t off)
Definition: klnet_core.c:1127
static void bev_link_bless(struct nlx_core_bev_link *ql, struct page *pg)
Definition: kbev_cqueue.c:92
M0_INTERNAL void nlx_core_nidstrs_put(struct nlx_core_domain *lcdom, char ***nidary)
Definition: klnet_core.c:1774
lnet_kiov_t * kb_kiov
Definition: klnet_core.h:188
struct m0_mutex kd_drv_mutex
Definition: klnet_core.h:98
static void nlx_kcore_core_tm_unmap(struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_utils.c:705
static bool nlx_dev_registered
Definition: klnet_drv.c:1631
#define M0_LNET_BUF_EVENT_WAIT
Definition: lnet_ioctl.h:191
struct nlx_kcore_ops * kd_drv_ops
Definition: klnet_core.h:101
#define M0_LNET_BUF_PASSIVE_RECV
Definition: lnet_ioctl.h:185
int(* ko_buf_del)(struct nlx_kcore_transfer_mc *ktm, struct nlx_kcore_buffer *kb)
Definition: klnet_core.h:374
uint64_t n
Definition: fops.h:107
struct m0_tl kd_drv_tms
Definition: klnet_core.h:107
#define M0_LNET_TM_START
Definition: lnet_ioctl.h:201
#define M0_LNET_IOC_MAX_NR
Definition: lnet_ioctl.h:168
static int nlx_dev_open(struct inode *inode, struct file *file)
Definition: klnet_drv.c:1494
struct nlx_core_kmem_loc kb_cb_loc
Definition: klnet_core.h:163
static int nlx_dev_tm_cleanup(struct nlx_kcore_domain *kd, struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_drv.c:1254
#define M0_LNET_NIDSTRS_GET
Definition: lnet_ioctl.h:198
static long nlx_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
Definition: klnet_drv.c:1372
#define m0_tlist_endfor
Definition: tlist.h:448
#define WRITABLE_USER_PAGE_PUT(pg)
Definition: klnet_drv.h:155
#define M0_LNET_DOM_INIT
Definition: lnet_ioctl.h:170
static bool nlx_core_tm_invariant(const struct nlx_core_transfer_mc *lctm)
Definition: lnet_core.c:120
static unsigned done
Definition: storage.c:91
#define m0_tlist_for(descr, head, obj)
Definition: tlist.h:435
#define M0_LNET_BEV_BLESS
Definition: lnet_ioctl.h:206
static int nlx_dev_ioctl_buf_queue_op(const struct m0_lnet_dev_buf_queue_params *p, nlx_kcore_queue_op_t op)
Definition: klnet_drv.c:1052
static void nlx_dev_buf_pages_unpin(const struct nlx_kcore_buffer *kb)
Definition: klnet_drv.c:999
#define M0_LNET_IOC_MAGIC
Definition: lnet_ioctl.h:166
struct nlx_core_kmem_loc kbe_bev_loc
Definition: klnet_core.h:236
static struct nlx_core_buffer_event * nlx_kcore_core_bev_map(struct nlx_kcore_buffer_event *kbe)
Definition: klnet_utils.c:651
static bool nlx_core_kmem_loc_is_empty(const struct nlx_core_kmem_loc *loc)
Definition: lnet_pvt.h:91
#define M0_LNET_IOC_MIN_NR
Definition: lnet_ioctl.h:167
#define M0_LNET_BUF_MSG_RECV
Definition: lnet_ioctl.h:177
struct m0_tl ktm_drv_bevs
Definition: klnet_core.h:136
int(* ko_dom_init)(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_core.h:275
struct nlx_core_kmem_loc ktm_ctm_loc
Definition: klnet_core.h:124
M0_BASSERT(sizeof(struct nlx_xo_domain)< PAGE_SIZE)
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
void m0_free(void *data)
Definition: memory.c:146
static bool nlx_kcore_buffer_invariant(const struct nlx_kcore_buffer *kcb)
Definition: klnet_core.c:908
nlx_kcore_queue_op_t ko_buf_active_recv
Definition: klnet_core.h:349
static void nlx_kcore_core_buffer_unmap(struct nlx_kcore_buffer *kb)
Definition: klnet_utils.c:637
int32_t rc
Definition: trigger_fop.h:47
#define M0_LNET_BUF_ACTIVE_RECV
Definition: lnet_ioctl.h:181
static int nlx_dev_ioctl_nidstrs_get(struct nlx_kcore_domain *kd, struct m0_lnet_dev_nidstrs_get_params *p)
Definition: klnet_drv.c:1153
nlx_core_opaque_ptr_t cb_buffer_id
M0_INTERNAL int nlx_core_nidstrs_get(struct nlx_core_domain *lcdom, char ***nidary)
Definition: klnet_core.c:1744
struct m0_tl kd_drv_bufs
Definition: klnet_core.h:113
static bool nlx_kcore_domain_invariant(const struct nlx_kcore_domain *kd)
Definition: klnet_core.c:897
static int nlx_kcore_LNetMDUnlink(struct nlx_kcore_transfer_mc *kctm, struct nlx_kcore_buffer *kcb)
Definition: klnet_utils.c:433
static int nlx_dev_ioctl_bev_bless(struct nlx_kcore_domain *kd, struct m0_lnet_dev_bev_bless_params *p)
Definition: klnet_drv.c:1310