Motr  M0
klnet_core.c
Go to the documentation of this file.
1 /* -*- C -*- */
2 /*
3  * Copyright (c) 2012-2020 Seagate Technology LLC and/or its Affiliates
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  * For any questions about this software or licensing,
18  * please email opensource@seagate.com or cortx-questions@seagate.com.
19  *
20  */
21 
22 
787 /*
788  ******************************************************************************
789  End of DLD
790  ******************************************************************************
791  */
792 
793 #include "lib/mutex.h"
794 #include "lib/string.h" /* m0_streq */
796 
797 /* LNet API, LNET_NIDSTR_SIZE */
798 #if M0_LUSTRE_VERSION < 2110
799 #include <lnet/lnet.h>
800 #else
801 #include <lnet/nidstr.h>
802 #endif
803 
804 #if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(2, 7, 51, 0)
805 #include <lnet/api.h>
806 #include <lnet/lib-types.h>
807 #endif
808 
809 /* include local files */
812 
822 static struct m0_mutex nlx_kcore_mutex;
823 
825 static struct m0_tl nlx_kcore_tms;
826 
827 M0_TL_DESCR_DEFINE(tms, "nlx tms", static, struct nlx_kcore_transfer_mc,
828  ktm_tm_linkage, ktm_magic, M0_NET_LNET_KCORE_TM_MAGIC,
830 M0_TL_DEFINE(tms, static, struct nlx_kcore_transfer_mc);
831 
832 M0_TL_DESCR_DEFINE(drv_tms, "drv tms", static, struct nlx_kcore_transfer_mc,
833  ktm_drv_linkage, ktm_magic, M0_NET_LNET_KCORE_TM_MAGIC,
835 M0_TL_DEFINE(drv_tms, static, struct nlx_kcore_transfer_mc);
836 
837 M0_TL_DESCR_DEFINE(drv_bufs, "drv bufs", static, struct nlx_kcore_buffer,
838  kb_drv_linkage, kb_magic, M0_NET_LNET_KCORE_BUF_MAGIC,
840 M0_TL_DEFINE(drv_bufs, static, struct nlx_kcore_buffer);
841 
842 M0_TL_DESCR_DEFINE(drv_bevs, "drv bevs", static, struct nlx_kcore_buffer_event,
843  kbe_drv_linkage, kbe_magic, M0_NET_LNET_KCORE_BEV_MAGIC,
845 M0_TL_DEFINE(drv_bevs, static, struct nlx_kcore_buffer_event);
846 
847 /* assert the equivalence of LNet and Motr data types */
848 M0_BASSERT(sizeof(__u64) == sizeof(uint64_t));
849 
861  struct nlx_core_buffer *lcbuf,
862  struct nlx_kcore_buffer *kcb,
863  lnet_md_t *umd);
865  struct nlx_core_buffer *lcbuf,
866  struct nlx_kcore_buffer *kcb,
867  lnet_md_t *umd);
869  struct nlx_core_buffer *lcbuf,
870  struct nlx_kcore_buffer *kcb,
871  lnet_md_t *umd);
872 };
874 #define _NLXIS(s) ._##s = s
875 
879 
880 #undef _NLXI
881 };
882 
883 #define NLX_kcore_LNetMDAttach(ktm, lcbuf, kb, umd) \
884  (*nlx_kcore_iv._nlx_kcore_LNetMDAttach)(ktm, lcbuf, kb, umd)
885 #define NLX_kcore_LNetPut(ktm, lcbuf, kb, umd) \
886  (*nlx_kcore_iv._nlx_kcore_LNetPut)(ktm, lcbuf, kb, umd)
887 #define NLX_kcore_LNetGet(ktm, lcbuf, kb, umd) \
888  (*nlx_kcore_iv._nlx_kcore_LNetGet)(ktm, lcbuf, kb, umd)
889 
897 static bool nlx_kcore_domain_invariant(const struct nlx_kcore_domain *kd)
898 {
899  return kd != NULL && kd->kd_magic == M0_NET_LNET_KCORE_DOM_MAGIC &&
902 }
903 
908 static bool nlx_kcore_buffer_invariant(const struct nlx_kcore_buffer *kcb)
909 {
910  return _0C(kcb != NULL) &&
913 }
914 
919  const struct nlx_kcore_buffer_event *kbe)
920 {
921  return kbe != NULL && kbe->kbe_magic == M0_NET_LNET_KCORE_BEV_MAGIC &&
923 }
924 
929 static bool nlx_kcore_tm_invariant(const struct nlx_kcore_transfer_mc *kctm)
930 {
931  return kctm != NULL && kctm->ktm_magic == M0_NET_LNET_KCORE_TM_MAGIC &&
933 }
934 
/**
 * Checks whether an end point address is already in use by some transfer
 * machine on the global nlx_kcore_tms list.
 * @param cepa End point address to test.
 * @return true iff a TM with an equal address is found.
 * NOTE(review): callers presumably hold nlx_kcore_mutex while the list is
 * scanned - confirm against call sites (an elided precondition line is not
 * visible in this view).
 */
static bool nlx_kcore_addr_in_use(struct nlx_core_ep_addr *cepa)
{

	return m0_tl_exists(tms, scan, &nlx_kcore_tms,
			    nlx_core_ep_eq(&scan->ktm_addr, cepa));
}
946 
954 {
955  int tmid = M0_NET_LNET_TMID_MAX;
956  struct nlx_kcore_transfer_mc *scan;
957  struct nlx_core_ep_addr *scanaddr;
959 
960  /* list is in descending order by tmid */
961  m0_tl_for(tms, &nlx_kcore_tms, scan) {
962  scanaddr = &scan->ktm_addr;
963  if (scanaddr->cepa_nid == cepa->cepa_nid &&
964  scanaddr->cepa_pid == cepa->cepa_pid &&
965  scanaddr->cepa_portal == cepa->cepa_portal) {
966  if (scanaddr->cepa_tmid == tmid)
967  --tmid;
968  else if (scanaddr->cepa_tmid < tmid)
969  break;
970  }
971  } m0_tl_endfor;
972  return tmid >= 0 ? tmid : M0_ERR(-EADDRNOTAVAIL);
973 }
974 
981 {
982  struct nlx_kcore_transfer_mc *scan;
983  struct nlx_core_ep_addr *scanaddr;
984  struct nlx_core_ep_addr *cepa = &kctm->ktm_addr;
986 
987  m0_tl_for(tms, &nlx_kcore_tms, scan) {
988  scanaddr = &scan->ktm_addr;
989  if (scanaddr->cepa_tmid <= cepa->cepa_tmid) {
990  tms_tlist_add_before(scan, kctm);
991  return;
992  }
993  } m0_tl_endfor;
994  tms_tlist_add_tail(&nlx_kcore_tms, kctm);
995 }
996 
1004 static void nlx_kcore_eq_cb(lnet_event_t *event)
1005 {
1006  struct nlx_kcore_buffer *kbp;
1007  struct nlx_kcore_transfer_mc *ktm;
1008  struct nlx_core_transfer_mc *ctm;
1009  struct nlx_core_bev_link *ql;
1010  struct nlx_core_buffer_event *bev;
1011  m0_time_t now = m0_time_now();
1012  bool is_unlinked = false;
1013  unsigned mlength;
1014  unsigned offset;
1015  int status;
1016 
1017  M0_PRE(event != NULL);
1018  if (event->type == LNET_EVENT_ACK) {
1019  /* we do not use ACK */
1020  NLXDBG(&nlx_debug, 1,
1021  nlx_kprint_lnet_event("nlx_kcore_eq_cb: filtered ACK",
1022  event));
1023  return;
1024  }
1025  kbp = event->md.user_ptr;
1027  ktm = kbp->kb_ktm;
1029 
1030  NLXDBGP(ktm, 1, "\t%p: eq_cb: %p %s U:%d S:%d T:%d buf:%lx\n",
1031  ktm, event, nlx_kcore_lnet_event_type_to_string(event->type),
1032  event->unlinked, event->status, event->md.threshold,
1033  (unsigned long) kbp->kb_buffer_id);
1034  NLXDBG(ktm, 2, nlx_kprint_lnet_event("eq_cb", event));
1035  NLXDBG(ktm, 3, nlx_kprint_kcore_tm("eq_cb", ktm));
1036 
1037  if (event->unlinked != 0) {
1038  LNetInvalidateMDHandle(&kbp->kb_mdh); /* Invalid use, but safe */
1039  /* kbp->kb_ktm = NULL set below */
1040  is_unlinked = true;
1041  }
1042  status = event->status;
1043  mlength = event->mlength;
1044  offset = event->offset;
1045 
1046  if (event->type == LNET_EVENT_SEND &&
1048  /* An LNetGet related event, normally ignored */
1049  if (!is_unlinked) {
1050  NLXDBGP(ktm, 1, "\t%p: ignored LNetGet() SEND\n", ktm);
1051  return;
1052  }
1053  /* An out-of-order SEND, or
1054  cancellation notification piggy-backed onto an in-order SEND.
1055  The only way to distinguish is from the value of the
1056  event->kb_ooo_reply field.
1057  */
1058  NLXDBGP(ktm, 1,
1059  "\t%p: LNetGet() SEND with unlinked: thr:%d ooo:%d\n",
1060  ktm, event->md.threshold, (int) kbp->kb_ooo_reply);
1061  if (status == 0) {
1062  if (!kbp->kb_ooo_reply)
1063  status = -ECANCELED;
1064  else { /* from earlier REPLY */
1065  mlength = kbp->kb_ooo_mlength;
1066  offset = kbp->kb_ooo_offset;
1067  status = kbp->kb_ooo_status;
1068  }
1069  }
1070  } else if (event->type == LNET_EVENT_UNLINK) {/* see nlx_core_buf_del */
1071  M0_ASSERT(is_unlinked);
1072  status = -ECANCELED;
1073  } else if (!is_unlinked) {
1074  NLXDBGP(ktm, 1, "\t%p: eq_cb: %p %s !unlinked Q=%d\n", ktm,
1075  event, nlx_kcore_lnet_event_type_to_string(event->type),
1076  kbp->kb_qtype);
1077  /* We may get REPLY before SEND, so ignore such events,
1078  but save the significant values for when the SEND arrives.
1079  */
1080  if (kbp->kb_qtype == M0_NET_QT_ACTIVE_BULK_RECV) {
1081  kbp->kb_ooo_reply = true;
1082  kbp->kb_ooo_mlength = mlength;
1083  kbp->kb_ooo_offset = offset;
1084  kbp->kb_ooo_status = status;
1085  return;
1086  }
1087  /* we don't expect anything other than receive messages */
1089  }
1090 
1091  spin_lock(&ktm->ktm_bevq_lock);
1092  ctm = nlx_kcore_core_tm_map_atomic(ktm);
1093  ql = bev_cqueue_pnext(&ctm->ctm_bevq);
1094  bev = container_of(ql, struct nlx_core_buffer_event, cbe_tm_link);
1095  bev->cbe_buffer_id = kbp->kb_buffer_id;
1096  bev->cbe_time = m0_time_sub(now, kbp->kb_add_time);
1097  bev->cbe_status = status;
1098  bev->cbe_length = mlength;
1099  bev->cbe_offset = offset;
1100  bev->cbe_unlinked = is_unlinked;
1101  if (event->hdr_data != 0) {
1102  bev->cbe_sender.cepa_nid = event->initiator.nid;
1103  bev->cbe_sender.cepa_pid = event->initiator.pid;
1104  nlx_kcore_hdr_data_decode(event->hdr_data,
1105  &bev->cbe_sender.cepa_portal,
1106  &bev->cbe_sender.cepa_tmid);
1107  } else
1108  M0_SET0(&bev->cbe_sender);
1109 
1110  /* Reset in spinlock to synchronize with driver nlx_dev_tm_cleanup() */
1111  if (is_unlinked)
1112  kbp->kb_ktm = NULL;
1113  bev_cqueue_put(&ctm->ctm_bevq, ql);
1115  spin_unlock(&ktm->ktm_bevq_lock);
1116 
1117  wake_up(&ktm->ktm_wq);
1118 }
1119 
1128  struct page *pg, uint32_t off)
1129 {
1130  M0_PRE(off < PAGE_SIZE && ergo(pg == NULL, off == 0));
1131  M0_PRE(loc != NULL);
1132 
1133  loc->kl_page = pg;
1134  loc->kl_offset = off;
1136 }
1137 
1138 M0_INTERNAL void *nlx_core_mem_alloc(size_t size, unsigned shift)
1139 {
1140  return m0_alloc(size);
1141 }
1142 
/**
 * Frees memory obtained with nlx_core_mem_alloc().
 * The @size and @shift arguments are not required by the kernel
 * implementation and are ignored.
 * @param data Memory to free; m0_free() tolerates NULL.
 */
M0_INTERNAL void nlx_core_mem_free(void *data, size_t size, unsigned shift)
{
	m0_free(data);
}
1147 
1158  struct nlx_core_domain *cd)
1159 {
1160  M0_PRE(kd != NULL && cd != NULL);
1163  cd->cd_kpvt = kd;
1164  return 0;
1165 }
1166 
1167 M0_INTERNAL int nlx_core_dom_init(struct m0_net_domain *dom,
1168  struct nlx_core_domain *cd)
1169 {
1170  struct nlx_kcore_domain *kd;
1171  int rc;
1172 
1173  M0_PRE(dom != NULL && cd != NULL);
1174  NLX_ALLOC_PTR(kd);
1175  if (kd == NULL)
1176  return M0_ERR(-ENOMEM);
1178  if (rc != 0)
1179  goto fail_free_kd;
1180  nlx_core_kmem_loc_set(&kd->kd_cd_loc, virt_to_page(cd),
1181  NLX_PAGE_OFFSET((unsigned long) cd));
1182  rc = nlx_kcore_core_dom_init(kd, cd);
1183  if (rc == 0)
1184  return 0;
1185 
1186  /* failed */
1188 fail_free_kd:
1189  m0_free(kd);
1190  M0_ASSERT(rc != 0);
1191  return M0_RC(rc);
1192 }
1193 
1201  struct nlx_core_domain *cd)
1202 {
1203  M0_PRE(cd != NULL && cd->cd_kpvt == kd);
1204  cd->cd_kpvt = NULL;
1205 }
1206 
1207 M0_INTERNAL void nlx_core_dom_fini(struct nlx_core_domain *cd)
1208 {
1209  struct nlx_kcore_domain *kd;
1210 
1211  M0_PRE(cd != NULL);
1212  kd = cd->cd_kpvt;
1214  nlx_kcore_core_dom_fini(kd, cd);
1217  m0_free(kd);
1218 }
1219 
1221  *lcdom)
1222 {
1223  return LNET_MAX_PAYLOAD;
1224 }
1225 
1228  *lcdom)
1229 {
1230  /* PAGE_SIZE limit applies only when LNET_MD_KIOV has been set in
1231  * lnet_md_t::options. There's no such limit in MD fragment size when
1232  * LNET_MD_IOVEC is set. DLD calls for only LNET_MD_KIOV to be used.
1233  */
1234  return PAGE_SIZE;
1235 }
1236 
1238  *lcdom)
1239 {
1240  return LNET_MAX_IOV;
1241 }
1242 
1252  nlx_core_opaque_ptr_t buffer_id,
1253  struct nlx_core_buffer *cb,
1254  struct nlx_kcore_buffer *kb)
1255 {
1257  drv_bufs_tlink_init(kb);
1258  kb->kb_ktm = NULL;
1259  kb->kb_buffer_id = buffer_id;
1260  kb->kb_kiov = NULL;
1261  kb->kb_kiov_len = 0;
1262  kb->kb_kiov_orig_len = 0;
1263  LNetInvalidateMDHandle(&kb->kb_mdh);
1264  kb->kb_ooo_reply = false;
1265  kb->kb_ooo_mlength = 0;
1266  kb->kb_ooo_status = 0;
1267  kb->kb_ooo_offset = 0;
1268 
1269  cb->cb_kpvt = kb;
1270  cb->cb_buffer_id = buffer_id;
1272 
1274  return 0;
1275 }
1276 
1283  struct nlx_kcore_buffer *kb)
1284 {
1286  M0_PRE(LNetMDHandleIsInvalid(kb->kb_mdh));
1287  drv_bufs_tlink_fini(kb);
1288  kb->kb_magic = 0;
1289  m0_free(kb->kb_kiov);
1290  cb->cb_buffer_id = 0;
1291  cb->cb_kpvt = NULL;
1292  cb->cb_magic = 0;
1293 }
1294 
1295 M0_INTERNAL int nlx_core_buf_register(struct nlx_core_domain *cd,
1296  nlx_core_opaque_ptr_t buffer_id,
1297  const struct m0_bufvec *bvec,
1298  struct nlx_core_buffer *cb)
1299 {
1300  int rc;
1301  struct nlx_kcore_buffer *kb;
1302  struct nlx_kcore_domain *kd;
1303 
1304  M0_PRE(cb != NULL && cb->cb_kpvt == NULL);
1305  M0_PRE(cd != NULL);
1306  kd = cd->cd_kpvt;
1307  NLX_ALLOC_PTR(kb);
1308  if (kb == NULL)
1309  return M0_ERR(-ENOMEM);
1310  nlx_core_kmem_loc_set(&kb->kb_cb_loc, virt_to_page(cb),
1311  NLX_PAGE_OFFSET((unsigned long) cb));
1312  rc = nlx_kcore_buf_register(kd, buffer_id, cb, kb);
1313  if (rc != 0)
1314  goto fail_free_kb;
1316  if (rc != 0)
1317  goto fail_buf_registered;
1318  M0_ASSERT(kb->kb_kiov != NULL && kb->kb_kiov_len > 0);
1319  M0_POST(nlx_kcore_buffer_invariant(cb->cb_kpvt));
1320  return 0;
1321 
1322 fail_buf_registered:
1323  nlx_kcore_buf_deregister(cb, kb);
1324 fail_free_kb:
1325  m0_free(kb);
1326  M0_ASSERT(rc != 0);
1327  return M0_RC(rc);
1328 }
1329 
/**
 * Deregisters a buffer from the kernel core (core API entry point).
 * Releases the kernel-private buffer object that was attached to the core
 * buffer by nlx_core_buf_register().
 * @param cd Core domain (not referenced in the visible body).
 * @param cb Core buffer; cb->cb_kpvt must be the kernel buffer private.
 */
M0_INTERNAL void nlx_core_buf_deregister(struct nlx_core_domain *cd,
					 struct nlx_core_buffer *cb)
{
	struct nlx_kcore_buffer *kb;

	kb = cb->cb_kpvt;
	nlx_kcore_buf_deregister(cb, kb);
	m0_free(kb);
}
1340 
1349  struct nlx_core_buffer *cb,
1350  struct nlx_kcore_buffer *kb)
1351 {
1352  lnet_md_t umd;
1353  int rc;
1354 
1359  M0_PRE(cb->cb_length > 0);
1360  M0_PRE(cb->cb_min_receive_size <= cb->cb_length);
1361  M0_PRE(cb->cb_max_operations > 0);
1362 
1363  nlx_kcore_umd_init(ktm, cb, kb, cb->cb_max_operations,
1364  cb->cb_min_receive_size, LNET_MD_OP_PUT,
1365  false, &umd);
1366  cb->cb_match_bits =
1368  cb->cb_addr = ktm->ktm_addr;
1369  rc = NLX_kcore_LNetMDAttach(ktm, cb, kb, &umd);
1370  return M0_RC(rc);
1371 }
1372 
/**
 * Enqueues a buffer for message reception (core API entry point).
 * Extracts the kernel-private TM and buffer objects and delegates to
 * nlx_kcore_buf_msg_recv().
 * @param cd  Core domain, unused in the kernel transport.
 * @param ctm Core TM; ctm->ctm_kpvt is the kernel TM private.
 * @param cb  Core buffer; cb->cb_kpvt is the kernel buffer private.
 * @return Result of the kernel-core operation.
 */
M0_INTERNAL int nlx_core_buf_msg_recv(struct nlx_core_domain *cd, /* not used */
				      struct nlx_core_transfer_mc *ctm,
				      struct nlx_core_buffer *cb)
{
	struct nlx_kcore_transfer_mc *ktm;
	struct nlx_kcore_buffer *kb;

	ktm = ctm->ctm_kpvt;
	kb = cb->cb_kpvt;
	return nlx_kcore_buf_msg_recv(ktm, cb, kb);
}
1386 
1395  struct nlx_core_buffer *cb,
1396  struct nlx_kcore_buffer *kb)
1397 {
1398  lnet_md_t umd;
1399  int rc;
1400 
1405  M0_PRE(cb->cb_length > 0);
1406  M0_PRE(cb->cb_max_operations == 1);
1407 
1408  nlx_kcore_umd_init(ktm, cb, kb, 1, 0, 0, false, &umd);
1409  nlx_kcore_kiov_adjust_length(ktm, kb, &umd, cb->cb_length);
1410  cb->cb_match_bits =
1412  rc = NLX_kcore_LNetPut(ktm, cb, kb, &umd);
1414  return M0_RC(rc);
1415 }
1416 
/**
 * Enqueues a buffer for message transmission (core API entry point).
 * Extracts the kernel-private TM and buffer objects and delegates to
 * nlx_kcore_buf_msg_send().
 * @param cd  Core domain, unused in the kernel transport.
 * @param ctm Core TM; ctm->ctm_kpvt is the kernel TM private.
 * @param cb  Core buffer; cb->cb_kpvt is the kernel buffer private.
 * @return Result of the kernel-core operation.
 */
M0_INTERNAL int nlx_core_buf_msg_send(struct nlx_core_domain *cd, /* not used */
				      struct nlx_core_transfer_mc *ctm,
				      struct nlx_core_buffer *cb)
{
	struct nlx_kcore_transfer_mc *ktm;
	struct nlx_kcore_buffer *kb;

	ktm = ctm->ctm_kpvt;
	kb = cb->cb_kpvt;
	return nlx_kcore_buf_msg_send(ktm, cb, kb);
}
1430 
1439  struct nlx_core_buffer *cb,
1440  struct nlx_kcore_buffer *kb)
1441 {
1442  uint32_t tmid;
1443  uint64_t counter;
1444  lnet_md_t umd;
1445  int rc;
1446 
1451  M0_PRE(cb->cb_length > 0);
1452  M0_PRE(cb->cb_max_operations == 1);
1453 
1454  M0_PRE(cb->cb_match_bits > 0);
1456  M0_PRE(tmid == cb->cb_addr.cepa_tmid);
1459 
1460  nlx_kcore_umd_init(ktm, cb, kb, 1, 0, 0, true, &umd);
1461  nlx_kcore_kiov_adjust_length(ktm, kb, &umd, cb->cb_length);
1462  rc = NLX_kcore_LNetGet(ktm, cb, kb, &umd);
1464  return M0_RC(rc);
1465 }
1466 
/**
 * Enqueues a buffer for active bulk receive (core API entry point).
 * Extracts the kernel-private TM and buffer objects and delegates to
 * nlx_kcore_buf_active_recv().
 * @param cd  Core domain, unused in the kernel transport.
 * @param ctm Core TM; ctm->ctm_kpvt is the kernel TM private.
 * @param cb  Core buffer; cb->cb_kpvt is the kernel buffer private.
 * @return Result of the kernel-core operation.
 */
M0_INTERNAL int nlx_core_buf_active_recv(struct nlx_core_domain *cd, /* not used */
					 struct nlx_core_transfer_mc *ctm,
					 struct nlx_core_buffer *cb)
{
	struct nlx_kcore_transfer_mc *ktm;
	struct nlx_kcore_buffer *kb;

	ktm = ctm->ctm_kpvt;
	kb = cb->cb_kpvt;
	return nlx_kcore_buf_active_recv(ktm, cb, kb);
}
1480 
1489  struct nlx_core_buffer *cb,
1490  struct nlx_kcore_buffer *kb)
1491 {
1492  uint32_t tmid;
1493  uint64_t counter;
1494  lnet_md_t umd;
1495  int rc;
1496 
1501  M0_PRE(cb->cb_length > 0);
1502  M0_PRE(cb->cb_max_operations == 1);
1503 
1504  M0_PRE(cb->cb_match_bits > 0);
1506  M0_PRE(tmid == cb->cb_addr.cepa_tmid);
1509 
1510  nlx_kcore_umd_init(ktm, cb, kb, 1, 0, 0, false, &umd);
1511  nlx_kcore_kiov_adjust_length(ktm, kb, &umd, cb->cb_length);
1512  rc = NLX_kcore_LNetPut(ktm, cb, kb, &umd);
1514  return M0_RC(rc);
1515 }
1516 
/**
 * Enqueues a buffer for active bulk send (core API entry point).
 * Extracts the kernel-private TM and buffer objects and delegates to
 * nlx_kcore_buf_active_send().
 * @param cd  Core domain, unused in the kernel transport.
 * @param ctm Core TM; ctm->ctm_kpvt is the kernel TM private.
 * @param cb  Core buffer; cb->cb_kpvt is the kernel buffer private.
 * @return Result of the kernel-core operation.
 */
M0_INTERNAL int nlx_core_buf_active_send(struct nlx_core_domain *cd, /* not used */
					 struct nlx_core_transfer_mc *ctm,
					 struct nlx_core_buffer *cb)
{
	struct nlx_kcore_transfer_mc *ktm;
	struct nlx_kcore_buffer *kb;

	ktm = ctm->ctm_kpvt;
	kb = cb->cb_kpvt;
	return nlx_kcore_buf_active_send(ktm, cb, kb);
}
1530 
1539  struct nlx_core_buffer *cb,
1540  struct nlx_kcore_buffer *kb)
1541 {
1542  uint32_t tmid;
1543  uint64_t counter;
1544  lnet_md_t umd;
1545  int rc;
1546 
1551  M0_PRE(cb->cb_length > 0);
1552  M0_PRE(cb->cb_max_operations == 1);
1553  M0_PRE(cb->cb_match_bits > 0);
1554 
1556  M0_PRE(tmid == ktm->ktm_addr.cepa_tmid);
1559 
1560  nlx_kcore_umd_init(ktm, cb, kb, 1, 0, LNET_MD_OP_PUT, false, &umd);
1561  cb->cb_addr = ktm->ktm_addr;
1562  rc = NLX_kcore_LNetMDAttach(ktm, cb, kb, &umd);
1563  return M0_RC(rc);
1564 }
1565 
/**
 * Stages a buffer for passive bulk receive (core API entry point).
 * Extracts the kernel-private TM and buffer objects and delegates to
 * nlx_kcore_buf_passive_recv().
 * @param cd  Core domain, unused in the kernel transport.
 * @param ctm Core TM; ctm->ctm_kpvt is the kernel TM private.
 * @param cb  Core buffer; cb->cb_kpvt is the kernel buffer private.
 * @return Result of the kernel-core operation.
 */
M0_INTERNAL int nlx_core_buf_passive_recv(struct nlx_core_domain *cd, /* not used */
					  struct nlx_core_transfer_mc *ctm,
					  struct nlx_core_buffer *cb)
{
	struct nlx_kcore_transfer_mc *ktm;
	struct nlx_kcore_buffer *kb;

	ktm = ctm->ctm_kpvt;
	kb = cb->cb_kpvt;
	return nlx_kcore_buf_passive_recv(ktm, cb, kb);
}
1579 
1588  struct nlx_core_buffer *cb,
1589  struct nlx_kcore_buffer *kb)
1590 {
1591  uint32_t tmid;
1592  uint64_t counter;
1593  lnet_md_t umd;
1594  int rc;
1595 
1600  M0_PRE(cb->cb_length > 0);
1601  M0_PRE(cb->cb_max_operations == 1);
1602  M0_PRE(cb->cb_match_bits > 0);
1603 
1605  M0_PRE(tmid == ktm->ktm_addr.cepa_tmid);
1608 
1609  nlx_kcore_umd_init(ktm, cb, kb, 1, 0, LNET_MD_OP_GET, false, &umd);
1610  nlx_kcore_kiov_adjust_length(ktm, kb, &umd, cb->cb_length);
1611  cb->cb_addr = ktm->ktm_addr;
1612  rc = NLX_kcore_LNetMDAttach(ktm, cb, kb, &umd);
1614  return M0_RC(rc);
1615 }
1616 
/**
 * Stages a buffer for passive bulk send (core API entry point).
 * Extracts the kernel-private TM and buffer objects and delegates to
 * nlx_kcore_buf_passive_send().
 * @param cd  Core domain, unused in the kernel transport.
 * @param ctm Core TM; ctm->ctm_kpvt is the kernel TM private.
 * @param cb  Core buffer; cb->cb_kpvt is the kernel buffer private.
 * @return Result of the kernel-core operation.
 */
M0_INTERNAL int nlx_core_buf_passive_send(struct nlx_core_domain *cd, /* not used */
					  struct nlx_core_transfer_mc *ctm,
					  struct nlx_core_buffer *cb)
{
	struct nlx_kcore_transfer_mc *ktm;
	struct nlx_kcore_buffer *kb;

	ktm = ctm->ctm_kpvt;
	kb = cb->cb_kpvt;
	return nlx_kcore_buf_passive_send(ktm, cb, kb);
}
1630 
/**
 * Cancels an outstanding buffer operation (core API entry point) by
 * unlinking the LNet memory descriptor associated with the buffer.
 * @param cd  Core domain, unused in the kernel transport.
 * @param ctm Core TM; ctm->ctm_kpvt is the kernel TM private.
 * @param cb  Core buffer; cb->cb_kpvt is the kernel buffer private.
 * @return Result of nlx_kcore_LNetMDUnlink().
 */
M0_INTERNAL int nlx_core_buf_del(struct nlx_core_domain *cd, /* not used */
				 struct nlx_core_transfer_mc *ctm,
				 struct nlx_core_buffer *cb)
{
	struct nlx_kcore_transfer_mc *ktm;
	struct nlx_kcore_buffer *kb;

	ktm = ctm->ctm_kpvt;
	kb = cb->cb_kpvt;

	/* Subtle: Cancelling the MD associated with the buffer
	   could result in a LNet UNLINK event if the buffer operation is
	   terminated by LNet.
	   The unlink bit is also set in other LNet events but does not
	   signify cancel in those cases.
	 */
	return nlx_kcore_LNetMDUnlink(ktm, kb);
}
1651 
1659  struct nlx_kcore_transfer_mc *ktm,
1661 {
1662  int rc;
1663  struct timespec ts;
1664  m0_time_t now;
1665 
1668 
1669  if (!bev_cqueue_is_empty(&ctm->ctm_bevq))
1670  return M0_RC(0);
1671  else if (timeout == 0)
1672  return M0_RC(-ETIMEDOUT);
1673 
1674  now = m0_time_now();
1675  timeout = timeout > now ? m0_time_sub(timeout, now) : 0;
1676  ts.tv_sec = m0_time_seconds(timeout);
1677  ts.tv_nsec = m0_time_nanoseconds(timeout);
1678 
1679  rc = wait_event_interruptible_timeout(ktm->ktm_wq,
1680  !bev_cqueue_is_empty(&ctm->ctm_bevq), timespec_to_jiffies(&ts));
1681 
1682  return M0_RC(rc == 0 ? -ETIMEDOUT : rc < 0 ? rc : 0);
1683 }
1684 
1685 M0_INTERNAL int nlx_core_buf_event_wait(struct nlx_core_domain *cd,
1686  struct nlx_core_transfer_mc *ctm,
1688 {
1689  struct nlx_kcore_transfer_mc *ktm;
1690 
1692  ktm = ctm->ctm_kpvt;
1693  return nlx_kcore_buf_event_wait(ctm, ktm, timeout);
1694 }
1695 
1702 static int nlx_kcore_nidstr_decode(const char *nidstr, uint64_t *nid)
1703 {
1704  *nid = libcfs_str2nid(nidstr);
1705  if (*nid == LNET_NID_ANY)
1706  return M0_ERR_INFO(-EINVAL, "nidstr=%s", nidstr);
1707  return 0;
1708 }
1709 
/**
 * Decodes a NID string to a NID (core API entry point).
 * The domain argument is not used in the kernel transport; the work is
 * delegated to nlx_kcore_nidstr_decode().
 */
M0_INTERNAL int nlx_core_nidstr_decode(struct nlx_core_domain *lcdom, /* not used */
				       const char *nidstr, uint64_t * nid)
{
	return nlx_kcore_nidstr_decode(nidstr, nid);
}
1715 
1721 static int nlx_kcore_nidstr_encode(uint64_t nid,
1722  char nidstr[M0_NET_LNET_NIDSTR_SIZE])
1723 {
1724  const char *cp = libcfs_nid2str(nid);
1725 
1726  M0_ASSERT(cp != NULL && *cp != 0);
1727  strncpy(nidstr, cp, M0_NET_LNET_NIDSTR_SIZE - 1);
1728  nidstr[M0_NET_LNET_NIDSTR_SIZE - 1] = 0;
1729  return 0;
1730 }
1731 
/**
 * Encodes a NID as a NID string (core API entry point).
 * The domain argument is not used in the kernel transport; the work is
 * delegated to nlx_kcore_nidstr_encode().
 */
M0_INTERNAL int nlx_core_nidstr_encode(struct nlx_core_domain *lcdom, /* not used */
				       uint64_t nid,
				       char nidstr[M0_NET_LNET_NIDSTR_SIZE])
{
	return nlx_kcore_nidstr_encode(nid, nidstr);
}
1738 
1744 M0_INTERNAL int nlx_core_nidstrs_get(struct nlx_core_domain *lcdom,
1745  char ***nidary)
1746 {
1747  lnet_process_id_t id;
1748  char *nidstr;
1749  int i;
1750  int j;
1751  int nr;
1752  int rc;
1753 
1754  for (nr = 0, rc = 0; rc != -ENOENT; ++nr)
1755  rc = LNetGetId(nr, &id);
1756  M0_ALLOC_ARR(*nidary, nr);
1757  if (*nidary == NULL)
1758  return M0_ERR(-ENOMEM);
1759  for (i = 0; i < nr - 1; ++i) {
1760  rc = LNetGetId(i, &id);
1761  M0_ASSERT(rc == 0);
1762  nidstr = libcfs_nid2str(id.nid);
1763  M0_ASSERT(nidstr != NULL);
1764  (*nidary)[i] = m0_strdup(nidstr);
1765  if ((*nidary)[i] == NULL) {
1766  for (j = 0; j < i; ++j)
1767  m0_free((*nidary)[j]);
1768  return M0_ERR(-ENOMEM);
1769  }
1770  }
1771  return M0_RC(0);
1772 }
1773 
1774 M0_INTERNAL void nlx_core_nidstrs_put(struct nlx_core_domain *lcdom,
1775  char ***nidary)
1776 {
1777  int i;
1778 
1779  for (i = 0; (*nidary)[i] != NULL; ++i)
1780  m0_free((*nidary)[i]);
1781  m0_free(*nidary);
1782  *nidary = NULL;
1783 }
1784 
/**
 * Allocates and "blesses" a new buffer event structure for the TM's
 * circular buffer event queue, recording the page backing its queue link.
 * @param cd   Core domain (not referenced in the visible body).
 * @param ctm  Core TM; ctm->ctm_kpvt is the kernel TM private.
 * @param bevp Receives the new event; set to NULL on allocation failure.
 * @retval 0 on success, -ENOMEM on allocation failure.
 */
M0_INTERNAL int nlx_core_new_blessed_bev(struct nlx_core_domain *cd,
					 struct nlx_core_transfer_mc *ctm,
					 struct nlx_core_buffer_event **bevp)
{
	struct nlx_core_buffer_event *bev;
	struct nlx_kcore_transfer_mc *ktm;

	/* ktm is presumably consumed by an invariant check elided from
	 * this view - confirm against upstream. */
	ktm = ctm->ctm_kpvt;

	NLX_ALLOC_ALIGNED_PTR(bev);
	if (bev == NULL) {
		*bevp = NULL;
		return M0_ERR(-ENOMEM);
	}
	bev_link_bless(&bev->cbe_tm_link, virt_to_page(&bev->cbe_tm_link));
	*bevp = bev;
	return 0;
}
1804 
1811 static void nlx_kcore_tm_stop(struct nlx_core_transfer_mc *ctm,
1812  struct nlx_kcore_transfer_mc *ktm)
1813 {
1814  int rc;
1815 
1817  M0_PRE(drv_bevs_tlist_is_empty(&ktm->ktm_drv_bevs));
1818 
1819  rc = LNetEQFree(ktm->ktm_eqh);
1820  M0_ASSERT(rc == 0);
1821 
1823  tms_tlist_del(ktm);
1825  drv_bevs_tlist_fini(&ktm->ktm_drv_bevs);
1826  drv_tms_tlink_fini(ktm);
1827  tms_tlink_fini(ktm);
1828  ktm->ktm_magic = 0;
1829  /* allow kernel cleanup even if core is invalid */
1830  if (nlx_core_tm_invariant(ctm))
1831  ctm->ctm_kpvt = NULL;
1832 }
1833 
1834 M0_INTERNAL void nlx_core_tm_stop(struct nlx_core_domain *cd,
1835  struct nlx_core_transfer_mc *ctm)
1836 {
1837  struct nlx_kcore_transfer_mc *ktm;
1838 
1840  ktm = ctm->ctm_kpvt;
1841  nlx_kcore_tm_stop(ctm, ktm);
1843  m0_free(ktm);
1844 }
1845 
1858 static int nlx_kcore_tm_start(struct nlx_kcore_domain *kd,
1859  struct nlx_core_transfer_mc *ctm,
1860  struct nlx_kcore_transfer_mc *ktm)
1861 {
1862  struct nlx_core_ep_addr *cepa;
1863  lnet_process_id_t id;
1864  int rc;
1865  int i;
1866 
1867  M0_ENTRY();
1868  M0_PRE(kd != NULL && ctm != NULL && ktm != NULL);
1869  cepa = &ctm->ctm_addr;
1870 
1871  /*
1872  * cepa_nid/cepa_pid must match a local NID/PID.
1873  * cepa_portal must be in range. cepa_tmid is checked below.
1874  */
1875  if (cepa->cepa_portal == LNET_RESERVED_PORTAL ||
1877  M0_LOG(M0_ERROR, "cepa_portal=%"PRIu32" "
1878  "LNET_RESERVED_PORTAL=%d M0_NET_LNET_MAX_PORTALS=%d",
1879  cepa->cepa_portal, LNET_RESERVED_PORTAL,
1881  rc = -EINVAL;
1882  goto fail;
1883  }
1884  for (i = 0; ; ++i) {
1885  rc = LNetGetId(i, &id);
1886  if (rc == -ENOENT)
1887  break;
1888  M0_ASSERT_INFO(rc == 0, "rc=%d", rc);
1889  if (id.nid == cepa->cepa_nid && id.pid == cepa->cepa_pid)
1890  break;
1891  }
1892  if (rc == -ENOENT)
1893  goto fail;
1894 
1895  rc = LNetEQAlloc(M0_NET_LNET_EQ_SIZE, nlx_kcore_eq_cb, &ktm->ktm_eqh);
1896  if (rc < 0) {
1897  M0_LOG(M0_ERROR, "LNetEQAlloc() failed: rc=%d", rc);
1898  goto fail;
1899  }
1900 
1902  if (cepa->cepa_tmid == M0_NET_LNET_TMID_INVALID) {
1903  rc = nlx_kcore_max_tmid_find(cepa);
1904  if (rc < 0) {
1906  goto fail_with_eq;
1907  }
1908  cepa->cepa_tmid = rc;
1909  } else if (cepa->cepa_tmid > M0_NET_LNET_TMID_MAX) {
1911  rc = M0_ERR(-EINVAL);
1912  goto fail_with_eq;
1913  } else if (nlx_kcore_addr_in_use(cepa)) {
1915  rc = M0_ERR(-EADDRINUSE);
1916  goto fail_with_eq;
1917  }
1918  ktm->ktm_addr = *cepa;
1919  tms_tlink_init(ktm);
1922 
1923  drv_tms_tlink_init(ktm);
1924  drv_bevs_tlist_init(&ktm->ktm_drv_bevs);
1926  spin_lock_init(&ktm->ktm_bevq_lock);
1927  init_waitqueue_head(&ktm->ktm_wq);
1928  ktm->_debug_ = ctm->_debug_;
1929  ctm->ctm_kpvt = ktm;
1932  return 0;
1933 
1934 fail_with_eq:
1935  i = LNetEQFree(ktm->ktm_eqh);
1936  M0_ASSERT(i == 0);
1937 fail:
1938  M0_ASSERT(rc != 0);
1939  return M0_ERR(rc);
1940 }
1941 
1942 M0_INTERNAL int nlx_core_tm_start(struct nlx_core_domain *cd,
1943  struct m0_net_transfer_mc *tm,
1944  struct nlx_core_transfer_mc *ctm)
1945 {
1946  struct nlx_kcore_domain *kd;
1947  struct nlx_core_buffer_event *e1;
1948  struct nlx_core_buffer_event *e2;
1949  struct nlx_kcore_transfer_mc *ktm;
1950  int rc;
1951 
1952  M0_PRE(m0_mutex_is_locked(&tm->ntm_mutex));
1953  M0_PRE(nlx_tm_invariant(tm));
1954  M0_PRE(cd != NULL);
1955  kd = cd->cd_kpvt;
1957 
1958  NLX_ALLOC_PTR(ktm);
1959  if (ktm == NULL) {
1960  rc = M0_ERR(-ENOMEM);
1961  goto fail_ktm;
1962  }
1963 
1964  nlx_core_kmem_loc_set(&ktm->ktm_ctm_loc, virt_to_page(ctm),
1965  NLX_PAGE_OFFSET((unsigned long) ctm));
1966  rc = nlx_kcore_tm_start(kd, ctm, ktm);
1967  if (rc != 0)
1968  goto fail_ktm;
1969 
1970  ctm->ctm_upvt = NULL;
1971  rc = nlx_core_new_blessed_bev(cd, ctm, &e1);
1972  if (rc == 0)
1973  rc = nlx_core_new_blessed_bev(cd, ctm, &e2);
1974  if (rc != 0)
1975  goto fail_blessed_bev;
1976  M0_ASSERT(e1 != NULL && e2 != NULL);
1977  bev_cqueue_init(&ctm->ctm_bevq, &e1->cbe_tm_link, &e2->cbe_tm_link);
1978  M0_ASSERT(bev_cqueue_is_empty(&ctm->ctm_bevq));
1979  return 0;
1980 
1981  fail_blessed_bev:
1982  if (e1 != NULL)
1984  nlx_kcore_tm_stop(ctm, ktm);
1985  fail_ktm:
1986  m0_free(ktm);
1987  M0_ASSERT(rc != 0);
1988  return M0_RC(rc);
1989 }
1990 
/**
 * Kernel core shutdown: tears down the device interface, finalizes the
 * global TM list and shuts LNet down.
 * NOTE(review): one cleanup statement between tms_tlist_fini() and
 * LNetNIFini() is elided from this view (presumably nlx_kcore_mutex
 * finalization) - confirm against upstream.
 */
static void nlx_core_fini(void)
{
	int rc;

	nlx_dev_fini();
	tms_tlist_fini(&nlx_kcore_tms);
	rc = LNetNIFini();
	/* LNetNIFini() is not expected to fail during module teardown */
	M0_ASSERT(rc == 0);
}
2001 
/**
 * Kernel core initialization: brings up an LNet network interface,
 * initializes the global TM list and registers the user-space device
 * interface.
 * @return 0 on success; the LNetNIInit() or nlx_dev_init() error code
 * on failure (nlx_core_fini() is invoked on the latter).
 * NOTE(review): two statements are elided from this view - the
 * m0_thread_tls_back(tls) restore after LNetNIInit() and (presumably)
 * nlx_kcore_mutex initialization before tms_tlist_init() - confirm
 * against upstream before relying on this listing.
 */
static int nlx_core_init(void)
{
	int rc;
	/*
	 * Temporarily reset current->journal_info, because LNetNIInit assumes
	 * it is NULL.
	 */
	struct m0_thread_tls *tls = m0_thread_tls_pop();

	/*
	 * Init LNet with same PID as Lustre would use in case we are first.
	 * Depending on the lustre version, the PID symbol may be called
	 * LUSTRE_SRV_LNET_PID or LNET_PID_LUSTRE.
	 */
#ifdef LNET_PID_LUSTRE
	rc = LNetNIInit(LNET_PID_LUSTRE);
#else
	rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
#endif
	if (rc < 0)
		return M0_RC(rc);

	tms_tlist_init(&nlx_kcore_tms);

	rc = nlx_dev_init();
	if (rc != 0)
		nlx_core_fini();

	return M0_RC(rc);
}
2034 
2037  .ko_dom_fini = nlx_kcore_core_dom_fini,
2038  .ko_buf_register = nlx_kcore_buf_register,
2039  .ko_buf_deregister = nlx_kcore_buf_deregister,
2040  .ko_tm_start = nlx_kcore_tm_start,
2041  .ko_tm_stop = nlx_kcore_tm_stop,
2042  .ko_buf_msg_recv = nlx_kcore_buf_msg_recv,
2043  .ko_buf_msg_send = nlx_kcore_buf_msg_send,
2044  .ko_buf_active_recv = nlx_kcore_buf_active_recv,
2045  .ko_buf_active_send = nlx_kcore_buf_active_send,
2046  .ko_buf_passive_recv = nlx_kcore_buf_passive_recv,
2047  .ko_buf_passive_send = nlx_kcore_buf_passive_send,
2048  .ko_buf_del = nlx_kcore_LNetMDUnlink,
2049  .ko_buf_event_wait = nlx_kcore_buf_event_wait,
2050 };
2051 
2062 {
2063  M0_PRE(kd != NULL);
2068  drv_tms_tlist_init(&kd->kd_drv_tms);
2069  drv_bufs_tlist_init(&kd->kd_drv_bufs);
2071  return 0;
2072 }
2073 
2079 {
2082 
2083  drv_bufs_tlist_fini(&kd->kd_drv_bufs);
2084  drv_tms_tlist_fini(&kd->kd_drv_tms);
2085  kd->kd_drv_ops = NULL;
2087 }
2088  /* KLNetCore */
2090 
2091 /*
2092  * Local variables:
2093  * c-indentation-style: "K&R"
2094  * c-basic-offset: 8
2095  * tab-width: 8
2096  * fill-column: 79
2097  * scroll-step: 1
2098  * End:
2099  */
int(* _nlx_kcore_LNetMDAttach)(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_core.c:860
uint64_t id
Definition: cob.h:2380
static void nlx_kcore_hdr_data_decode(uint64_t hdr_data, uint32_t *portal, uint32_t *tmid)
Definition: klnet_utils.c:229
static uint32_t nlx_core_kmem_loc_checksum(const struct nlx_core_kmem_loc *loc)
Definition: lnet_core.c:155
struct nlx_core_kmem_loc kd_cd_loc
Definition: klnet_core.h:95
static void nlx_kcore_kiov_restore_length(struct nlx_kcore_buffer *kcb)
Definition: klnet_utils.c:352
static void nlx_kcore_kcore_dom_fini(struct nlx_kcore_domain *kd)
Definition: klnet_core.c:2078
size_t kb_kiov_len
Definition: klnet_core.h:191
static size_t nr
Definition: dump.c:1505
#define NLX_kcore_LNetMDAttach(ktm, lcbuf, kb, umd)
Definition: klnet_core.c:883
M0_INTERNAL void m0_thread_tls_back(struct m0_thread_tls *tls)
Definition: kthread.c:282
#define M0_PRE(cond)
static int nlx_kcore_buf_passive_send(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1587
#define M0_ALLOC_ARR(arr, nr)
Definition: memory.h:84
M0_INTERNAL void m0_mutex_unlock(struct m0_mutex *mutex)
Definition: mutex.c:66
M0_INTERNAL int nlx_core_buf_msg_recv(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1373
#define m0_strdup(s)
Definition: string.h:43
M0_INTERNAL int nlx_dev_init(void)
Definition: klnet_drv.c:1640
M0_BASSERT(sizeof(__u64)==sizeof(uint64_t))
unsigned kb_kiov_orig_len
Definition: klnet_core.h:203
M0_INTERNAL int nlx_core_buf_del(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1631
struct nlx_core_bev_link cbe_tm_link
#define NULL
Definition: misc.h:38
M0_INTERNAL void nlx_core_dom_fini(struct nlx_core_domain *cd)
Definition: klnet_core.c:1207
#define ergo(a, b)
Definition: misc.h:293
M0_INTERNAL m0_bcount_t nlx_core_get_max_buffer_segment_size(struct nlx_core_domain *lcdom)
Definition: klnet_core.c:1226
lnet_handle_eq_t ktm_eqh
Definition: klnet_core.h:151
struct nlx_core_ep_addr cb_addr
static struct nlx_kcore_ops nlx_kcore_def_ops
Definition: klnet_core.c:2035
static struct nlx_kcore_interceptable_subs nlx_kcore_iv
Definition: klnet_core.c:873
static void bev_cqueue_put(struct nlx_core_bev_cqueue *q, struct nlx_core_bev_link *p)
Definition: kbev_cqueue.c:69
uint64_t m0_time_t
Definition: time.h:37
struct page * kl_page
#define M0_LOG(level,...)
Definition: trace.h:167
static int nlx_kcore_kcore_dom_init(struct nlx_kcore_domain *kd)
Definition: klnet_core.c:2061
static void nlx_kcore_umd_init(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, int threshold, int max_size, unsigned options, bool isLNetGetOp, lnet_md_t *umd)
Definition: klnet_utils.c:255
static void nlx_core_match_bits_decode(uint64_t mb, uint32_t *tmid, uint64_t *counter)
Definition: lnet_core.c:258
static bool nlx_kcore_tm_invariant(const struct nlx_kcore_transfer_mc *kctm)
Definition: klnet_core.c:929
static bool nlx_core_buffer_invariant(const struct nlx_core_buffer *lcb)
Definition: lnet_core.c:149
#define NLXDBG(ptr, dbg, stmt)
Definition: lnet_main.c:877
int(* _nlx_kcore_LNetGet)(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_core.c:868
#define NLX_PAGE_OFFSET(addr)
Definition: klnet_core.h:404
static void nlx_kcore_core_tm_unmap_atomic(struct nlx_core_transfer_mc *ctm)
Definition: klnet_utils.c:749
uint64_t m0_time_nanoseconds(const m0_time_t time)
Definition: time.c:89
wait_queue_head_t ktm_wq
Definition: klnet_core.h:148
static void nlx_core_fini(void)
Definition: klnet_core.c:1991
static int nlx_kcore_max_tmid_find(struct nlx_core_ep_addr *cepa)
Definition: klnet_core.c:953
struct m0_bufvec data
Definition: di.c:40
unsigned kb_ooo_mlength
Definition: klnet_core.h:214
static void bev_cqueue_init(struct nlx_core_bev_cqueue *q, struct nlx_core_bev_link *ql1, struct nlx_core_bev_link *ql2)
Definition: bev_cqueue.c:658
nlx_core_opaque_ptr_t kb_buffer_id
Definition: klnet_core.h:176
#define NLX_ALLOC_PTR(ptr)
Definition: lnet_core.h:638
static int nlx_kcore_core_dom_init(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_core.c:1157
static void nlx_kcore_core_dom_fini(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_core.c:1200
uint64_t m0_bcount_t
Definition: types.h:77
lnet_handle_md_t kb_mdh
Definition: klnet_core.h:206
#define PAGE_SIZE
Definition: lnet_ut.c:277
uint64_t kb_magic
Definition: klnet_core.h:160
#define container_of(ptr, type, member)
Definition: misc.h:33
#define M0_SET0(obj)
Definition: misc.h:64
M0_INTERNAL void m0_mutex_lock(struct m0_mutex *mutex)
Definition: mutex.c:49
static void nlx_kcore_tm_stop(struct nlx_core_transfer_mc *ctm, struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_core.c:1811
static int nlx_kcore_LNetMDAttach(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_utils.c:377
M0_INTERNAL int nlx_core_nidstr_encode(struct nlx_core_domain *lcdom, uint64_t nid, char nidstr[M0_NET_LNET_NIDSTR_SIZE])
Definition: klnet_core.c:1732
M0_INTERNAL void nlx_core_mem_free(void *data, size_t size, unsigned shift)
Definition: klnet_core.c:1143
M0_INTERNAL int nlx_core_buf_active_recv(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1467
M0_INTERNAL int nlx_core_new_blessed_bev(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer_event **bevp)
Definition: klnet_core.c:1785
#define m0_tl_endfor
Definition: tlist.h:700
return M0_RC(rc)
uint64_t nlx_core_opaque_ptr_t
static int nlx_kcore_buffer_kla_to_kiov(struct nlx_kcore_buffer *kb, const struct m0_bufvec *bvec)
Definition: klnet_vec.c:133
static struct nlx_core_bev_link * bev_cqueue_pnext(const struct nlx_core_bev_cqueue *q)
Definition: kbev_cqueue.c:40
#define M0_ENTRY(...)
Definition: trace.h:170
static int nlx_kcore_nidstr_decode(const char *nidstr, uint64_t *nid)
Definition: klnet_core.c:1702
static void nlx_kcore_buf_deregister(struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1282
static int nlx_kcore_nidstr_encode(uint64_t nid, char nidstr[M0_NET_LNET_NIDSTR_SIZE])
Definition: klnet_core.c:1721
int i
Definition: dir.c:1033
struct nlx_core_ep_addr ctm_addr
#define M0_ERR_INFO(rc, fmt,...)
Definition: trace.h:215
return M0_ERR(-EOPNOTSUPP)
uint64_t kd_magic
Definition: klnet_core.h:92
M0_INTERNAL void nlx_dev_fini(void)
Definition: klnet_drv.c:1653
int(* _nlx_kcore_LNetPut)(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_core.c:864
static bool nlx_kcore_addr_in_use(struct nlx_core_ep_addr *cepa)
Definition: klnet_core.c:939
static struct m0_mutex nlx_kcore_mutex
Definition: klnet_core.c:822
#define M0_ASSERT(cond)
M0_INTERNAL bool m0_mutex_is_locked(const struct m0_mutex *mutex)
Definition: mutex.c:95
struct nlx_core_bev_cqueue ctm_bevq
m0_time_t m0_time_now(void)
Definition: time.c:134
static int nlx_kcore_buf_passive_recv(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1538
m0_bcount_t cb_min_receive_size
static int nlx_kcore_buf_msg_recv(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1348
static int counter
Definition: mutex.c:32
Definition: tlist.h:251
nlx_core_opaque_ptr_t cbe_buffer_id
static struct m0_bufvec bvec
Definition: xcode.c:169
static int nlx_kcore_tm_start(struct nlx_kcore_domain *kd, struct nlx_core_transfer_mc *ctm, struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_core.c:1858
static bool nlx_kcore_buffer_event_invariant(const struct nlx_kcore_buffer_event *kbe)
Definition: klnet_core.c:918
static struct m0_stob_domain * dom
Definition: storage.c:38
unsigned kb_ooo_offset
Definition: klnet_core.h:224
M0_INTERNAL void nlx_core_tm_stop(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm)
Definition: klnet_core.c:1834
M0_INTERNAL m0_bcount_t nlx_core_get_max_buffer_size(struct nlx_core_domain *lcdom)
Definition: klnet_core.c:1220
M0_INTERNAL struct m0_thread_tls * m0_thread_tls_pop(void)
Definition: kthread.c:274
M0_INTERNAL int32_t nlx_core_get_max_buffer_segments(struct nlx_core_domain *lcdom)
Definition: klnet_core.c:1237
#define NLX_ALLOC_ALIGNED_PTR(ptr)
Definition: lnet_core.h:630
void * m0_alloc(size_t size)
Definition: memory.c:126
spinlock_t ktm_bevq_lock
Definition: klnet_core.h:145
static bool nlx_tm_invariant(const struct m0_net_transfer_mc *tm)
M0_INTERNAL void m0_mutex_init(struct m0_mutex *mutex)
Definition: mutex.c:35
M0_INTERNAL int nlx_core_buf_active_send(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1517
#define M0_POST(cond)
static bool bev_cqueue_is_empty(const struct nlx_core_bev_cqueue *q)
Definition: bev_cqueue.c:702
static __thread struct m0_thread_tls * tls
Definition: uthread.c:66
M0_INTERNAL void nlx_core_buf_deregister(struct nlx_core_domain *cd, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1330
static void nlx_core_kmem_loc_set(struct nlx_core_kmem_loc *loc, struct page *pg, uint32_t off)
Definition: klnet_core.c:1127
static m0_bindex_t offset
Definition: dump.c:173
M0_INTERNAL int nlx_core_buf_register(struct nlx_core_domain *cd, nlx_core_opaque_ptr_t buffer_id, const struct m0_bufvec *bvec, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1295
static void bev_link_bless(struct nlx_core_bev_link *ql, struct page *pg)
Definition: kbev_cqueue.c:92
static int nlx_kcore_LNetGet(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_utils.c:523
M0_INTERNAL void * nlx_core_mem_alloc(size_t size, unsigned shift)
Definition: klnet_core.c:1138
M0_INTERNAL void nlx_core_nidstrs_put(struct nlx_core_domain *lcdom, char ***nidary)
Definition: klnet_core.c:1774
uint32_t cb_max_operations
uint64_t m0_time_seconds(const m0_time_t time)
Definition: time.c:83
struct nlx_core_ep_addr cbe_sender
struct nlx_core_ep_addr ktm_addr
Definition: klnet_core.h:139
lnet_kiov_t * kb_kiov
Definition: klnet_core.h:188
static void nlx_kcore_kiov_adjust_length(struct nlx_kcore_transfer_mc *ktm, struct nlx_kcore_buffer *kcb, lnet_md_t *umd, m0_bcount_t bytes)
Definition: klnet_utils.c:314
struct nlx_kcore_transfer_mc * kb_ktm
Definition: klnet_core.h:166
struct m0_mutex kd_drv_mutex
Definition: klnet_core.h:98
#define NLX_kcore_LNetPut(ktm, lcbuf, kb, umd)
Definition: klnet_core.c:885
static bool nlx_core_ep_eq(const struct nlx_core_ep_addr *cep1, const struct nlx_core_ep_addr *cep2)
Definition: lnet_core.h:537
static uint32_t timeout
Definition: console.c:52
static int nlx_kcore_LNetPut(struct nlx_kcore_transfer_mc *kctm, struct nlx_core_buffer *lcbuf, struct nlx_kcore_buffer *kcb, lnet_md_t *umd)
Definition: klnet_utils.c:461
uint64_t cb_match_bits
struct nlx_kcore_ops * kd_drv_ops
Definition: klnet_core.h:101
M0_INTERNAL int nlx_core_buf_passive_recv(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1566
#define PRIu32
Definition: types.h:66
static uint8_t fail[DATA_UNIT_COUNT_MAX+PARITY_UNIT_COUNT_MAX]
M0_INTERNAL int nlx_core_dom_init(struct m0_net_domain *dom, struct nlx_core_domain *cd)
Definition: klnet_core.c:1167
M0_INTERNAL int nlx_core_buf_passive_send(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1617
static int nlx_kcore_buf_register(struct nlx_kcore_domain *kd, nlx_core_opaque_ptr_t buffer_id, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1251
m0_time_t kb_add_time
Definition: klnet_core.h:185
static struct m0_tl nlx_kcore_tms
Definition: klnet_core.c:825
struct m0_tl kd_drv_tms
Definition: klnet_core.h:107
static struct nlx_core_transfer_mc * nlx_kcore_core_tm_map_atomic(struct nlx_kcore_transfer_mc *ktm)
Definition: klnet_utils.c:722
static void bev_cqueue_fini(struct nlx_core_bev_cqueue *q, void(*free_cb)(struct nlx_core_bev_link *))
Definition: bev_cqueue.c:682
struct nlx_core_kmem_loc kb_cb_loc
Definition: klnet_core.h:163
#define NLXDBGP(ptr, dbg, fmt,...)
Definition: lnet_main.c:879
m0_bcount_t cb_length
static int nlx_kcore_buf_event_wait(struct nlx_core_transfer_mc *ctm, struct nlx_kcore_transfer_mc *ktm, m0_time_t timeout)
Definition: klnet_core.c:1658
m0_time_t m0_time_sub(const m0_time_t t1, const m0_time_t t2)
Definition: time.c:65
static void nlx_kcore_tms_list_add(struct nlx_kcore_transfer_mc *kctm)
Definition: klnet_core.c:980
static int nlx_kcore_buf_active_send(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1488
m0_bcount_t size
Definition: di.c:39
#define _0C(exp)
Definition: assert.h:311
M0_INTERNAL int nlx_core_buf_event_wait(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, m0_time_t timeout)
Definition: klnet_core.c:1685
M0_INTERNAL void m0_mutex_fini(struct m0_mutex *mutex)
Definition: mutex.c:42
static int nlx_kcore_buf_msg_send(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1394
enum m0_net_queue_type cb_qtype
M0_TL_DEFINE(tms, static, struct nlx_kcore_transfer_mc)
#define M0_ASSERT_INFO(cond, fmt,...)
static bool nlx_core_tm_invariant(const struct nlx_core_transfer_mc *lctm)
Definition: lnet_core.c:120
static void nlx_core_bev_free_cb(struct nlx_core_bev_link *ql)
Definition: lnet_core.c:165
M0_INTERNAL int nlx_core_buf_msg_send(struct nlx_core_domain *cd, struct nlx_core_transfer_mc *ctm, struct nlx_core_buffer *cb)
Definition: klnet_core.c:1417
#define _NLXIS(s)
struct nlx_core_kmem_loc kbe_bev_loc
Definition: klnet_core.h:236
static int nlx_core_init(void)
Definition: klnet_core.c:2002
enum m0_net_queue_type kb_qtype
Definition: klnet_core.h:182
static bool nlx_core_kmem_loc_invariant(const struct nlx_core_kmem_loc *loc)
Definition: lnet_pvt.h:81
static bool nlx_core_kmem_loc_is_empty(const struct nlx_core_kmem_loc *loc)
Definition: lnet_pvt.h:91
struct m0_tl ktm_drv_bevs
Definition: klnet_core.h:136
static int scan(struct scanner *s)
Definition: beck.c:963
int(* ko_dom_init)(struct nlx_kcore_domain *kd, struct nlx_core_domain *cd)
Definition: klnet_core.h:275
struct nlx_core_kmem_loc ktm_ctm_loc
Definition: klnet_core.h:124
#define m0_tl_for(name, head, obj)
Definition: tlist.h:695
void m0_free(void *data)
Definition: memory.c:146
Definition: mutex.h:47
M0_INTERNAL int nlx_core_tm_start(struct nlx_core_domain *cd, struct m0_net_transfer_mc *tm, struct nlx_core_transfer_mc *ctm)
Definition: klnet_core.c:1942
static bool nlx_kcore_buffer_invariant(const struct nlx_kcore_buffer *kcb)
Definition: klnet_core.c:908
static int nlx_kcore_buf_active_recv(struct nlx_kcore_transfer_mc *ktm, struct nlx_core_buffer *cb, struct nlx_kcore_buffer *kb)
Definition: klnet_core.c:1438
M0_TL_DESCR_DEFINE(tms, "nlx tms", static, struct nlx_kcore_transfer_mc, ktm_tm_linkage, ktm_magic, M0_NET_LNET_KCORE_TM_MAGIC, M0_NET_LNET_KCORE_TMS_MAGIC)
int32_t rc
Definition: trigger_fop.h:47
#define m0_tl_exists(name, var, head,...)
Definition: tlist.h:774
M0_INTERNAL int nlx_core_nidstr_decode(struct nlx_core_domain *lcdom, const char *nidstr, uint64_t *nid)
Definition: klnet_core.c:1710
#define NLX_kcore_LNetGet(ktm, lcbuf, kb, umd)
Definition: klnet_core.c:887
nlx_core_opaque_ptr_t cb_buffer_id
M0_INTERNAL int nlx_core_nidstrs_get(struct nlx_core_domain *lcdom, char ***nidary)
Definition: klnet_core.c:1744
Definition: vec.h:145
struct m0_tl kd_drv_bufs
Definition: klnet_core.h:113
static bool nlx_kcore_domain_invariant(const struct nlx_kcore_domain *kd)
Definition: klnet_core.c:897
static void nlx_kcore_eq_cb(lnet_event_t *event)
Definition: klnet_core.c:1004
static int nlx_kcore_LNetMDUnlink(struct nlx_kcore_transfer_mc *kctm, struct nlx_kcore_buffer *kcb)
Definition: klnet_utils.c:433
static uint64_t nlx_core_match_bits_encode(uint32_t tmid, uint64_t counter)
Definition: lnet_core.c:243