Lines Matching refs:mvdev

129 	struct mlx5_vdpa_dev mvdev;  member
156 mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
162 mlx5_vdpa_info(mvdev, "%s\n", #_status); \
165 static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set) in print_status() argument
168 mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n", in print_status()
174 mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get"); in print_status()
176 mlx5_vdpa_info(mvdev, "driver resets the device\n"); in print_status()
188 static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set) in print_features() argument
191 mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n", in print_features()
197 mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads"); in print_features()
199 mlx5_vdpa_info(mvdev, "all feature bits are cleared\n"); in print_features()
239 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis() local
246 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
248 mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err); in create_tis()
255 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
268 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
269 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
285 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
286 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
291 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
337 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
351 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
353 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
365 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
367 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
372 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
378 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
391 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
407 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
417 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
427 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
441 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
442 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
443 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
445 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
484 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
510 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
511 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
543 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
559 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
580 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
587 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
591 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
595 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
601 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in set_umem_size()
627 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
654 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
662 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
664 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
700 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
737 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
785 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
791 get_features_12_3(ndev->mvdev.actual_features)); in create_virtqueue()
803 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
807 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key); in create_virtqueue()
814 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
816 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
840 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
843 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
844 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
876 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
888 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
905 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
923 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
967 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1028 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1029 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1064 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1070 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1087 mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n"); in setup_vq()
1114 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1143 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1146 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1184 log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size)); in create_rqt()
1193 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1200 for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) { in create_rqt()
1210 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1220 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1243 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1260 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1267 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1282 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in add_fwd_to_tir()
1284 mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n"); in add_fwd_to_tir()
1292 ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_fwd_to_tir()
1313 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in add_fwd_to_tir()
1325 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in remove_fwd_to_tir()
1333 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_kick_vq() local
1334 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq()
1340 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
1346 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_address() local
1347 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address()
1358 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_num() local
1359 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num()
1368 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_cb() local
1369 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb()
1377 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_ready() local
1378 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready()
1389 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_ready() local
1390 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready()
1399 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_state() local
1400 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state()
1404 mlx5_vdpa_warn(mvdev, "can't modify available index\n"); in mlx5_vdpa_set_vq_state()
1415 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_state() local
1416 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state()
1436 mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n"); in mlx5_vdpa_get_vq_state()
1472 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_features() local
1473 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_features()
1476 dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask); in mlx5_vdpa_get_features()
1477 ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features); in mlx5_vdpa_get_features()
1478 if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0)) in mlx5_vdpa_get_features()
1479 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1); in mlx5_vdpa_get_features()
1480 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM); in mlx5_vdpa_get_features()
1481 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_features()
1482 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_features()
1485 static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) in verify_driver_features() argument
1512 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) { in setup_virtqueues()
1532 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
1542 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) in mlx5_vdpa_is_little_endian() argument
1545 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); in mlx5_vdpa_is_little_endian()
1548 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val) in cpu_to_mlx5vdpa16() argument
1550 return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val); in cpu_to_mlx5vdpa16()
1555 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_features() local
1556 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_features()
1559 print_features(mvdev, features, true); in mlx5_vdpa_set_features()
1561 err = verify_driver_features(mvdev, features); in mlx5_vdpa_set_features()
1565 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_features()
1566 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu); in mlx5_vdpa_set_features()
1567 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_set_features()
1595 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_status() local
1596 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status()
1598 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
1599 return ndev->mvdev.status; in mlx5_vdpa_get_status()
1631 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
1642 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
1654 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
1681 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_change_map()
1682 err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb); in mlx5_vdpa_change_map()
1686 if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_vdpa_change_map()
1697 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_change_map()
1708 mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n"); in setup_driver()
1714 mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n"); in setup_driver()
1720 mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n"); in setup_driver()
1726 mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n"); in setup_driver()
1732 mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n"); in setup_driver()
1770 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
1776 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_status() local
1777 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status()
1780 print_status(mvdev, status, true); in mlx5_vdpa_set_status()
1782 mlx5_vdpa_info(mvdev, "performing device reset\n"); in mlx5_vdpa_set_status()
1785 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
1786 ndev->mvdev.status = 0; in mlx5_vdpa_set_status()
1787 ndev->mvdev.mlx_features = 0; in mlx5_vdpa_set_status()
1788 ++mvdev->generation; in mlx5_vdpa_set_status()
1792 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
1796 mlx5_vdpa_warn(mvdev, "failed to setup driver\n"); in mlx5_vdpa_set_status()
1800 mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n"); in mlx5_vdpa_set_status()
1805 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
1809 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
1810 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
1816 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_config() local
1817 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config()
1831 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_generation() local
1833 return mvdev->generation; in mlx5_vdpa_get_generation()
1838 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_map() local
1839 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map()
1843 err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map); in mlx5_vdpa_set_map()
1845 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err); in mlx5_vdpa_set_map()
1857 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_free() local
1861 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
1865 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in mlx5_vdpa_free()
1868 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
1930 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
1934 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
1947 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
1959 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
1968 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) { in init_mvqs()
1975 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
1987 struct mlx5_vdpa_dev *mvdev; in mlx5_vdpa_add_dev() local
1996 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_add_dev()
2001 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_add_dev()
2002 mvdev = &ndev->mvdev; in mlx5_vdpa_add_dev()
2003 mvdev->mdev = mdev; in mlx5_vdpa_add_dev()
2022 mvdev->vdev.dma_dev = mdev->device; in mlx5_vdpa_add_dev()
2023 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_add_dev()
2031 err = vdpa_register_device(&mvdev->vdev); in mlx5_vdpa_add_dev()
2040 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_add_dev()
2046 put_device(&mvdev->vdev.dev); in mlx5_vdpa_add_dev()
2050 void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev) in mlx5_vdpa_remove_dev() argument
2052 vdpa_unregister_device(&mvdev->vdev); in mlx5_vdpa_remove_dev()
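
Every match above resolves through one struct embedding: struct mlx5_vdpa_dev mvdev is a member of the driver's struct mlx5_vdpa_net (line 129), and mvdev in turn embeds the core struct vdpa_device (lines 1996, 2031 and 2046 reference mvdev.vdev / mvdev->vdev). The vdpa ops receive a struct vdpa_device * and walk back out with container_of(), which is what the to_mvdev() and to_mlx5_vdpa_ndev() calls throughout the listing are assumed to do. The sketch below reconstructs that relationship from the matching lines only; the exact field layout, header set, and the legacy-endian fallback in mlx5_vdpa_is_little_endian() are assumptions, not verbatim source.

/* Reconstruction (not verbatim) of how the mvdev references fit together. */
#include <linux/kernel.h>
#include <linux/vdpa.h>
#include <linux/virtio_config.h>
#include <linux/virtio_byteorder.h>
#include <linux/mlx5/driver.h>

struct mlx5_vdpa_dev {
	struct vdpa_device vdev;	/* embedded core device, see lines 1996, 2031, 2046 */
	struct mlx5_core_dev *mdev;	/* set at add time, see line 2003 */
	u64 mlx_features;		/* device feature bits, see lines 1477-1482 */
	u64 actual_features;		/* negotiated bits, see lines 791, 1545, 1565 */
	u8 status;			/* VIRTIO_CONFIG_S_* bits, see lines 1598-1599 */
	u32 max_vqs;			/* see lines 1512, 2001 */
	u32 generation;			/* see lines 1788, 1833 */
	/* ... firmware resources (res.uid, res.pdn, res.uar, res.kick_addr), mr, ... */
};

struct mlx5_vdpa_net {
	struct mlx5_vdpa_dev mvdev;	/* line 129: the member this search matches */
	/* ... net-specific state: virtqueues, TIS/TIR/RQT resources, config ... */
};

/* The vdpa core hands each op a struct vdpa_device *; the driver recovers its
 * own structures with container_of(), as to_mvdev() and to_mlx5_vdpa_ndev()
 * in the listing are assumed to do. */
static inline struct mlx5_vdpa_dev *to_mvdev(struct vdpa_device *vdev)
{
	return container_of(vdev, struct mlx5_vdpa_dev, vdev);
}

static inline struct mlx5_vdpa_net *to_mlx5_vdpa_ndev(struct mlx5_vdpa_dev *mvdev)
{
	return container_of(mvdev, struct mlx5_vdpa_net, mvdev);
}

/* Lines 1542-1550: config-space fields (e.g. ndev->config.mtu at line 1566) are
 * stored in virtio byte order chosen from VIRTIO_F_VERSION_1; the
 * virtio_legacy_is_little_endian() fallback here is an assumption. */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
	return virtio_legacy_is_little_endian() ||
	       (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
}

static inline __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
{
	return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
}

Under this layout, an op such as mlx5_vdpa_kick_vq() at line 1333 first converts its vdev argument to mvdev, then to ndev, before writing the doorbell at ndev->mvdev.res.kick_addr (line 1340).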