Lines matching refs: mvq (identifier cross-reference over the mlx5 vDPA network driver, drivers/vdpa/mlx5/net/mlx5_vnet.c; each entry gives the source line number, the matching line, and the enclosing function, with "argument" or "local" noting how mvq is bound there)
330 struct mlx5_vdpa_virtqueue *mvq, u32 num_ent) in qp_prepare() argument
336 vqp = fw ? &mvq->fwqp : &mvq->vqqp; in qp_prepare()
356 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); in qp_prepare()
375 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
386 vqp = &mvq->vqqp; in qp_create()
387 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
403 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
421 rx_post(vqp, mvq->num_ent); in qp_create()
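qp_create() sets up the driver-side QP for one virtqueue: allocate its receive buffer (line 387), point its completions at the virtqueue's CQ via cqn_rcv (line 356), create the QP, then pre-post one receive per queue entry (line 421). A minimal userspace sketch of that ordering; every type and function below is a hypothetical stand-in, not the driver's API:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's steps; the real driver
 * issues mlx5 firmware commands for each of these. */
struct vdpa_qp { unsigned int qpn; };

static int rq_buf_alloc_stub(struct vdpa_qp *qp, unsigned int num_ent)
{
        /* allocate a receive-queue buffer with num_ent entries */
        (void)qp; (void)num_ent;
        return 0;
}

static void rx_post_stub(struct vdpa_qp *qp, unsigned int num_ent)
{
        /* pre-post one receive WQE per virtqueue entry, so the QP can
         * absorb num_ent completions before anything is reposted */
        (void)qp; (void)num_ent;
}

int main(void)
{
        struct vdpa_qp vqqp = { 0 };
        unsigned int num_ent = 256;

        if (rq_buf_alloc_stub(&vqqp, num_ent))  /* line 387 in the listing */
                return 1;
        /* ... prepare the QP context so receive completions land on the
         * virtqueue's CQ (cqn_rcv, line 356), then create the QP ... */
        rx_post_stub(&vqqp, num_ent);           /* line 421 */
        printf("driver-side QP ready with %u posted receives\n", num_ent);
        return 0;
}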
467 static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num) in mlx5_vdpa_handle_completions() argument
469 mlx5_cq_set_ci(&mvq->cq.mcq); in mlx5_vdpa_handle_completions()
475 rx_post(&mvq->vqqp, num); in mlx5_vdpa_handle_completions()
476 if (mvq->event_cb.callback) in mlx5_vdpa_handle_completions()
477 mvq->event_cb.callback(mvq->event_cb.private); in mlx5_vdpa_handle_completions()
482 struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq); in mlx5_vdpa_cq_comp() local
483 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp()
487 while (!mlx5_vdpa_poll_one(&mvq->cq)) { in mlx5_vdpa_cq_comp()
489 if (num > mvq->num_ent / 2) { in mlx5_vdpa_cq_comp()
496 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
502 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
504 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in mlx5_vdpa_cq_comp()
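The interrupt path (lines 467-504) drains the CQ in bounded batches: poll until empty, but every num_ent/2 completions stop to repost receive buffers and fire the virtqueue callback, then re-arm the CQ once drained. A toy model of that loop, with made-up types in place of the mlx5 CQ machinery:

#include <stdio.h>

struct toy_cq { unsigned int pending; };

static int poll_one(struct toy_cq *cq)
{
        if (!cq->pending)
                return -1;              /* CQ empty */
        cq->pending--;
        return 0;                       /* consumed one completion */
}

static void handle_completions(unsigned int num)
{
        /* update the CQ consumer index, repost 'num' receives, fire
         * the virtqueue callback (lines 469-477 in the listing) */
        printf("repost %u, kick callback\n", num);
}

int main(void)
{
        struct toy_cq cq = { .pending = 700 };
        unsigned int num_ent = 256, num = 0;

        while (!poll_one(&cq)) {
                num++;
                if (num > num_ent / 2) {        /* batch limit, line 489 */
                        handle_completions(num);
                        num = 0;
                }
        }
        if (num)
                handle_completions(num);
        /* finally re-arm the CQ for the next interrupt (line 504) */
        return 0;
}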
509 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create() local
513 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_create()
571 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in cq_create()
586 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy() local
588 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_destroy()
598 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
609 *umemp = &mvq->umem1; in set_umem_size()
614 *umemp = &mvq->umem2; in set_umem_size()
619 *umemp = &mvq->umem3; in set_umem_size()
622 (*umemp)->size = p_a * mvq->num_ent + p_b; in set_umem_size()
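Line 622 is the whole sizing story: each umem grows linearly with the queue depth, size = p_a * num_ent + p_b, where p_a and p_b are per-umem parameters the device advertises. A sketch of the formula; the parameter values below are invented purely to exercise it:

#include <stdio.h>

struct umem_params { unsigned int p_a, p_b; };

static unsigned long umem_size(struct umem_params p, unsigned int num_ent)
{
        /* mirrors line 622: size = p_a * num_ent + p_b */
        return (unsigned long)p.p_a * num_ent + p.p_b;
}

int main(void)
{
        /* hypothetical per-umem parameters for umem 1..3 */
        struct umem_params caps[3] = { { 128, 4096 }, { 64, 4096 }, { 16, 0 } };
        unsigned int num_ent = 256;

        for (int i = 0; i < 3; i++)
                printf("umem%d: %lu bytes\n", i + 1,
                       umem_size(caps[i], num_ent));
        return 0;
}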
630 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
640 set_umem_size(ndev, mvq, num, &umem); in create_umem()
680 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
688 umem = &mvq->umem1; in umem_destroy()
691 umem = &mvq->umem2; in umem_destroy()
694 umem = &mvq->umem3; in umem_destroy()
706 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
712 err = create_umem(ndev, mvq, num); in umems_create()
720 umem_destroy(ndev, mvq, num); in umems_create()
725 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
730 umem_destroy(ndev, mvq, num); in umems_destroy()
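umems_create() (lines 706-720) is a standard partial-unwind loop: try umems 1 through 3 in order, and on failure destroy only the ones already created. A self-contained sketch of the pattern, with stub create/destroy functions and a simulated failure on the third umem:

#include <stdio.h>

static int create_one(int num)
{
        printf("create umem%d\n", num);
        return num == 3 ? -1 : 0;       /* simulate failure on the third */
}

static void destroy_one(int num)
{
        printf("destroy umem%d\n", num);
}

static int umems_create_sketch(void)
{
        int num, err = 0;

        for (num = 1; num <= 3; num++) {
                err = create_one(num);
                if (err)
                        goto err_umem;
        }
        return 0;

err_umem:
        /* tear down only the umems that were actually created */
        for (num--; num > 0; num--)
                destroy_one(num);
        return err;
}

int main(void)
{
        return umems_create_sketch() ? 1 : 0;
}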
761 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
771 err = umems_create(ndev, mvq); in create_virtqueue()
788 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in create_virtqueue()
789 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in create_virtqueue()
795 if (vq_is_tx(mvq->index)) in create_virtqueue()
799 MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index); in create_virtqueue()
800 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); in create_virtqueue()
801 MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); in create_virtqueue()
804 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in create_virtqueue()
805 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in create_virtqueue()
806 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in create_virtqueue()
808 MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id); in create_virtqueue()
809 MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size); in create_virtqueue()
810 MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id); in create_virtqueue()
811 MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size); in create_virtqueue()
812 MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); in create_virtqueue()
813 MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); in create_virtqueue()
821 mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in create_virtqueue()
828 umems_destroy(ndev, mvq); in create_virtqueue()
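Lines 804-806 are the easiest place to get the address mapping wrong: the vDPA desc/driver/device areas become the split ring's descriptor table, available ring and used ring respectively, and the listing confirms used_addr is taken from device_addr while available_addr is taken from driver_addr. A plain-struct model of the context those MLX5_SET calls program; the field names and values here are illustrative, not the firmware layout:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct vq_ctx_model {
        uint16_t queue_index;
        uint16_t queue_size;
        uint16_t hw_avail_idx;   /* seeded from mvq->avail_idx (line 788)  */
        uint16_t hw_used_idx;    /* seeded from mvq->used_idx  (line 789)  */
        uint64_t desc_addr;      /* descriptor table <- mvq->desc_addr     */
        uint64_t available_addr; /* available ring   <- mvq->driver_addr   */
        uint64_t used_addr;      /* used ring        <- mvq->device_addr   */
        uint32_t event_qpn;      /* fw-side QP number <- mvq->fwqp (800)   */
};

int main(void)
{
        struct vq_ctx_model ctx = {
                .queue_index = 0,
                .queue_size = 256,
                .desc_addr = 0x100000,
                .available_addr = 0x101000,
                .used_addr = 0x102000,
                .event_qpn = 42,
        };

        printf("vq %" PRIu16 ": desc %#" PRIx64 " avail %#" PRIx64
               " used %#" PRIx64 "\n", ctx.queue_index, ctx.desc_addr,
               ctx.available_addr, ctx.used_addr);
        return 0;
}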
832 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
839 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id); in destroy_virtqueue()
844 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
847 umems_destroy(ndev, mvq); in destroy_virtqueue()
850 static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_rqpn() argument
852 return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn; in get_rqpn()
855 static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_qpn() argument
857 return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn; in get_qpn()
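get_rqpn() and get_qpn() (lines 850-857) are mirror images: the remote QP of the firmware-side QP is the driver-side QP and vice versa, which is what lets modify_qp() fill in the peer's QPN when wiring the pair back-to-back. A minimal model with asserts for that symmetry; the struct and field names are invented:

#include <assert.h>
#include <stdbool.h>

struct pair { unsigned int fw_qpn, vq_qpn; };

static unsigned int get_qpn(const struct pair *p, bool fw)
{
        return fw ? p->fw_qpn : p->vq_qpn;      /* line 857 */
}

static unsigned int get_rqpn(const struct pair *p, bool fw)
{
        return fw ? p->vq_qpn : p->fw_qpn;      /* line 852 */
}

int main(void)
{
        struct pair p = { .fw_qpn = 10, .vq_qpn = 11 };

        /* each side's remote is the other side's local */
        assert(get_rqpn(&p, true) == get_qpn(&p, false));
        assert(get_rqpn(&p, false) == get_qpn(&p, true));
        return 0;
}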
955 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
963 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
972 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
976 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
980 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
984 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
988 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
992 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
996 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1000 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
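connect_qps() (lines 976-1000) walks both QPs through the usual RESET -> INIT -> RTR ladder, alternating sides at each rung so each transition can reference the peer's QPN, and finally moves only the firmware-side QP to RTS. A toy table-driven rendering of that sequence; the command strings mirror the listing, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

struct step { bool fw; const char *cmd; };

int main(void)
{
        static const struct step ladder[] = {
                { true,  "2RST_QP"     }, { false, "2RST_QP"     },
                { true,  "RST2INIT_QP" }, { false, "RST2INIT_QP" },
                { true,  "INIT2RTR_QP" }, { false, "INIT2RTR_QP" },
                { true,  "RTR2RTS_QP"  },  /* only the fw-side QP sends */
        };

        for (unsigned int i = 0; i < sizeof(ladder) / sizeof(ladder[0]); i++)
                printf("modify %s QP: %s\n",
                       ladder[i].fw ? "fw" : "driver", ladder[i].cmd);
        return 0;
}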
1009 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1027 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in query_virtqueue()
1046 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1063 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in modify_virtqueue()
1073 mvq->fw_state = state; in modify_virtqueue()
1078 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1080 u16 idx = mvq->index; in setup_vq()
1083 if (!mvq->num_ent) in setup_vq()
1086 if (mvq->initialized) { in setup_vq()
1091 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1095 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1099 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1103 err = connect_qps(ndev, mvq); in setup_vq()
1107 err = create_virtqueue(ndev, mvq); in setup_vq()
1111 if (mvq->ready) { in setup_vq()
1112 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1120 mvq->initialized = true; in setup_vq()
1124 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1126 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
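setup_vq() (lines 1078-1126) builds the pieces in dependency order (CQ, fw QP, driver QP, connect, virtqueue object, optional move to RDY) and unwinds with gotos in exact reverse order on failure, as the qp_destroy calls at lines 1124-1126 show. A compilable skeleton of that ordering with stub functions and a simulated create_virtqueue failure:

#include <stdio.h>

static int  cq_create_s(void)           { puts("cq_create");        return 0; }
static void cq_destroy_s(void)          { puts("cq_destroy");                 }
static int  qp_create_s(const char *n)  { printf("qp_create %s\n", n);  return 0; }
static void qp_destroy_s(const char *n) { printf("qp_destroy %s\n", n);           }
static int  connect_qps_s(void)         { puts("connect_qps");      return 0; }
static int  create_vq_s(void)           { puts("create_virtqueue"); return -1; } /* simulate failure */

static int setup_vq_sketch(void)
{
        int err;

        if ((err = cq_create_s()))
                return err;
        if ((err = qp_create_s("fwqp")))
                goto err_fwqp;
        if ((err = qp_create_s("vqqp")))
                goto err_vqqp;
        if ((err = connect_qps_s()))
                goto err_connect;
        if ((err = create_vq_s()))
                goto err_connect;
        return 0;

err_connect:
        qp_destroy_s("vqqp");   /* mirrors line 1124 */
err_vqqp:
        qp_destroy_s("fwqp");   /* mirrors line 1126 */
err_fwqp:
        cq_destroy_s();
        return err;
}

int main(void)
{
        return setup_vq_sketch() ? 1 : 0;
}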
1132 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1136 if (!mvq->initialized) in suspend_vq()
1139 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in suspend_vq()
1142 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1145 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1149 mvq->avail_idx = attr.available_index; in suspend_vq()
1150 mvq->used_idx = attr.used_index; in suspend_vq()
1161 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1163 if (!mvq->initialized) in teardown_vq()
1166 suspend_vq(ndev, mvq); in teardown_vq()
1167 destroy_virtqueue(ndev, mvq); in teardown_vq()
1168 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1169 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1170 cq_destroy(ndev, mvq->index); in teardown_vq()
1171 mvq->initialized = false; in teardown_vq()
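suspend_vq() and teardown_vq() (lines 1132-1171) encode one invariant: suspend before destroying, so the hardware avail/used indices can be snapshotted (lines 1149-1150) and later seed hw_available_index/hw_used_index when the queue is recreated. A toy model of that ordering, with invented types:

#include <stdio.h>

struct vq_model {
        int initialized;
        unsigned short avail_idx, used_idx;
};

static void suspend_vq_sketch(struct vq_model *vq)
{
        if (!vq->initialized)
                return;
        /* modify the queue to SUSPEND, then query the frozen indices */
        vq->avail_idx = 100;    /* pretend query result */
        vq->used_idx = 100;
}

static void teardown_vq_sketch(struct vq_model *vq)
{
        if (!vq->initialized)
                return;
        suspend_vq_sketch(vq);
        /* then destroy the virtqueue object, both QPs, and the CQ */
        vq->initialized = 0;
}

int main(void)
{
        struct vq_model vq = { .initialized = 1 };

        teardown_vq_sketch(&vq);
        printf("saved avail %u used %u\n", vq.avail_idx, vq.used_idx);
        return 0;
}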
1335 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq() local
1337 if (unlikely(!mvq->ready)) in mlx5_vdpa_kick_vq()
1348 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address() local
1350 mvq->desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
1351 mvq->device_addr = device_area; in mlx5_vdpa_set_vq_address()
1352 mvq->driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
1360 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_num() local
1362 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
1363 mvq->num_ent = num; in mlx5_vdpa_set_vq_num()
1379 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready() local
1382 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
1384 mvq->ready = ready; in mlx5_vdpa_set_vq_ready()
1391 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_ready() local
1393 return mvq->ready; in mlx5_vdpa_get_vq_ready()
1401 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state() local
1403 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) { in mlx5_vdpa_set_vq_state()
1408 mvq->used_idx = state->avail_index; in mlx5_vdpa_set_vq_state()
1409 mvq->avail_idx = state->avail_index; in mlx5_vdpa_set_vq_state()
1417 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state() local
1425 if (!mvq->initialized) { in mlx5_vdpa_get_vq_state()
1430 state->avail_index = mvq->used_idx; in mlx5_vdpa_get_vq_state()
1434 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
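The set/get state pair (lines 1401-1434) collapses the two ring indices into one: reseeding is only allowed while the queue is not RDY, i.e. quiescent with nothing in flight, and in that state the available and used indices coincide, so a single avail_index restores both (lines 1408-1409) and used_idx is what gets reported back (line 1430). A minimal model of that invariant, with invented struct names:

#include <assert.h>

struct vq_idx { unsigned short avail_idx, used_idx; };

static void set_state(struct vq_idx *vq, unsigned short avail_index)
{
        vq->used_idx = avail_index;     /* line 1408 */
        vq->avail_idx = avail_index;    /* line 1409 */
}

static unsigned short get_state(const struct vq_idx *vq)
{
        /* the used index is what gets reported back (line 1430) */
        return vq->used_idx;
}

int main(void)
{
        struct vq_idx vq = { 0, 0 };

        set_state(&vq, 7);
        assert(get_state(&vq) == 7);
        assert(vq.avail_idx == vq.used_idx);
        return 0;
}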
1529 struct mlx5_vdpa_virtqueue *mvq; in teardown_virtqueues() local
1533 mvq = &ndev->vqs[i]; in teardown_virtqueues()
1534 if (!mvq->initialized) in teardown_virtqueues()
1537 teardown_vq(ndev, mvq); in teardown_virtqueues()
1602 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
1604 struct mlx5_vq_restore_info *ri = &mvq->ri; in save_channel_info()
1608 if (!mvq->initialized) in save_channel_info()
1611 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
1617 ri->ready = mvq->ready; in save_channel_info()
1618 ri->num_ent = mvq->num_ent; in save_channel_info()
1619 ri->desc_addr = mvq->desc_addr; in save_channel_info()
1620 ri->device_addr = mvq->device_addr; in save_channel_info()
1621 ri->driver_addr = mvq->driver_addr; in save_channel_info()
1622 ri->cb = mvq->event_cb; in save_channel_info()
1648 struct mlx5_vdpa_virtqueue *mvq; in restore_channels_info() local
1655 mvq = &ndev->vqs[i]; in restore_channels_info()
1656 ri = &mvq->ri; in restore_channels_info()
1660 mvq->avail_idx = ri->avail_index; in restore_channels_info()
1661 mvq->used_idx = ri->used_index; in restore_channels_info()
1662 mvq->ready = ri->ready; in restore_channels_info()
1663 mvq->num_ent = ri->num_ent; in restore_channels_info()
1664 mvq->desc_addr = ri->desc_addr; in restore_channels_info()
1665 mvq->device_addr = ri->device_addr; in restore_channels_info()
1666 mvq->driver_addr = ri->driver_addr; in restore_channels_info()
1667 mvq->event_cb = ri->cb; in restore_channels_info()
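save_channel_info() and restore_channels_info() (lines 1602-1667) round-trip each queue's software-visible configuration (indices, size, the three ring addresses, callback) through the ri record, so the hardware objects can be destroyed and rebuilt without losing state. An illustrative round-trip with a made-up config struct:

#include <stdio.h>
#include <string.h>

struct vq_cfg {
        int ready;
        unsigned short avail_index, used_index;
        unsigned int num_ent;
        unsigned long long desc_addr, device_addr, driver_addr;
};

int main(void)
{
        struct vq_cfg live = {
                .ready = 1, .avail_index = 10, .used_index = 10,
                .num_ent = 256, .desc_addr = 0x100000,
                .device_addr = 0x102000, .driver_addr = 0x101000,
        };
        struct vq_cfg ri;

        ri = live;                      /* save_channel_info()      */
        memset(&live, 0, sizeof(live)); /* hardware objects rebuilt */
        live = ri;                      /* restore_channels_info()  */
        printf("restored vq: %u entries, ready=%d\n",
               live.num_ent, live.ready);
        return 0;
}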
1965 struct mlx5_vdpa_virtqueue *mvq; in init_mvqs() local
1969 mvq = &ndev->vqs[i]; in init_mvqs()
1970 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in init_mvqs()
1971 mvq->index = i; in init_mvqs()
1972 mvq->ndev = ndev; in init_mvqs()
1973 mvq->fwqp.fw = true; in init_mvqs()
1976 mvq = &ndev->vqs[i]; in init_mvqs()
1977 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in init_mvqs()
1978 mvq->index = i; in init_mvqs()
1979 mvq->ndev = ndev; in init_mvqs()
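The memset on lines 1970 and 1977 zeroes each virtqueue only up to offsetof(struct mlx5_vdpa_virtqueue, ri), so the restore record at the tail of the struct survives re-initialization. A toy layout demonstrating the offsetof idiom:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct restore_info { unsigned short avail_index; };

struct vq_toy {
        unsigned int index;             /* zeroed on re-init */
        int initialized;                /* zeroed on re-init */
        struct restore_info ri;         /* preserved: lies past the memset */
};

int main(void)
{
        struct vq_toy vq = { .index = 5, .initialized = 1,
                             .ri = { .avail_index = 42 } };

        memset(&vq, 0, offsetof(struct vq_toy, ri));
        printf("index=%u initialized=%d ri.avail_index=%u\n",
               vq.index, vq.initialized, vq.ri.avail_index);
        return 0;
}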