Lines matching refs: sess

49 	struct rtrs_clt_sess *sess;  in rtrs_clt_is_connected()  local
53 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) in rtrs_clt_is_connected()
54 connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED; in rtrs_clt_is_connected()
177 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess, in rtrs_permit_to_clt_con() argument
183 id = (permit->cpu_id % (sess->s.con_num - 1)) + 1; in rtrs_permit_to_clt_con()
185 return to_clt_con(sess->s.con[id]); in rtrs_permit_to_clt_con()
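
The modulo at line 183 spreads permits across connections 1..con_num-1; index 0 carries only session/service traffic (see usr_con at line 2403). A minimal user-space sketch of the mapping, with illustrative names:

    #include <assert.h>
    #include <stdio.h>

    /* Map a permit's CPU id onto an I/O connection index.  Connection 0
     * carries service traffic only, so I/O permits land on 1..con_num-1. */
    static unsigned int permit_to_con_id(unsigned int cpu_id,
                                         unsigned int con_num)
    {
        assert(con_num > 1);                 /* at least one I/O connection */
        return (cpu_id % (con_num - 1)) + 1; /* skip index 0 (user con) */
    }

    int main(void)
    {
        for (unsigned int cpu = 0; cpu < 6; cpu++)
            printf("cpu %u -> con %u\n", cpu, permit_to_con_id(cpu, 4));
        return 0;
    }
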
200 static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess, in __rtrs_clt_change_state() argument
206 lockdep_assert_held(&sess->state_wq.lock); in __rtrs_clt_change_state()
208 old_state = sess->state; in __rtrs_clt_change_state()
282 sess->state = new_state; in __rtrs_clt_change_state()
283 wake_up_locked(&sess->state_wq); in __rtrs_clt_change_state()
289 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess, in rtrs_clt_change_state_from_to() argument
295 spin_lock_irq(&sess->state_wq.lock); in rtrs_clt_change_state_from_to()
296 if (sess->state == old_state) in rtrs_clt_change_state_from_to()
297 changed = __rtrs_clt_change_state(sess, new_state); in rtrs_clt_change_state_from_to()
298 spin_unlock_irq(&sess->state_wq.lock); in rtrs_clt_change_state_from_to()
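
Lines 295-298 show the guarded transition at the heart of the client state machine: take the wait-queue lock, verify the expected old state, apply the new one, wake sleepers. A pthread model of the same shape, not the kernel wait-queue API:

    #include <pthread.h>
    #include <stdbool.h>

    enum clt_state { CONNECTING, CONNECTED, RECONNECTING, CLOSING, CLOSED };

    struct state_wq {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        enum clt_state  state;
    };

    static bool change_state_from_to(struct state_wq *wq,
                                     enum clt_state old_state,
                                     enum clt_state new_state)
    {
        bool changed = false;

        pthread_mutex_lock(&wq->lock);
        if (wq->state == old_state) {
            wq->state = new_state;             /* __rtrs_clt_change_state() */
            pthread_cond_broadcast(&wq->cond); /* wake_up_locked() */
            changed = true;
        }
        pthread_mutex_unlock(&wq->lock);
        return changed;
    }
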
305 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_rdma_error_recovery() local
307 if (rtrs_clt_change_state_from_to(sess, in rtrs_rdma_error_recovery()
310 struct rtrs_clt *clt = sess->clt; in rtrs_rdma_error_recovery()
317 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, in rtrs_rdma_error_recovery()
326 rtrs_clt_change_state_from_to(sess, in rtrs_rdma_error_recovery()
337 rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n", in rtrs_clt_fast_reg_done()
357 rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n", in rtrs_clt_inv_rkey_done()
387 struct rtrs_clt_sess *sess; in complete_rdma_req() local
394 sess = to_clt_sess(con->c.sess); in complete_rdma_req()
423 rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n", in complete_rdma_req()
437 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, in complete_rdma_req()
440 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) in complete_rdma_req()
441 atomic_dec(&sess->stats->inflight); in complete_rdma_req()
455 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_post_send_rdma() local
460 rtrs_wrn(con->c.sess, in rtrs_post_send_rdma()
468 sge.lkey = sess->s.dev->ib_pd->local_dma_lkey; in rtrs_post_send_rdma()
474 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ? in rtrs_post_send_rdma()
477 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, in rtrs_post_send_rdma()
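
Line 474 is the classic unsignaled-send optimization: only every queue_depth-th work request asks for a completion, which lets the provider reclaim all the unsignaled requests posted before it. A stand-alone model using C11 atomics; SEND_SIGNALED is a local stand-in for IB_SEND_SIGNALED:

    #include <stdatomic.h>

    #define SEND_SIGNALED 0x1      /* stand-in for IB_SEND_SIGNALED */

    static atomic_uint io_cnt;     /* per-connection in the original */

    static unsigned int send_flags(unsigned int queue_depth)
    {
        /* atomic_fetch_add returns the old value, so add 1 to match
         * the kernel's atomic_inc_return() semantics. */
        return (atomic_fetch_add(&io_cnt, 1) + 1) % queue_depth ?
               0 : SEND_SIGNALED;
    }
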
485 static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id, in process_io_rsp() argument
490 if (WARN_ON(msg_id >= sess->queue_depth)) in process_io_rsp()
493 req = &sess->reqs[msg_id]; in process_io_rsp()
503 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_clt_recv_done() local
505 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); in rtrs_clt_recv_done()
510 rtrs_err(con->c.sess, "post iu failed %d\n", err); in rtrs_clt_recv_done()
517 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_clt_rkey_rsp_done() local
525 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); in rtrs_clt_rkey_rsp_done()
530 rtrs_err(con->c.sess, "rkey response is malformed: size %d\n", in rtrs_clt_rkey_rsp_done()
534 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, in rtrs_clt_rkey_rsp_done()
538 rtrs_err(sess->clt, "rkey response is malformed: type %d\n", in rtrs_clt_rkey_rsp_done()
543 if (WARN_ON(buf_id >= sess->queue_depth)) in rtrs_clt_rkey_rsp_done()
556 sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); in rtrs_clt_rkey_rsp_done()
557 process_io_rsp(sess, msg_id, err, w_inval); in rtrs_clt_rkey_rsp_done()
559 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr, in rtrs_clt_rkey_rsp_done()
596 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_clt_rdma_done() local
603 rtrs_err(sess->clt, "RDMA failed: %s\n", in rtrs_clt_rdma_done()
628 process_io_rsp(sess, msg_id, err, w_inval); in rtrs_clt_rdma_done()
631 rtrs_send_hb_ack(&sess->s); in rtrs_clt_rdma_done()
632 if (sess->flags & RTRS_MSG_NEW_RKEY_F) in rtrs_clt_rdma_done()
636 sess->s.hb_missed_cnt = 0; in rtrs_clt_rdma_done()
637 if (sess->flags & RTRS_MSG_NEW_RKEY_F) in rtrs_clt_rdma_done()
640 rtrs_wrn(con->c.sess, "Unknown IMM type %u\n", in rtrs_clt_rdma_done()
652 rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n", in rtrs_clt_rdma_done()
665 if (sess->flags & RTRS_MSG_NEW_RKEY_F) { in rtrs_clt_rdma_done()
679 rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode); in rtrs_clt_rdma_done()
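
The dispatch at lines 628-640 keys off a message type packed into the RDMA immediate data; the MAX_IMM_PAYL_BITS reference at line 2298 suggests a plain bitfield split between type and payload. A sketch of such an encoding; the exact bit widths are an assumption:

    #include <stdint.h>

    #define IMM_PAYL_BITS 28                       /* assumed split */
    #define IMM_PAYL_MASK ((1u << IMM_PAYL_BITS) - 1)

    static uint32_t imm_pack(uint32_t type, uint32_t payload)
    {
        return (type << IMM_PAYL_BITS) | (payload & IMM_PAYL_MASK);
    }

    static void imm_unpack(uint32_t imm, uint32_t *type, uint32_t *payload)
    {
        *type    = imm >> IMM_PAYL_BITS;
        *payload = imm & IMM_PAYL_MASK;
    }
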
687 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in post_recv_io() local
690 if (sess->flags & RTRS_MSG_NEW_RKEY_F) { in post_recv_io()
704 static int post_recv_sess(struct rtrs_clt_sess *sess) in post_recv_sess() argument
709 for (cid = 0; cid < sess->s.con_num; cid++) { in post_recv_sess()
713 q_size = sess->queue_depth; in post_recv_sess()
721 err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size); in post_recv_sess()
723 rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err); in post_recv_sess()
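
post_recv_sess() (lines 704-723) posts a full receive queue on every connection. The elided lines around 713 presumably size the service connection (cid 0) smaller than the I/O connections; the sketch below models that assumption with a stubbed post function:

    static int post_recv_io_stub(unsigned int cid, unsigned int q_size)
    {
        (void)cid;
        return q_size ? 0 : -1;       /* pretend the posts succeeded */
    }

    static int post_recv_sess_model(unsigned int con_num,
                                    unsigned int queue_depth)
    {
        for (unsigned int cid = 0; cid < con_num; cid++) {
            /* hypothetical small depth for the service connection */
            unsigned int q_size = cid ? queue_depth : 2;
            int err = post_recv_io_stub(cid, q_size);

            if (err)
                return err;           /* caller logs, as at line 723 */
        }
        return 0;
    }
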
809 struct rtrs_clt_sess *sess; in get_next_path_min_inflight() local
813 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { in get_next_path_min_inflight()
814 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) in get_next_path_min_inflight()
817 if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))) in get_next_path_min_inflight()
820 inflight = atomic_read(&sess->stats->inflight); in get_next_path_min_inflight()
824 min_path = sess; in get_next_path_min_inflight()
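
get_next_path_min_inflight() pairs with the atomic_dec() calls at lines 441, 1034 and 1155: inflight rises when a request is issued and falls on completion or failure, so the path with the smallest counter is the least loaded. A user-space model over a plain array rather than an RCU list; the per-CPU skip check at line 817 is omitted:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct path_model {
        bool       connected;
        atomic_int inflight;
    };

    static struct path_model *min_inflight_path(struct path_model *paths,
                                                size_t n)
    {
        struct path_model *best = NULL;
        int best_inflight = INT_MAX;

        for (size_t i = 0; i < n; i++) {
            int inflight;

            if (!paths[i].connected)
                continue;                 /* state != CONNECTED */
            inflight = atomic_load(&paths[i].inflight);
            if (inflight < best_inflight) {
                best_inflight = inflight;
                best = &paths[i];
            }
        }
        return best;                      /* NULL: no usable path */
    }
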
881 struct rtrs_clt_sess *sess, in rtrs_clt_init_req() argument
899 req->con = rtrs_permit_to_clt_con(sess, permit); in rtrs_clt_init_req()
913 rtrs_clt_get_req(struct rtrs_clt_sess *sess, in rtrs_clt_get_req() argument
922 req = &sess->reqs[permit->mem_id]; in rtrs_clt_get_req()
923 rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len, in rtrs_clt_get_req()
951 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_post_rdma_write_sg() local
961 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; in rtrs_post_rdma_write_sg()
965 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; in rtrs_post_rdma_write_sg()
973 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ? in rtrs_post_rdma_write_sg()
976 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, in rtrs_post_rdma_write_sg()
987 struct rtrs_sess *s = con->c.sess; in rtrs_clt_write_req()
988 struct rtrs_clt_sess *sess = to_clt_sess(s); in rtrs_clt_write_req() local
997 if (unlikely(tsize > sess->chunk_size)) { in rtrs_clt_write_req()
999 tsize, sess->chunk_size); in rtrs_clt_write_req()
1003 count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist, in rtrs_clt_write_req()
1020 rbuf = &sess->rbufs[buf_id]; in rtrs_clt_write_req()
1033 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) in rtrs_clt_write_req()
1034 atomic_dec(&sess->stats->inflight); in rtrs_clt_write_req()
1036 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, in rtrs_clt_write_req()
1061 struct rtrs_sess *s = con->c.sess; in rtrs_clt_read_req()
1062 struct rtrs_clt_sess *sess = to_clt_sess(s); in rtrs_clt_read_req() local
1074 s = &sess->s; in rtrs_clt_read_req()
1075 dev = sess->s.dev; in rtrs_clt_read_req()
1077 if (unlikely(tsize > sess->chunk_size)) { in rtrs_clt_read_req()
1080 tsize, sess->chunk_size); in rtrs_clt_read_req()
1150 ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id], in rtrs_clt_read_req()
1154 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) in rtrs_clt_read_req()
1155 atomic_dec(&sess->stats->inflight); in rtrs_clt_read_req()
1204 static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess) in fail_all_outstanding_reqs() argument
1206 struct rtrs_clt *clt = sess->clt; in fail_all_outstanding_reqs()
1210 if (!sess->reqs) in fail_all_outstanding_reqs()
1212 for (i = 0; i < sess->queue_depth; ++i) { in fail_all_outstanding_reqs()
1213 req = &sess->reqs[i]; in fail_all_outstanding_reqs()
1231 static void free_sess_reqs(struct rtrs_clt_sess *sess) in free_sess_reqs() argument
1236 if (!sess->reqs) in free_sess_reqs()
1238 for (i = 0; i < sess->queue_depth; ++i) { in free_sess_reqs()
1239 req = &sess->reqs[i]; in free_sess_reqs()
1243 rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1); in free_sess_reqs()
1245 kfree(sess->reqs); in free_sess_reqs()
1246 sess->reqs = NULL; in free_sess_reqs()
1249 static int alloc_sess_reqs(struct rtrs_clt_sess *sess) in alloc_sess_reqs() argument
1252 struct rtrs_clt *clt = sess->clt; in alloc_sess_reqs()
1255 sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs), in alloc_sess_reqs()
1257 if (!sess->reqs) in alloc_sess_reqs()
1260 for (i = 0; i < sess->queue_depth; ++i) { in alloc_sess_reqs()
1261 req = &sess->reqs[i]; in alloc_sess_reqs()
1262 req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL, in alloc_sess_reqs()
1263 sess->s.dev->ib_dev, in alloc_sess_reqs()
1274 req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, in alloc_sess_reqs()
1275 sess->max_pages_per_mr); in alloc_sess_reqs()
1280 sess->max_pages_per_mr); in alloc_sess_reqs()
1290 free_sess_reqs(sess); in alloc_sess_reqs()
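
alloc_sess_reqs() and free_sess_reqs() (lines 1231-1290) are an all-or-nothing pair: a failure part-way through allocation unwinds through the same free path, which therefore must tolerate half-filled slots. The shape in plain C:

    #include <stdlib.h>

    struct req_model {
        void *iu;      /* stands in for req->iu (and req->mr) */
    };

    static void free_reqs_model(struct req_model *reqs, size_t n)
    {
        if (!reqs)
            return;                    /* mirrors the !sess->reqs guard */
        for (size_t i = 0; i < n; i++)
            free(reqs[i].iu);          /* free(NULL) is a no-op */
        free(reqs);
    }

    static struct req_model *alloc_reqs_model(size_t n, size_t hdr_size)
    {
        struct req_model *reqs = calloc(n, sizeof(*reqs));

        if (!reqs)
            return NULL;
        for (size_t i = 0; i < n; i++) {
            reqs[i].iu = malloc(hdr_size);
            if (!reqs[i].iu) {
                free_reqs_model(reqs, n);   /* unwind everything */
                return NULL;
            }
        }
        return reqs;
    }
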
1343 static void query_fast_reg_mode(struct rtrs_clt_sess *sess) in query_fast_reg_mode() argument
1349 ib_dev = sess->s.dev->ib_dev; in query_fast_reg_mode()
1359 sess->max_pages_per_mr = in query_fast_reg_mode()
1360 min3(sess->max_pages_per_mr, (u32)max_pages_per_mr, in query_fast_reg_mode()
1362 sess->max_send_sge = ib_dev->attrs.max_send_sge; in query_fast_reg_mode()
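
query_fast_reg_mode() clamps the session's pages-per-MR budget to the device limits with a three-way minimum (lines 1359-1361). A sketch; the shift by 12 assumes 4 KiB pages, matching the ">> 12" at line 1456:

    static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
    {
        unsigned int m = a < b ? a : b;
        return m < c ? m : c;
    }

    /* Arguments stand in for the ib_device attrs the kernel consults. */
    static unsigned int clamp_pages_per_mr(unsigned int wanted,
                                           unsigned long long dev_max_mr_size,
                                           unsigned int max_fast_reg_page_list_len)
    {
        unsigned int by_mr_size = (unsigned int)(dev_max_mr_size >> 12);

        return min3u(wanted, by_mr_size, max_fast_reg_page_list_len);
    }
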
1365 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess, in rtrs_clt_change_state_get_old() argument
1371 spin_lock_irq(&sess->state_wq.lock); in rtrs_clt_change_state_get_old()
1372 *old_state = sess->state; in rtrs_clt_change_state_get_old()
1373 changed = __rtrs_clt_change_state(sess, new_state); in rtrs_clt_change_state_get_old()
1374 spin_unlock_irq(&sess->state_wq.lock); in rtrs_clt_change_state_get_old()
1379 static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess, in rtrs_clt_change_state() argument
1384 return rtrs_clt_change_state_get_old(sess, new_state, &old_state); in rtrs_clt_change_state()
1394 static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess) in rtrs_clt_init_hb() argument
1396 rtrs_init_hb(&sess->s, &io_comp_cqe, in rtrs_clt_init_hb()
1403 static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess) in rtrs_clt_start_hb() argument
1405 rtrs_start_hb(&sess->s); in rtrs_clt_start_hb()
1408 static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess) in rtrs_clt_stop_hb() argument
1410 rtrs_stop_hb(&sess->s); in rtrs_clt_stop_hb()
1421 struct rtrs_clt_sess *sess; in alloc_sess() local
1425 sess = kzalloc(sizeof(*sess), GFP_KERNEL); in alloc_sess()
1426 if (!sess) in alloc_sess()
1432 sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL); in alloc_sess()
1433 if (!sess->s.con) in alloc_sess()
1436 sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL); in alloc_sess()
1437 if (!sess->stats) in alloc_sess()
1440 mutex_init(&sess->init_mutex); in alloc_sess()
1441 uuid_gen(&sess->s.uuid); in alloc_sess()
1442 memcpy(&sess->s.dst_addr, path->dst, in alloc_sess()
1451 memcpy(&sess->s.src_addr, path->src, in alloc_sess()
1453 strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname)); in alloc_sess()
1454 sess->s.con_num = con_num; in alloc_sess()
1455 sess->clt = clt; in alloc_sess()
1456 sess->max_pages_per_mr = max_segments * max_segment_size >> 12; in alloc_sess()
1457 init_waitqueue_head(&sess->state_wq); in alloc_sess()
1458 sess->state = RTRS_CLT_CONNECTING; in alloc_sess()
1459 atomic_set(&sess->connected_cnt, 0); in alloc_sess()
1460 INIT_WORK(&sess->close_work, rtrs_clt_close_work); in alloc_sess()
1461 INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work); in alloc_sess()
1462 rtrs_clt_init_hb(sess); in alloc_sess()
1464 sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry)); in alloc_sess()
1465 if (!sess->mp_skip_entry) in alloc_sess()
1469 INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu)); in alloc_sess()
1471 err = rtrs_clt_init_stats(sess->stats); in alloc_sess()
1475 return sess; in alloc_sess()
1478 free_percpu(sess->mp_skip_entry); in alloc_sess()
1480 kfree(sess->stats); in alloc_sess()
1482 kfree(sess->s.con); in alloc_sess()
1484 kfree(sess); in alloc_sess()
1489 void free_sess(struct rtrs_clt_sess *sess) in free_sess() argument
1491 free_percpu(sess->mp_skip_entry); in free_sess()
1492 mutex_destroy(&sess->init_mutex); in free_sess()
1493 kfree(sess->s.con); in free_sess()
1494 kfree(sess->rbufs); in free_sess()
1495 kfree(sess); in free_sess()
1498 static int create_con(struct rtrs_clt_sess *sess, unsigned int cid) in create_con() argument
1509 con->c.sess = &sess->s; in create_con()
1512 sess->s.con[cid] = &con->c; in create_con()
1519 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in destroy_con() local
1521 sess->s.con[con->c.cid] = NULL; in destroy_con()
1527 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in create_con_cq_qp() local
1551 if (WARN_ON(sess->s.dev)) in create_con_cq_qp()
1559 sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, in create_con_cq_qp()
1561 if (!sess->s.dev) { in create_con_cq_qp()
1562 rtrs_wrn(sess->clt, in create_con_cq_qp()
1566 sess->s.dev_ref = 1; in create_con_cq_qp()
1567 query_fast_reg_mode(sess); in create_con_cq_qp()
1574 if (WARN_ON(!sess->s.dev)) in create_con_cq_qp()
1576 if (WARN_ON(!sess->queue_depth)) in create_con_cq_qp()
1580 sess->s.dev_ref++; in create_con_cq_qp()
1582 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr, in create_con_cq_qp()
1584 sess->queue_depth * 3 + 1); in create_con_cq_qp()
1586 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr, in create_con_cq_qp()
1587 sess->queue_depth * 3 + 1); in create_con_cq_qp()
1590 if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { in create_con_cq_qp()
1592 GFP_KERNEL, sess->s.dev->ib_dev, in create_con_cq_qp()
1600 cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors; in create_con_cq_qp()
1601 err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge, in create_con_cq_qp()
1613 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in destroy_con_cq_qp() local
1622 rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size); in destroy_con_cq_qp()
1626 if (sess->s.dev_ref && !--sess->s.dev_ref) { in destroy_con_cq_qp()
1627 rtrs_ib_dev_put(sess->s.dev); in destroy_con_cq_qp()
1628 sess->s.dev = NULL; in destroy_con_cq_qp()
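
Lines 1551-1580 and 1626-1628 manage the shared RDMA device: the first connection resolves and pins it, later connections bump dev_ref, and the last teardown drops it. A compact model of that lifetime:

    struct sess_dev_model {
        void *dev;      /* stands in for sess->s.dev */
        int   dev_ref;
    };

    static int con_setup_model(struct sess_dev_model *s, void *ib_dev,
                               int is_first_con)
    {
        if (is_first_con) {
            if (s->dev)
                return -1;      /* mirrors WARN_ON(sess->s.dev) */
            s->dev = ib_dev;    /* rtrs_ib_dev_find_or_add() */
            s->dev_ref = 1;
        } else {
            if (!s->dev)
                return -1;      /* mirrors WARN_ON(!sess->s.dev) */
            s->dev_ref++;       /* line 1580 */
        }
        return 0;
    }

    static void con_teardown_model(struct sess_dev_model *s)
    {
        if (s->dev_ref && !--s->dev_ref)
            s->dev = NULL;      /* last ref: rtrs_ib_dev_put() */
    }
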
1647 struct rtrs_sess *s = con->c.sess; in rtrs_rdma_addr_resolved()
1664 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_rdma_route_resolved() local
1665 struct rtrs_clt *clt = sess->clt; in rtrs_rdma_route_resolved()
1682 .cid_num = cpu_to_le16(sess->s.con_num), in rtrs_rdma_route_resolved()
1683 .recon_cnt = cpu_to_le16(sess->s.recon_cnt), in rtrs_rdma_route_resolved()
1685 msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0; in rtrs_rdma_route_resolved()
1686 uuid_copy(&msg.sess_uuid, &sess->s.uuid); in rtrs_rdma_route_resolved()
1699 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_rdma_conn_established() local
1700 struct rtrs_clt *clt = sess->clt; in rtrs_rdma_conn_established()
1731 if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) { in rtrs_rdma_conn_established()
1737 sess->reconnect_attempts = -1; in rtrs_rdma_conn_established()
1743 if (!sess->rbufs) { in rtrs_rdma_conn_established()
1744 kfree(sess->rbufs); in rtrs_rdma_conn_established()
1745 sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs), in rtrs_rdma_conn_established()
1747 if (!sess->rbufs) in rtrs_rdma_conn_established()
1750 sess->queue_depth = queue_depth; in rtrs_rdma_conn_established()
1751 sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size); in rtrs_rdma_conn_established()
1752 sess->max_io_size = le32_to_cpu(msg->max_io_size); in rtrs_rdma_conn_established()
1753 sess->flags = le32_to_cpu(msg->flags); in rtrs_rdma_conn_established()
1754 sess->chunk_size = sess->max_io_size + sess->max_hdr_size; in rtrs_rdma_conn_established()
1765 clt->queue_depth = sess->queue_depth; in rtrs_rdma_conn_established()
1766 clt->max_io_size = min_not_zero(sess->max_io_size, in rtrs_rdma_conn_established()
1773 sess->hca_port = con->c.cm_id->port_num; in rtrs_rdma_conn_established()
1774 scnprintf(sess->hca_name, sizeof(sess->hca_name), in rtrs_rdma_conn_established()
1775 sess->s.dev->ib_dev->name); in rtrs_rdma_conn_established()
1776 sess->s.src_addr = con->c.cm_id->route.addr.src_addr; in rtrs_rdma_conn_established()
1778 sess->for_new_clt = 1; in rtrs_rdma_conn_established()
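
Line 1731 rejects a server that renegotiates a different queue depth on reconnect, since the already-allocated rbufs and reqs are sized for the old one. A minimal version of the check; the errno value is illustrative:

    #include <errno.h>

    static int check_queue_depth(unsigned short current_depth,
                                 unsigned short negotiated_depth)
    {
        /* A session that already sized its buffers must see the same
         * depth after reconnect. */
        if (current_depth > 0 && negotiated_depth != current_depth)
            return -ECONNRESET;    /* illustrative error code */
        return 0;
    }
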
1786 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in flag_success_on_conn() local
1788 atomic_inc(&sess->connected_cnt); in flag_success_on_conn()
1795 struct rtrs_sess *s = con->c.sess; in rtrs_rdma_conn_rejected()
1823 static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait) in rtrs_clt_close_conns() argument
1825 if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING)) in rtrs_clt_close_conns()
1826 queue_work(rtrs_wq, &sess->close_work); in rtrs_clt_close_conns()
1828 flush_work(&sess->close_work); in rtrs_clt_close_conns()
1834 struct rtrs_clt_sess *sess; in flag_error_on_conn() local
1836 sess = to_clt_sess(con->c.sess); in flag_error_on_conn()
1837 if (atomic_dec_and_test(&sess->connected_cnt)) in flag_error_on_conn()
1839 wake_up(&sess->state_wq); in flag_error_on_conn()
1848 struct rtrs_sess *s = con->c.sess; in rtrs_clt_rdma_cm_handler()
1849 struct rtrs_clt_sess *sess = to_clt_sess(s); in rtrs_clt_rdma_cm_handler() local
1867 wake_up(&sess->state_wq); in rtrs_clt_rdma_cm_handler()
1892 rtrs_clt_close_conns(sess, false); in rtrs_clt_rdma_cm_handler()
1914 struct rtrs_sess *s = con->c.sess; in create_cm()
1915 struct rtrs_clt_sess *sess = to_clt_sess(s); in create_cm() local
1920 sess->s.dst_addr.ss_family == AF_IB ? in create_cm()
1936 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr, in create_cm()
1937 (struct sockaddr *)&sess->s.dst_addr, in create_cm()
1949 sess->state_wq, in create_cm()
1950 con->cm_err || sess->state != RTRS_CLT_CONNECTING, in create_cm()
1962 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) { in create_cm()
1980 static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess) in rtrs_clt_sess_up() argument
1982 struct rtrs_clt *clt = sess->clt; in rtrs_clt_sess_up()
2006 sess->established = true; in rtrs_clt_sess_up()
2007 sess->reconnect_attempts = 0; in rtrs_clt_sess_up()
2008 sess->stats->reconnects.successful_cnt++; in rtrs_clt_sess_up()
2011 static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess) in rtrs_clt_sess_down() argument
2013 struct rtrs_clt *clt = sess->clt; in rtrs_clt_sess_down()
2015 if (!sess->established) in rtrs_clt_sess_down()
2018 sess->established = false; in rtrs_clt_sess_down()
2026 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) in rtrs_clt_stop_and_destroy_conns() argument
2031 WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED); in rtrs_clt_stop_and_destroy_conns()
2037 mutex_lock(&sess->init_mutex); in rtrs_clt_stop_and_destroy_conns()
2038 mutex_unlock(&sess->init_mutex); in rtrs_clt_stop_and_destroy_conns()
2046 rtrs_clt_stop_hb(sess); in rtrs_clt_stop_and_destroy_conns()
2055 for (cid = 0; cid < sess->s.con_num; cid++) { in rtrs_clt_stop_and_destroy_conns()
2056 if (!sess->s.con[cid]) in rtrs_clt_stop_and_destroy_conns()
2058 con = to_clt_con(sess->s.con[cid]); in rtrs_clt_stop_and_destroy_conns()
2061 fail_all_outstanding_reqs(sess); in rtrs_clt_stop_and_destroy_conns()
2062 free_sess_reqs(sess); in rtrs_clt_stop_and_destroy_conns()
2063 rtrs_clt_sess_down(sess); in rtrs_clt_stop_and_destroy_conns()
2073 wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt), in rtrs_clt_stop_and_destroy_conns()
2076 for (cid = 0; cid < sess->s.con_num; cid++) { in rtrs_clt_stop_and_destroy_conns()
2077 if (!sess->s.con[cid]) in rtrs_clt_stop_and_destroy_conns()
2079 con = to_clt_con(sess->s.con[cid]); in rtrs_clt_stop_and_destroy_conns()
2087 struct rtrs_clt_sess *sess, in xchg_sessions() argument
2094 return sess == cmpxchg(ppcpu_path, sess, next); in xchg_sessions()
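
xchg_sessions() (line 2094) clears a per-CPU round-robin pointer only if it still refers to the path being removed, leaving readers that already rotated past it untouched. The same compare-and-swap in C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct sess_model;

    static bool xchg_path(_Atomic(struct sess_model *) *slot,
                          struct sess_model *dying,
                          struct sess_model *next)
    {
        struct sess_model *expected = dying;

        /* Succeeds (and installs 'next') only if *slot == dying,
         * mirroring 'sess == cmpxchg(ppcpu_path, sess, next)'. */
        return atomic_compare_exchange_strong(slot, &expected, next);
    }
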
2097 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) in rtrs_clt_remove_path_from_arr() argument
2099 struct rtrs_clt *clt = sess->clt; in rtrs_clt_remove_path_from_arr()
2105 list_del_rcu(&sess->s.entry); in rtrs_clt_remove_path_from_arr()
2146 next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry, in rtrs_clt_remove_path_from_arr()
2159 lockdep_is_held(&clt->paths_mutex)) != sess) in rtrs_clt_remove_path_from_arr()
2172 if (xchg_sessions(ppcpu_path, sess, next)) in rtrs_clt_remove_path_from_arr()
2187 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess, in rtrs_clt_add_path_to_arr() argument
2190 struct rtrs_clt *clt = sess->clt; in rtrs_clt_add_path_to_arr()
2195 list_add_tail_rcu(&sess->s.entry, &clt->paths_list); in rtrs_clt_add_path_to_arr()
2201 struct rtrs_clt_sess *sess; in rtrs_clt_close_work() local
2203 sess = container_of(work, struct rtrs_clt_sess, close_work); in rtrs_clt_close_work()
2205 cancel_delayed_work_sync(&sess->reconnect_dwork); in rtrs_clt_close_work()
2206 rtrs_clt_stop_and_destroy_conns(sess); in rtrs_clt_close_work()
2207 rtrs_clt_change_state(sess, RTRS_CLT_CLOSED); in rtrs_clt_close_work()
2210 static int init_conns(struct rtrs_clt_sess *sess) in init_conns() argument
2220 sess->s.recon_cnt++; in init_conns()
2223 for (cid = 0; cid < sess->s.con_num; cid++) { in init_conns()
2224 err = create_con(sess, cid); in init_conns()
2228 err = create_cm(to_clt_con(sess->s.con[cid])); in init_conns()
2230 destroy_con(to_clt_con(sess->s.con[cid])); in init_conns()
2234 err = alloc_sess_reqs(sess); in init_conns()
2238 rtrs_clt_start_hb(sess); in init_conns()
2244 struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]); in init_conns()
2256 rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR); in init_conns()
2264 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_clt_info_req_done() local
2268 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); in rtrs_clt_info_req_done()
2271 rtrs_err(sess->clt, "Sess info request send failed: %s\n", in rtrs_clt_info_req_done()
2273 rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR); in rtrs_clt_info_req_done()
2280 static int process_info_rsp(struct rtrs_clt_sess *sess, in process_info_rsp() argument
2294 (ilog2(sess->chunk_size - 1) + 1) > in process_info_rsp()
2296 rtrs_err(sess->clt, in process_info_rsp()
2298 MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size); in process_info_rsp()
2301 if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) { in process_info_rsp()
2302 rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n", in process_info_rsp()
2307 for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) { in process_info_rsp()
2318 if (unlikely(!len || (len % sess->chunk_size))) { in process_info_rsp()
2319 rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi, in process_info_rsp()
2323 for ( ; len && i < sess->queue_depth; i++) { in process_info_rsp()
2324 sess->rbufs[i].addr = addr; in process_info_rsp()
2325 sess->rbufs[i].rkey = rkey; in process_info_rsp()
2327 len -= sess->chunk_size; in process_info_rsp()
2328 addr += sess->chunk_size; in process_info_rsp()
2332 if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) { in process_info_rsp()
2333 rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n"); in process_info_rsp()
2336 if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) { in process_info_rsp()
2337 rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len); in process_info_rsp()
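
Lines 2307-2337 flatten the server's scatter list into one rbuf descriptor per chunk: each sg entry of length len yields len/chunk_size consecutive chunks sharing one rkey, and the walk must consume both the sg vector and the queue-depth slots exactly. A simplified model over plain arrays:

    #include <stddef.h>
    #include <stdint.h>

    struct rbuf_model { uint64_t addr; uint32_t rkey; };

    static int unpack_chunks(struct rbuf_model *rbufs, size_t queue_depth,
                             const uint64_t *addrs, const uint32_t *lens,
                             const uint32_t *rkeys, size_t sg_cnt,
                             uint32_t chunk_size)
    {
        size_t sgi, i = 0;

        for (sgi = 0; sgi < sg_cnt && i < queue_depth; sgi++) {
            uint64_t addr = addrs[sgi];
            uint32_t len  = lens[sgi];

            if (!len || len % chunk_size)
                return -1;               /* "Incorrect [%d].len" */
            for (; len && i < queue_depth; i++) {
                rbufs[i].addr = addr;
                rbufs[i].rkey = rkeys[sgi];
                len  -= chunk_size;
                addr += chunk_size;
            }
        }
        /* Both the sg vector and the chunk slots must be consumed
         * exactly (line 2332). */
        return (sgi == sg_cnt && i == queue_depth) ? 0 : -1;
    }
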
2347 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); in rtrs_clt_info_rsp_done() local
2359 rtrs_err(sess->clt, "Sess info response recv failed: %s\n", in rtrs_clt_info_rsp_done()
2366 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2370 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, in rtrs_clt_info_rsp_done()
2374 rtrs_err(sess->clt, "Sess info response is malformed: type %d\n", in rtrs_clt_info_rsp_done()
2381 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2385 err = process_info_rsp(sess, msg); in rtrs_clt_info_rsp_done()
2389 err = post_recv_sess(sess); in rtrs_clt_info_rsp_done()
2397 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); in rtrs_clt_info_rsp_done()
2398 rtrs_clt_change_state(sess, state); in rtrs_clt_info_rsp_done()
2401 static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) in rtrs_send_sess_info() argument
2403 struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]); in rtrs_send_sess_info()
2413 sess->s.dev->ib_dev, DMA_TO_DEVICE, in rtrs_send_sess_info()
2415 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev, in rtrs_send_sess_info()
2424 rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err); in rtrs_send_sess_info()
2431 memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname)); in rtrs_send_sess_info()
2433 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr, in rtrs_send_sess_info()
2439 rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err); in rtrs_send_sess_info()
2445 wait_event_interruptible_timeout(sess->state_wq, in rtrs_send_sess_info()
2446 sess->state != RTRS_CLT_CONNECTING, in rtrs_send_sess_info()
2449 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) { in rtrs_send_sess_info()
2450 if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR) in rtrs_send_sess_info()
2459 rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); in rtrs_send_sess_info()
2461 rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); in rtrs_send_sess_info()
2464 rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR); in rtrs_send_sess_info()
2475 static int init_sess(struct rtrs_clt_sess *sess) in init_sess() argument
2479 mutex_lock(&sess->init_mutex); in init_sess()
2480 err = init_conns(sess); in init_sess()
2482 rtrs_err(sess->clt, "init_conns(), err: %d\n", err); in init_sess()
2485 err = rtrs_send_sess_info(sess); in init_sess()
2487 rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err); in init_sess()
2490 rtrs_clt_sess_up(sess); in init_sess()
2492 mutex_unlock(&sess->init_mutex); in init_sess()
2499 struct rtrs_clt_sess *sess; in rtrs_clt_reconnect_work() local
2504 sess = container_of(to_delayed_work(work), struct rtrs_clt_sess, in rtrs_clt_reconnect_work()
2506 clt = sess->clt; in rtrs_clt_reconnect_work()
2508 if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING) in rtrs_clt_reconnect_work()
2511 if (sess->reconnect_attempts >= clt->max_reconnect_attempts) { in rtrs_clt_reconnect_work()
2513 rtrs_clt_close_conns(sess, false); in rtrs_clt_reconnect_work()
2516 sess->reconnect_attempts++; in rtrs_clt_reconnect_work()
2519 rtrs_clt_stop_and_destroy_conns(sess); in rtrs_clt_reconnect_work()
2521 if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) { in rtrs_clt_reconnect_work()
2522 err = init_sess(sess); in rtrs_clt_reconnect_work()
2530 if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) { in rtrs_clt_reconnect_work()
2531 sess->stats->reconnects.fail_cnt++; in rtrs_clt_reconnect_work()
2533 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, in rtrs_clt_reconnect_work()
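
rtrs_clt_reconnect_work() (lines 2499-2533) caps retries at max_reconnect_attempts, tears down the old connections, re-runs init_sess(), and re-queues itself with a delay on failure. The same control flow flattened into a loop; the kernel version stays event-driven through delayed work, and the delay here is illustrative:

    #include <stdbool.h>
    #include <unistd.h>

    static bool reconnect_model(int max_attempts,
                                bool (*try_init)(void *), void *arg)
    {
        for (int attempt = 0; attempt < max_attempts; attempt++) {
            if (try_init(arg))
                return true;       /* rtrs_clt_sess_up() path */
            sleep(1);              /* stands in for queue_delayed_work() */
        }
        return false;              /* give up: close the connections */
    }
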
2672 struct rtrs_clt_sess *sess, *tmp; in rtrs_clt_open() local
2685 struct rtrs_clt_sess *sess; in rtrs_clt_open() local
2687 sess = alloc_sess(clt, &paths[i], nr_cpu_ids, in rtrs_clt_open()
2689 if (IS_ERR(sess)) { in rtrs_clt_open()
2690 err = PTR_ERR(sess); in rtrs_clt_open()
2694 sess->for_new_clt = 1; in rtrs_clt_open()
2695 list_add_tail_rcu(&sess->s.entry, &clt->paths_list); in rtrs_clt_open()
2697 err = init_sess(sess); in rtrs_clt_open()
2699 list_del_rcu(&sess->s.entry); in rtrs_clt_open()
2700 rtrs_clt_close_conns(sess, true); in rtrs_clt_open()
2701 free_percpu(sess->stats->pcpu_stats); in rtrs_clt_open()
2702 kfree(sess->stats); in rtrs_clt_open()
2703 free_sess(sess); in rtrs_clt_open()
2707 err = rtrs_clt_create_sess_files(sess); in rtrs_clt_open()
2709 list_del_rcu(&sess->s.entry); in rtrs_clt_open()
2710 rtrs_clt_close_conns(sess, true); in rtrs_clt_open()
2711 free_percpu(sess->stats->pcpu_stats); in rtrs_clt_open()
2712 kfree(sess->stats); in rtrs_clt_open()
2713 free_sess(sess); in rtrs_clt_open()
2724 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { in rtrs_clt_open()
2725 rtrs_clt_destroy_sess_files(sess, NULL); in rtrs_clt_open()
2726 rtrs_clt_close_conns(sess, true); in rtrs_clt_open()
2727 kobject_put(&sess->kobj); in rtrs_clt_open()
2744 struct rtrs_clt_sess *sess, *tmp; in rtrs_clt_close() local
2751 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { in rtrs_clt_close()
2752 rtrs_clt_close_conns(sess, true); in rtrs_clt_close()
2753 rtrs_clt_destroy_sess_files(sess, NULL); in rtrs_clt_close()
2754 kobject_put(&sess->kobj); in rtrs_clt_close()
2761 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess) in rtrs_clt_reconnect_from_sysfs() argument
2767 changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, in rtrs_clt_reconnect_from_sysfs()
2770 sess->reconnect_attempts = 0; in rtrs_clt_reconnect_from_sysfs()
2771 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0); in rtrs_clt_reconnect_from_sysfs()
2779 flush_delayed_work(&sess->reconnect_dwork); in rtrs_clt_reconnect_from_sysfs()
2780 err = (READ_ONCE(sess->state) == in rtrs_clt_reconnect_from_sysfs()
2787 int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess) in rtrs_clt_disconnect_from_sysfs() argument
2789 rtrs_clt_close_conns(sess, true); in rtrs_clt_disconnect_from_sysfs()
2794 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, in rtrs_clt_remove_path_from_sysfs() argument
2810 rtrs_clt_close_conns(sess, true); in rtrs_clt_remove_path_from_sysfs()
2811 changed = rtrs_clt_change_state_get_old(sess, in rtrs_clt_remove_path_from_sysfs()
2817 rtrs_clt_remove_path_from_arr(sess); in rtrs_clt_remove_path_from_sysfs()
2818 rtrs_clt_destroy_sess_files(sess, sysfs_self); in rtrs_clt_remove_path_from_sysfs()
2819 kobject_put(&sess->kobj); in rtrs_clt_remove_path_from_sysfs()
2865 struct rtrs_clt_sess *sess; in rtrs_clt_request() local
2887 (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { in rtrs_clt_request()
2888 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) in rtrs_clt_request()
2891 if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) { in rtrs_clt_request()
2892 rtrs_wrn_rl(sess->clt, in rtrs_clt_request()
2895 usr_len, hdr_len, sess->max_hdr_size); in rtrs_clt_request()
2899 req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv, in rtrs_clt_request()
2945 struct rtrs_clt_sess *sess; in rtrs_clt_create_path_from_sysfs() local
2948 sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments, in rtrs_clt_create_path_from_sysfs()
2950 if (IS_ERR(sess)) in rtrs_clt_create_path_from_sysfs()
2951 return PTR_ERR(sess); in rtrs_clt_create_path_from_sysfs()
2958 rtrs_clt_add_path_to_arr(sess, addr); in rtrs_clt_create_path_from_sysfs()
2960 err = init_sess(sess); in rtrs_clt_create_path_from_sysfs()
2964 err = rtrs_clt_create_sess_files(sess); in rtrs_clt_create_path_from_sysfs()
2971 rtrs_clt_remove_path_from_arr(sess); in rtrs_clt_create_path_from_sysfs()
2972 rtrs_clt_close_conns(sess, true); in rtrs_clt_create_path_from_sysfs()
2973 free_percpu(sess->stats->pcpu_stats); in rtrs_clt_create_path_from_sysfs()
2974 kfree(sess->stats); in rtrs_clt_create_path_from_sysfs()
2975 free_sess(sess); in rtrs_clt_create_path_from_sysfs()