Lines matching refs: csk (Chelsio cxgbit iSCSI target connection management)
469 struct cxgbit_sock *csk) in cxgbit_set_conn_info() argument
472 conn->login_sockaddr = csk->com.remote_addr; in cxgbit_set_conn_info()
473 conn->local_sockaddr = csk->com.local_addr; in cxgbit_set_conn_info()
479 struct cxgbit_sock *csk; in cxgbit_accept_np() local
504 csk = list_first_entry(&cnp->np_accept_list, in cxgbit_accept_np()
508 list_del_init(&csk->accept_node); in cxgbit_accept_np()
510 conn->context = csk; in cxgbit_accept_np()
511 csk->conn = conn; in cxgbit_accept_np()
513 cxgbit_set_conn_info(np, conn, csk); in cxgbit_accept_np()
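
The accept path above parks fully established sockets on cnp->np_accept_list; cxgbit_accept_np() detaches the oldest one and cross-links it with the iSCSI connection (conn->context = csk, csk->conn = conn). Below is a minimal userspace model of that intrusive-list handoff, with a hand-rolled list_head standing in for the kernel's; the struct names here are illustrative, not the driver's.

    #include <stddef.h>

    /* Minimal stand-ins for the kernel's list_head primitives. */
    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_del_init(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        e->next = e->prev = e;       /* re-initialised, as list_del_init() does */
    }

    struct conn { void *context; };
    struct sock {                    /* models cxgbit_sock for this sketch */
        struct list_head accept_node;
        struct conn *conn;
    };

    /* Pop the oldest established socket and bind it to the new connection. */
    static struct sock *accept_pop(struct list_head *accept_list, struct conn *c)
    {
        struct list_head *first = accept_list->next;

        if (first == accept_list)            /* list empty */
            return NULL;

        struct sock *sk = container_of(first, struct sock, accept_node);
        list_del_init(&sk->accept_node);     /* list_del_init(&csk->accept_node) */
        c->context = sk;                     /* conn->context = csk */
        sk->conn = c;                        /* csk->conn = conn */
        return sk;
    }
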
598 static void __cxgbit_free_conn(struct cxgbit_sock *csk);
603 struct cxgbit_sock *csk, *tmp; in cxgbit_free_np() local
612 list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) { in cxgbit_free_np()
613 list_del_init(&csk->accept_node); in cxgbit_free_np()
614 __cxgbit_free_conn(csk); in cxgbit_free_np()
622 static void cxgbit_send_halfclose(struct cxgbit_sock *csk) in cxgbit_send_halfclose() argument
631 cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx, in cxgbit_send_halfclose()
635 __skb_queue_tail(&csk->txq, skb); in cxgbit_send_halfclose()
636 cxgbit_push_tx_frames(csk); in cxgbit_send_halfclose()
641 struct cxgbit_sock *csk = handle; in cxgbit_arp_failure_discard() local
645 cxgbit_put_csk(csk); in cxgbit_arp_failure_discard()
658 static int cxgbit_send_abort_req(struct cxgbit_sock *csk) in cxgbit_send_abort_req() argument
664 __func__, csk, csk->tid, csk->com.state); in cxgbit_send_abort_req()
666 __skb_queue_purge(&csk->txq); in cxgbit_send_abort_req()
668 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) in cxgbit_send_abort_req()
669 cxgbit_send_tx_flowc_wr(csk); in cxgbit_send_abort_req()
671 skb = __skb_dequeue(&csk->skbq); in cxgbit_send_abort_req()
672 cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx, in cxgbit_send_abort_req()
673 csk->com.cdev, cxgbit_abort_arp_failure); in cxgbit_send_abort_req()
675 return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); in cxgbit_send_abort_req()
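
Two details in cxgbit_send_abort_req() are worth noting: a FLOWC work request must precede any other WR on a flow, so CSK_TX_DATA_SENT is test-and-set to emit it exactly once, and the abort itself uses an skb pre-allocated on csk->skbq so connection teardown can never fail on memory allocation. A hedged C11 model of the once-only gate (the send_* helpers are stand-ins):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Models test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags): the first
     * caller sees "was clear" and sends the FLOWC; everyone after skips it. */
    static atomic_flag tx_data_sent = ATOMIC_FLAG_INIT;

    static void send_flowc(void) { puts("FLOWC WR sent (first WR on flow)"); }
    static void send_abort(void) { puts("ABORT_REQ sent"); }

    static void abort_req(void)
    {
        if (!atomic_flag_test_and_set(&tx_data_sent))
            send_flowc();     /* only if no TX WR has gone out yet */
        send_abort();         /* uses a pre-allocated skb in the driver */
    }

    int main(void)
    {
        abort_req();          /* emits FLOWC + abort */
        abort_req();          /* FLOWC already sent; abort only */
        return 0;
    }
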
679 __cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb) in __cxgbit_abort_conn() argument
683 if (csk->com.state != CSK_STATE_ESTABLISHED) in __cxgbit_abort_conn()
686 set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags); in __cxgbit_abort_conn()
687 csk->com.state = CSK_STATE_ABORTING; in __cxgbit_abort_conn()
689 cxgbit_send_abort_req(csk); in __cxgbit_abort_conn()
694 cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE); in __cxgbit_abort_conn()
695 cxgbit_put_csk(csk); in __cxgbit_abort_conn()
698 void cxgbit_abort_conn(struct cxgbit_sock *csk) in cxgbit_abort_conn() argument
702 cxgbit_get_csk(csk); in cxgbit_abort_conn()
703 cxgbit_init_wr_wait(&csk->com.wr_wait); in cxgbit_abort_conn()
705 spin_lock_bh(&csk->lock); in cxgbit_abort_conn()
706 if (csk->lock_owner) { in cxgbit_abort_conn()
708 __skb_queue_tail(&csk->backlogq, skb); in cxgbit_abort_conn()
710 __cxgbit_abort_conn(csk, skb); in cxgbit_abort_conn()
712 spin_unlock_bh(&csk->lock); in cxgbit_abort_conn()
714 cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait, in cxgbit_abort_conn()
715 csk->tid, 600, __func__); in cxgbit_abort_conn()
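
cxgbit_abort_conn() shows the driver's socket-lock discipline: if another context currently owns the csk (csk->lock_owner), the CPL is queued on csk->backlogq for the owner to replay when it drops the lock; otherwise it is handled inline under the spinlock. The caller then blocks in cxgbit_wait_for_reply() (the 600 in the fragment is a timeout in seconds) until the abort completes. A compact pthread model of the defer-to-owner idea, with invented names throughout and a LIFO backlog where the driver uses a FIFO skb queue:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct work { struct work *next; const char *what; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool lock_owner;            /* models csk->lock_owner */
    static struct work *backlogq;      /* models csk->backlogq */

    static void handle(struct work *w) { printf("handled: %s\n", w->what); }

    /* Deliver work: process inline unless some task owns the socket,
     * in which case park it on the backlog for the owner to replay. */
    static void deliver(struct work *w)
    {
        pthread_mutex_lock(&lock);
        if (lock_owner) {
            w->next = backlogq;
            backlogq = w;              /* __skb_queue_tail(&csk->backlogq, skb) */
        } else {
            handle(w);                 /* __cxgbit_abort_conn(csk, skb) */
        }
        pthread_mutex_unlock(&lock);
    }

    /* The owner releasing the socket replays everything deferred meanwhile. */
    static void release_owner(void)
    {
        pthread_mutex_lock(&lock);
        while (backlogq) {
            struct work *w = backlogq;
            backlogq = w->next;
            handle(w);
        }
        lock_owner = false;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        struct work abort_work = { .what = "abort req" };
        lock_owner = true;
        deliver(&abort_work);          /* deferred to the backlog */
        release_owner();               /* replayed here */
        return 0;
    }
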
718 static void __cxgbit_free_conn(struct cxgbit_sock *csk) in __cxgbit_free_conn() argument
720 struct iscsi_conn *conn = csk->conn; in __cxgbit_free_conn()
724 __func__, csk->com.state); in __cxgbit_free_conn()
726 spin_lock_bh(&csk->lock); in __cxgbit_free_conn()
727 switch (csk->com.state) { in __cxgbit_free_conn()
730 csk->com.state = CSK_STATE_CLOSING; in __cxgbit_free_conn()
731 cxgbit_send_halfclose(csk); in __cxgbit_free_conn()
733 csk->com.state = CSK_STATE_ABORTING; in __cxgbit_free_conn()
734 cxgbit_send_abort_req(csk); in __cxgbit_free_conn()
738 csk->com.state = CSK_STATE_MORIBUND; in __cxgbit_free_conn()
739 cxgbit_send_halfclose(csk); in __cxgbit_free_conn()
746 __func__, csk, csk->com.state); in __cxgbit_free_conn()
748 spin_unlock_bh(&csk->lock); in __cxgbit_free_conn()
751 cxgbit_put_csk(csk); in __cxgbit_free_conn()
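
The switch in __cxgbit_free_conn() encodes the teardown state machine visible in the fragments: an established connection moves to CSK_STATE_CLOSING and sends a half-close (FIN), one that must die immediately moves to CSK_STATE_ABORTING and sends an abort (RST), and one whose peer already closed moves to CSK_STATE_MORIBUND after our half-close. A schematic C rendering; the state names match the fragments, but the condition choosing graceful close versus abort is hedged here:

    #include <stdio.h>

    enum csk_state { ESTABLISHED, CLOSING, ABORTING, MORIBUND, DEAD };

    static void send_halfclose(void) { puts("-> CLOSE_CON_REQ (FIN)"); }
    static void send_abort_req(void) { puts("-> ABORT_REQ (RST)"); }

    /* Sketch of the __cxgbit_free_conn() teardown decision. */
    static enum csk_state free_conn(enum csk_state s, int graceful)
    {
        switch (s) {
        case ESTABLISHED:
            if (!graceful) {          /* condition assumed, not shown above */
                send_abort_req();
                return ABORTING;
            }
            send_halfclose();
            return CLOSING;
        case CLOSING:                 /* peer closed first; finish our side */
            send_halfclose();
            return MORIBUND;
        default:
            return DEAD;              /* already aborting/moribund */
        }
    }

    int main(void)
    {
        printf("state=%d\n", free_conn(ESTABLISHED, 1));
        return 0;
    }
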
759 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt) in cxgbit_set_emss() argument
761 csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] - in cxgbit_set_emss()
762 ((csk->com.remote_addr.ss_family == AF_INET) ? in cxgbit_set_emss()
765 csk->mss = csk->emss; in cxgbit_set_emss()
767 csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4); in cxgbit_set_emss()
768 if (csk->emss < 128) in cxgbit_set_emss()
769 csk->emss = 128; in cxgbit_set_emss()
770 if (csk->emss & 7) in cxgbit_set_emss()
772 TCPOPT_MSS_G(opt), csk->mss, csk->emss); in cxgbit_set_emss()
774 csk->mss, csk->emss); in cxgbit_set_emss()
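
cxgbit_set_emss() derives the effective MSS from the adapter's MTU-table entry selected by the negotiated TCP MSS option: subtract the IPv4 or IPv6 plus TCP header sizes, subtract a 4-byte-aligned timestamp option when one is in use, and clamp at a 128-byte floor. The same arithmetic, self-contained (fixed header sizes assumed; the real driver indexes lldi.mtus by TCPOPT_MSS):

    #include <stdio.h>

    #define TCPOLEN_TIMESTAMP 10
    #define ROUND_UP(x, a)    ((((x) + (a) - 1) / (a)) * (a))

    /* Effective MSS: MTU-table entry minus L3+L4 headers, minus the
     * timestamp option (padded to 4 bytes), floored at 128 bytes. */
    static unsigned int compute_emss(unsigned int mtu, int ipv6, int timestamps)
    {
        unsigned int hdrs = (ipv6 ? 40 : 20) + 20;  /* IP + TCP fixed headers */
        unsigned int emss = mtu - hdrs;

        if (timestamps)
            emss -= ROUND_UP(TCPOLEN_TIMESTAMP, 4); /* 10 -> 12 bytes */
        if (emss < 128)
            emss = 128;
        return emss;
    }

    int main(void)
    {
        printf("emss(1500, v4, ts) = %u\n", compute_emss(1500, 0, 1)); /* 1448 */
        return 0;
    }
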
777 static void cxgbit_free_skb(struct cxgbit_sock *csk) in cxgbit_free_skb() argument
781 __skb_queue_purge(&csk->txq); in cxgbit_free_skb()
782 __skb_queue_purge(&csk->rxq); in cxgbit_free_skb()
783 __skb_queue_purge(&csk->backlogq); in cxgbit_free_skb()
784 __skb_queue_purge(&csk->ppodq); in cxgbit_free_skb()
785 __skb_queue_purge(&csk->skbq); in cxgbit_free_skb()
787 while ((skb = cxgbit_sock_dequeue_wr(csk))) in cxgbit_free_skb()
790 __kfree_skb(csk->lro_hskb); in cxgbit_free_skb()
795 struct cxgbit_sock *csk; in _cxgbit_free_csk() local
798 csk = container_of(kref, struct cxgbit_sock, kref); in _cxgbit_free_csk()
800 pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state); in _cxgbit_free_csk()
802 if (csk->com.local_addr.ss_family == AF_INET6) { in _cxgbit_free_csk()
804 &csk->com.local_addr; in _cxgbit_free_csk()
805 cxgb4_clip_release(csk->com.cdev->lldi.ports[0], in _cxgbit_free_csk()
810 cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid, in _cxgbit_free_csk()
811 csk->com.local_addr.ss_family); in _cxgbit_free_csk()
812 dst_release(csk->dst); in _cxgbit_free_csk()
813 cxgb4_l2t_release(csk->l2t); in _cxgbit_free_csk()
815 cdev = csk->com.cdev; in _cxgbit_free_csk()
817 list_del(&csk->list); in _cxgbit_free_csk()
820 cxgbit_free_skb(csk); in _cxgbit_free_csk()
821 cxgbit_put_cnp(csk->cnp); in _cxgbit_free_csk()
824 kfree(csk); in _cxgbit_free_csk()
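
_cxgbit_free_csk() is the kref release callback: it runs only when the last cxgbit_put_csk() drops the refcount to zero, and only then tears down hardware state (IPv6 CLIP entry, TID, dst, L2T entry), drains every queue, drops the cnp reference, and frees the socket. A minimal stand-in for that kref pattern using C11 atomics; release() here is illustrative:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcount;
        /* ... connection state ... */
    };

    static void release(struct obj *o)       /* models _cxgbit_free_csk() */
    {
        puts("last ref gone: release TID/L2T/dst, purge queues, kfree");
        free(o);
    }

    static void obj_get(struct obj *o)       /* models cxgbit_get_csk() */
    {
        atomic_fetch_add_explicit(&o->refcount, 1, memory_order_relaxed);
    }

    static void obj_put(struct obj *o)       /* models cxgbit_put_csk() */
    {
        if (atomic_fetch_sub_explicit(&o->refcount, 1,
                                      memory_order_acq_rel) == 1)
            release(o);                      /* we dropped the final reference */
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));
        atomic_init(&o->refcount, 1);        /* kref_init() */
        obj_get(o);
        obj_put(o);                          /* still referenced */
        obj_put(o);                          /* refcount hits 0 -> release() */
        return 0;
    }
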
827 static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi) in cxgbit_set_tcp_window() argument
836 csk->rcv_win = CXGBIT_10G_RCV_WIN; in cxgbit_set_tcp_window()
838 csk->rcv_win *= scale; in cxgbit_set_tcp_window()
841 csk->snd_win = CXGBIT_10G_SND_WIN; in cxgbit_set_tcp_window()
843 csk->snd_win *= scale; in cxgbit_set_tcp_window()
846 __func__, csk->snd_win, csk->rcv_win); in cxgbit_set_tcp_window()
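
cxgbit_set_tcp_window() starts both windows from 10 Gb/s baselines (CXGBIT_10G_SND_WIN / CXGBIT_10G_RCV_WIN) and multiplies by a scale factor on faster ports, so the windows track the larger bandwidth-delay product of 25/40/100 Gb/s links. A heavily hedged sketch, assuming the scale is simply link speed relative to 10 Gb/s; the fragments do not show the driver's exact derivation, and the baseline values below are illustrative:

    /* Windows sized for a 10G baseline, scaled up on faster links.
     * Scale derivation and constants are assumptions, not the driver's code. */
    #define CXGBIT_10G_RCV_WIN (256 * 1024)
    #define CXGBIT_10G_SND_WIN (128 * 1024)

    static void set_tcp_window(unsigned int link_mbps,
                               unsigned int *snd_win, unsigned int *rcv_win)
    {
        unsigned int scale = link_mbps / 10000;   /* in 10G units, assumed */

        *rcv_win = CXGBIT_10G_RCV_WIN;
        *snd_win = CXGBIT_10G_SND_WIN;
        if (scale > 1) {
            *rcv_win *= scale;
            *snd_win *= scale;
        }
    }
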
897 cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, in cxgbit_offload_init() argument
931 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, in cxgbit_offload_init()
933 if (!csk->l2t) in cxgbit_offload_init()
935 csk->mtu = ndev->mtu; in cxgbit_offload_init()
936 csk->tx_chan = cxgb4_port_chan(ndev); in cxgbit_offload_init()
937 csk->smac_idx = in cxgbit_offload_init()
941 csk->txq_idx = cxgb4_port_idx(ndev) * step; in cxgbit_offload_init()
944 csk->ctrlq_idx = cxgb4_port_idx(ndev); in cxgbit_offload_init()
945 csk->rss_qid = cdev->lldi.rxq_ids[ in cxgbit_offload_init()
947 csk->port_id = cxgb4_port_idx(ndev); in cxgbit_offload_init()
948 cxgbit_set_tcp_window(csk, in cxgbit_offload_init()
962 csk->dcb_priority = priority; in cxgbit_offload_init()
964 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority); in cxgbit_offload_init()
966 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0); in cxgbit_offload_init()
968 if (!csk->l2t) in cxgbit_offload_init()
971 csk->mtu = dst_mtu(dst); in cxgbit_offload_init()
972 csk->tx_chan = cxgb4_port_chan(ndev); in cxgbit_offload_init()
973 csk->smac_idx = in cxgbit_offload_init()
977 csk->txq_idx = (port_id * step) + in cxgbit_offload_init()
979 csk->ctrlq_idx = cxgb4_port_idx(ndev); in cxgbit_offload_init()
984 csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx]; in cxgbit_offload_init()
985 csk->port_id = port_id; in cxgbit_offload_init()
986 cxgbit_set_tcp_window(csk, in cxgbit_offload_init()
1043 static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_send_rx_credits() argument
1045 if (csk->com.state != CSK_STATE_ESTABLISHED) { in cxgbit_send_rx_credits()
1050 cxgbit_ofld_send(csk->com.cdev, skb); in cxgbit_send_rx_credits()
1058 int cxgbit_rx_data_ack(struct cxgbit_sock *csk) in cxgbit_rx_data_ack() argument
1069 RX_CREDITS_V(csk->rx_credits); in cxgbit_rx_data_ack()
1071 cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx, in cxgbit_rx_data_ack()
1074 csk->rx_credits = 0; in cxgbit_rx_data_ack()
1076 spin_lock_bh(&csk->lock); in cxgbit_rx_data_ack()
1077 if (csk->lock_owner) { in cxgbit_rx_data_ack()
1079 __skb_queue_tail(&csk->backlogq, skb); in cxgbit_rx_data_ack()
1080 spin_unlock_bh(&csk->lock); in cxgbit_rx_data_ack()
1084 cxgbit_send_rx_credits(csk, skb); in cxgbit_rx_data_ack()
1085 spin_unlock_bh(&csk->lock); in cxgbit_rx_data_ack()
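
cxgbit_rx_data_ack() returns accumulated receive credits to the hardware: the bytes consumed since the last ack (csk->rx_credits) are packed into a CPL_RX_DATA_ACK, the counter is zeroed, and the same lock_owner test as in cxgbit_abort_conn() defers the send to the backlog when the socket is busy. The credit bookkeeping alone, as a sketch; the threshold and the caller-supplied send hook are assumptions, since the fragments only show the counter reset:

    /* Accumulate consumed bytes; once past a threshold, hand them back to
     * the NIC so it can reopen the advertised receive window. Sketch only. */
    struct rx_state { unsigned int rx_credits; };

    #define RX_CREDIT_THRES (16 * 1024)       /* assumed threshold */

    static int maybe_send_rx_data_ack(struct rx_state *s,
                                      void (*send_ack)(unsigned int credits))
    {
        if (s->rx_credits < RX_CREDIT_THRES)
            return 0;                 /* not worth a control message yet */

        send_ack(s->rx_credits);      /* cxgb_mk_rx_data_ack(..., credits) */
        s->rx_credits = 0;            /* counter restarts after each ack */
        return 1;
    }
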
1092 static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk) in cxgbit_alloc_csk_skb() argument
1111 __skb_queue_tail(&csk->skbq, skb); in cxgbit_alloc_csk_skb()
1119 csk->lro_hskb = skb; in cxgbit_alloc_csk_skb()
1123 __skb_queue_purge(&csk->skbq); in cxgbit_alloc_csk_skb()
1128 cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) in cxgbit_pass_accept_rpl() argument
1133 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; in cxgbit_pass_accept_rpl()
1141 pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid); in cxgbit_pass_accept_rpl()
1145 cxgbit_put_csk(csk); in cxgbit_pass_accept_rpl()
1151 INIT_TP_WR(rpl5, csk->tid); in cxgbit_pass_accept_rpl()
1153 csk->tid)); in cxgbit_pass_accept_rpl()
1154 cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx, in cxgbit_pass_accept_rpl()
1156 (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1); in cxgbit_pass_accept_rpl()
1157 wscale = cxgb_compute_wscale(csk->rcv_win); in cxgbit_pass_accept_rpl()
1162 win = csk->rcv_win >> 10; in cxgbit_pass_accept_rpl()
1168 L2T_IDX_V(csk->l2t->idx) | in cxgbit_pass_accept_rpl()
1169 TX_CHAN_V(csk->tx_chan) | in cxgbit_pass_accept_rpl()
1170 SMAC_SEL_V(csk->smac_idx) | in cxgbit_pass_accept_rpl()
1171 DSCP_V(csk->tos >> 2) | in cxgbit_pass_accept_rpl()
1176 RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); in cxgbit_pass_accept_rpl()
1210 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx); in cxgbit_pass_accept_rpl()
1211 t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard); in cxgbit_pass_accept_rpl()
1212 cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); in cxgbit_pass_accept_rpl()
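
cxgbit_pass_accept_rpl() builds the CPL that accepts the TCP connection: it picks an MTU-table index with cxgb_best_mtu(), advertises the window in 1 KB units (win = rcv_win >> 10), and derives the window-scale shift so the receive window fits TCP's 16-bit window field. The scale is just a shift count; a small function mirroring what cxgb_compute_wscale() produces for rcv_win:

    /* Smallest shift such that (win >> wscale) fits the 16-bit TCP
     * window field; RFC 7323 caps the shift at 14. */
    static unsigned int compute_wscale(unsigned int win)
    {
        unsigned int wscale = 0;

        while (wscale < 14 && (win >> wscale) > 65535)
            wscale++;
        return wscale;
    }
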
1218 struct cxgbit_sock *csk = NULL; in cxgbit_pass_accept_req() local
1249 csk = lookup_tid(t, tid); in cxgbit_pass_accept_req()
1250 if (csk) { in cxgbit_pass_accept_req()
1290 csk = kzalloc(sizeof(*csk), GFP_ATOMIC); in cxgbit_pass_accept_req()
1291 if (!csk) { in cxgbit_pass_accept_req()
1296 ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port), in cxgbit_pass_accept_req()
1302 kfree(csk); in cxgbit_pass_accept_req()
1306 kref_init(&csk->kref); in cxgbit_pass_accept_req()
1307 init_completion(&csk->com.wr_wait.completion); in cxgbit_pass_accept_req()
1309 INIT_LIST_HEAD(&csk->accept_node); in cxgbit_pass_accept_req()
1313 if (peer_mss && csk->mtu > (peer_mss + hdrs)) in cxgbit_pass_accept_req()
1314 csk->mtu = peer_mss + hdrs; in cxgbit_pass_accept_req()
1316 csk->com.state = CSK_STATE_CONNECTING; in cxgbit_pass_accept_req()
1317 csk->com.cdev = cdev; in cxgbit_pass_accept_req()
1318 csk->cnp = cnp; in cxgbit_pass_accept_req()
1319 csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); in cxgbit_pass_accept_req()
1320 csk->dst = dst; in cxgbit_pass_accept_req()
1321 csk->tid = tid; in cxgbit_pass_accept_req()
1322 csk->wr_cred = cdev->lldi.wr_cred - in cxgbit_pass_accept_req()
1324 csk->wr_max_cred = csk->wr_cred; in cxgbit_pass_accept_req()
1325 csk->wr_una_cred = 0; in cxgbit_pass_accept_req()
1329 &csk->com.local_addr; in cxgbit_pass_accept_req()
1334 sin = (struct sockaddr_in *)&csk->com.remote_addr; in cxgbit_pass_accept_req()
1340 &csk->com.local_addr; in cxgbit_pass_accept_req()
1349 sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr; in cxgbit_pass_accept_req()
1355 skb_queue_head_init(&csk->rxq); in cxgbit_pass_accept_req()
1356 skb_queue_head_init(&csk->txq); in cxgbit_pass_accept_req()
1357 skb_queue_head_init(&csk->ppodq); in cxgbit_pass_accept_req()
1358 skb_queue_head_init(&csk->backlogq); in cxgbit_pass_accept_req()
1359 skb_queue_head_init(&csk->skbq); in cxgbit_pass_accept_req()
1360 cxgbit_sock_reset_wr_list(csk); in cxgbit_pass_accept_req()
1361 spin_lock_init(&csk->lock); in cxgbit_pass_accept_req()
1362 init_waitqueue_head(&csk->waitq); in cxgbit_pass_accept_req()
1363 csk->lock_owner = false; in cxgbit_pass_accept_req()
1365 if (cxgbit_alloc_csk_skb(csk)) { in cxgbit_pass_accept_req()
1367 kfree(csk); in cxgbit_pass_accept_req()
1375 list_add_tail(&csk->list, &cdev->cskq.list); in cxgbit_pass_accept_req()
1377 cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family); in cxgbit_pass_accept_req()
1378 cxgbit_pass_accept_rpl(csk, req); in cxgbit_pass_accept_req()
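
cxgbit_pass_accept_req() shows the construction order: kzalloc the csk, resolve the route and L2T entry via cxgbit_offload_init(), initialise the refcount and completion, copy both endpoint addresses, initialise every queue and lock, pre-allocate the teardown skbs, and only then publish the socket with cxgb4_insert_tid() and answer with the accept CPL. On failure the partially built object is unwound in reverse, the kernel's usual goto-cleanup idiom. Schematically, with stand-in helpers that are not the driver's functions:

    #include <stdlib.h>

    struct sock_stub { int l2t, skbs, tid; };

    /* Illustrative stand-ins for the driver calls named in the comments. */
    static struct sock_stub *alloc_sock(void)
    { return calloc(1, sizeof(struct sock_stub)); }            /* kzalloc() */
    static int  resolve_l2t(struct sock_stub *s)
    { s->l2t = 1; return 0; }                        /* cxgbit_offload_init() */
    static int  prealloc_skbs(struct sock_stub *s)
    { s->skbs = 1; return 0; }                       /* cxgbit_alloc_csk_skb() */
    static void publish_tid(struct sock_stub *s)
    { s->tid = 42; }                                 /* cxgb4_insert_tid() */
    static void release_l2t(struct sock_stub *s)
    { s->l2t = 0; }                                  /* cxgb4_l2t_release() */

    /* Constructor with reverse-order unwinding on failure (goto cleanup). */
    static int build_sock(struct sock_stub **out)
    {
        struct sock_stub *sk = alloc_sock();
        if (!sk)
            return -1;
        if (resolve_l2t(sk))
            goto free_sock;
        if (prealloc_skbs(sk))
            goto rel_l2t;
        publish_tid(sk);                 /* socket now visible to RX path */
        *out = sk;
        return 0;

    rel_l2t:
        release_l2t(sk);
    free_sock:
        free(sk);
        return -1;
    }

    int main(void)
    {
        struct sock_stub *sk;
        return build_sock(&sk);
    }
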
1388 cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp, in cxgbit_tx_flowc_wr_credits() argument
1395 if (csk->snd_wscale) in cxgbit_tx_flowc_wr_credits()
1415 u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk) in cxgbit_send_tx_flowc_wr() argument
1417 struct cxgbit_device *cdev = csk->com.cdev; in cxgbit_send_tx_flowc_wr()
1424 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; in cxgbit_send_tx_flowc_wr()
1427 flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen); in cxgbit_send_tx_flowc_wr()
1429 skb = __skb_dequeue(&csk->skbq); in cxgbit_send_tx_flowc_wr()
1435 FW_WR_FLOWID_V(csk->tid)); in cxgbit_send_tx_flowc_wr()
1438 (csk->com.cdev->lldi.pf)); in cxgbit_send_tx_flowc_wr()
1440 flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan); in cxgbit_send_tx_flowc_wr()
1442 flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan); in cxgbit_send_tx_flowc_wr()
1444 flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid); in cxgbit_send_tx_flowc_wr()
1446 flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt); in cxgbit_send_tx_flowc_wr()
1448 flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt); in cxgbit_send_tx_flowc_wr()
1450 flowc->mnemval[6].val = cpu_to_be32(csk->snd_win); in cxgbit_send_tx_flowc_wr()
1452 flowc->mnemval[7].val = cpu_to_be32(csk->emss); in cxgbit_send_tx_flowc_wr()
1462 if (csk->snd_wscale) { in cxgbit_send_tx_flowc_wr()
1464 flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale); in cxgbit_send_tx_flowc_wr()
1471 pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid); in cxgbit_send_tx_flowc_wr()
1480 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt, in cxgbit_send_tx_flowc_wr()
1481 csk->rcv_nxt, csk->snd_win, csk->emss); in cxgbit_send_tx_flowc_wr()
1482 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); in cxgbit_send_tx_flowc_wr()
1483 cxgbit_ofld_send(csk->com.cdev, skb); in cxgbit_send_tx_flowc_wr()
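
The FLOWC work request is a variable-length array of (mnemonic, value) pairs; the fragments show mnemval[0] through mnemval[7] (PFN/VFN, channel, port, IQ id, snd_nxt, rcv_nxt, snd_win, emss), plus an extra slot when csk->snd_wscale is set and, on DCB links, a priority slot. Its size must be reported in 16-byte firmware credit units, which is what cxgbit_tx_flowc_wr_credits() computes. In outline, assuming an 8-byte WR header and 8-byte mnemval slots (the base parameter count is version-dependent; eight is taken from the fragments above):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* FLOWC WR size: fixed header plus one 8-byte mnemonic/value slot per
     * parameter, rounded up to 16-byte firmware credits. Layout assumed. */
    static unsigned int flowc_wr_credits(int snd_wscale, int dcb_prio,
                                         unsigned int *flowclenp)
    {
        unsigned int nparams = 8;        /* mnemval[0..7] in the fragments */

        if (snd_wscale)
            nparams++;                   /* window-scale parameter */
        if (dcb_prio >= 0)
            nparams++;                   /* DCB priority, when configured */

        unsigned int flowclen = 8 + nparams * 8;  /* header + mnemval pairs */
        unsigned int flowclen16 = DIV_ROUND_UP(flowclen, 16);

        *flowclenp = flowclen16 * 16;    /* WR length, padded to credits */
        return flowclen16;
    }

    int main(void)
    {
        unsigned int len;
        printf("credits=%u len=%u\n", flowc_wr_credits(1, -1, &len), len);
        return 0;
    }
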
1488 cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_send_tcb_skb() argument
1490 spin_lock_bh(&csk->lock); in cxgbit_send_tcb_skb()
1491 if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) { in cxgbit_send_tcb_skb()
1492 spin_unlock_bh(&csk->lock); in cxgbit_send_tcb_skb()
1494 __func__, csk, csk->tid, csk->com.state); in cxgbit_send_tcb_skb()
1499 cxgbit_get_csk(csk); in cxgbit_send_tcb_skb()
1500 cxgbit_init_wr_wait(&csk->com.wr_wait); in cxgbit_send_tcb_skb()
1501 cxgbit_ofld_send(csk->com.cdev, skb); in cxgbit_send_tcb_skb()
1502 spin_unlock_bh(&csk->lock); in cxgbit_send_tcb_skb()
1507 int cxgbit_setup_conn_digest(struct cxgbit_sock *csk) in cxgbit_setup_conn_digest() argument
1511 u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC; in cxgbit_setup_conn_digest()
1512 u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC; in cxgbit_setup_conn_digest()
1523 INIT_TP_WR(req, csk->tid); in cxgbit_setup_conn_digest()
1524 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); in cxgbit_setup_conn_digest()
1525 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); in cxgbit_setup_conn_digest()
1530 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); in cxgbit_setup_conn_digest()
1532 if (cxgbit_send_tcb_skb(csk, skb)) in cxgbit_setup_conn_digest()
1535 ret = cxgbit_wait_for_reply(csk->com.cdev, in cxgbit_setup_conn_digest()
1536 &csk->com.wr_wait, in cxgbit_setup_conn_digest()
1537 csk->tid, 5, __func__); in cxgbit_setup_conn_digest()
1544 int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx) in cxgbit_setup_conn_pgidx() argument
1557 INIT_TP_WR(req, csk->tid); in cxgbit_setup_conn_pgidx()
1558 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); in cxgbit_setup_conn_pgidx()
1559 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); in cxgbit_setup_conn_pgidx()
1563 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); in cxgbit_setup_conn_pgidx()
1565 if (cxgbit_send_tcb_skb(csk, skb)) in cxgbit_setup_conn_pgidx()
1568 ret = cxgbit_wait_for_reply(csk->com.cdev, in cxgbit_setup_conn_pgidx()
1569 &csk->com.wr_wait, in cxgbit_setup_conn_pgidx()
1570 csk->tid, 5, __func__); in cxgbit_setup_conn_pgidx()
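
Both cxgbit_setup_conn_digest() and cxgbit_setup_conn_pgidx() follow the same synchronous pattern: build a CPL_SET_TCB_FIELD on the control queue, send it through cxgbit_send_tcb_skb() (which takes a csk reference and arms com.wr_wait under the lock), then block in cxgbit_wait_for_reply() for up to 5 seconds until cxgbit_set_tcb_rpl() wakes the waiter with the firmware status. A portable model of that armed-completion wait, with made-up names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    /* Models cxgbit_wr_wait: arm, send, block with a timeout, wake on reply. */
    struct wr_wait {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool done;
        int  status;
    };

    static void wr_wait_init(struct wr_wait *w)      /* cxgbit_init_wr_wait() */
    {
        pthread_mutex_init(&w->lock, NULL);
        pthread_cond_init(&w->cond, NULL);
        w->done = false;
    }

    static void wr_wake_up(struct wr_wait *w, int status) /* cxgbit_wake_up() */
    {
        pthread_mutex_lock(&w->lock);
        w->status = status;
        w->done = true;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
    }

    static int wr_wait_for_reply(struct wr_wait *w, int timeout_s)
    {
        struct timespec dl;
        clock_gettime(CLOCK_REALTIME, &dl);
        dl.tv_sec += timeout_s;            /* 5 s for SET_TCB_FIELD above */

        pthread_mutex_lock(&w->lock);
        while (!w->done) {
            if (pthread_cond_timedwait(&w->cond, &w->lock, &dl)) {
                pthread_mutex_unlock(&w->lock);
                return -1;                 /* timed out waiting for firmware */
            }
        }
        pthread_mutex_unlock(&w->lock);
        return w->status;                  /* CPL status from the reply */
    }
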
1627 struct cxgbit_sock *csk; in cxgbit_pass_establish() local
1633 csk = lookup_tid(t, tid); in cxgbit_pass_establish()
1634 if (unlikely(!csk)) { in cxgbit_pass_establish()
1638 cnp = csk->cnp; in cxgbit_pass_establish()
1641 __func__, csk, tid, cnp); in cxgbit_pass_establish()
1643 csk->write_seq = snd_isn; in cxgbit_pass_establish()
1644 csk->snd_una = snd_isn; in cxgbit_pass_establish()
1645 csk->snd_nxt = snd_isn; in cxgbit_pass_establish()
1647 csk->rcv_nxt = rcv_isn; in cxgbit_pass_establish()
1649 if (csk->rcv_win > (RCV_BUFSIZ_M << 10)) in cxgbit_pass_establish()
1650 csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10)); in cxgbit_pass_establish()
1652 csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); in cxgbit_pass_establish()
1653 cxgbit_set_emss(csk, tcp_opt); in cxgbit_pass_establish()
1654 dst_confirm(csk->dst); in cxgbit_pass_establish()
1655 csk->com.state = CSK_STATE_ESTABLISHED; in cxgbit_pass_establish()
1657 list_add_tail(&csk->accept_node, &cnp->np_accept_list); in cxgbit_pass_establish()
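
cxgbit_pass_establish() finishes the three-way handshake: the send-side sequence trackers (write_seq, snd_una, snd_nxt) all start at the firmware's send ISN, rcv_nxt at the receive ISN, and when the configured receive window exceeds what the hardware can advertise (RCV_BUFSIZ_M << 10), the surplus is banked as initial rx_credits to be returned later through CPL_RX_DATA_ACK. The arithmetic, isolated; the RCV_BUFSIZ_M value is an assumption:

    /* Sequence/window initialisation at ESTABLISHED, as in the fragments.
     * RCV_BUFSIZ_M is the hw field maximum in 1 KB units (assumed 0x3ff). */
    #define RCV_BUFSIZ_M 0x3ffU

    struct seq_state {
        unsigned int write_seq, snd_una, snd_nxt, rcv_nxt;
        unsigned int rcv_win, rx_credits;
    };

    static void establish(struct seq_state *s, unsigned int snd_isn,
                          unsigned int rcv_isn)
    {
        s->write_seq = s->snd_una = s->snd_nxt = snd_isn;
        s->rcv_nxt = rcv_isn;

        /* Window beyond the hw-advertisable maximum becomes pre-banked
         * credits, handed back as the application consumes data. */
        if (s->rcv_win > (RCV_BUFSIZ_M << 10))
            s->rx_credits = s->rcv_win - (RCV_BUFSIZ_M << 10);
    }
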
1664 static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_queue_rx_skb() argument
1667 spin_lock_bh(&csk->rxq.lock); in cxgbit_queue_rx_skb()
1668 __skb_queue_tail(&csk->rxq, skb); in cxgbit_queue_rx_skb()
1669 spin_unlock_bh(&csk->rxq.lock); in cxgbit_queue_rx_skb()
1670 wake_up(&csk->waitq); in cxgbit_queue_rx_skb()
1673 static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_peer_close() argument
1676 __func__, csk, csk->tid, csk->com.state); in cxgbit_peer_close()
1678 switch (csk->com.state) { in cxgbit_peer_close()
1680 csk->com.state = CSK_STATE_CLOSING; in cxgbit_peer_close()
1681 cxgbit_queue_rx_skb(csk, skb); in cxgbit_peer_close()
1685 csk->com.state = CSK_STATE_MORIBUND; in cxgbit_peer_close()
1688 csk->com.state = CSK_STATE_DEAD; in cxgbit_peer_close()
1689 cxgbit_put_csk(csk); in cxgbit_peer_close()
1695 __func__, csk->com.state); in cxgbit_peer_close()
1701 static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_close_con_rpl() argument
1704 __func__, csk, csk->tid, csk->com.state); in cxgbit_close_con_rpl()
1706 switch (csk->com.state) { in cxgbit_close_con_rpl()
1708 csk->com.state = CSK_STATE_MORIBUND; in cxgbit_close_con_rpl()
1711 csk->com.state = CSK_STATE_DEAD; in cxgbit_close_con_rpl()
1712 cxgbit_put_csk(csk); in cxgbit_close_con_rpl()
1719 __func__, csk->com.state); in cxgbit_close_con_rpl()
1725 static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_abort_req_rss() argument
1735 __func__, csk, tid, csk->com.state); in cxgbit_abort_req_rss()
1743 switch (csk->com.state) { in cxgbit_abort_req_rss()
1746 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_req_rss()
1750 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_req_rss()
1754 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_req_rss()
1755 if (!csk->conn) in cxgbit_abort_req_rss()
1762 __func__, csk->com.state); in cxgbit_abort_req_rss()
1763 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_req_rss()
1766 __skb_queue_purge(&csk->txq); in cxgbit_abort_req_rss()
1768 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) in cxgbit_abort_req_rss()
1769 cxgbit_send_tx_flowc_wr(csk); in cxgbit_abort_req_rss()
1771 rpl_skb = __skb_dequeue(&csk->skbq); in cxgbit_abort_req_rss()
1773 cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx); in cxgbit_abort_req_rss()
1774 cxgbit_ofld_send(csk->com.cdev, rpl_skb); in cxgbit_abort_req_rss()
1777 cxgbit_queue_rx_skb(csk, skb); in cxgbit_abort_req_rss()
1782 cxgbit_put_csk(csk); in cxgbit_abort_req_rss()
1787 static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_abort_rpl_rss() argument
1792 __func__, csk, csk->tid, csk->com.state); in cxgbit_abort_rpl_rss()
1794 switch (csk->com.state) { in cxgbit_abort_rpl_rss()
1796 csk->com.state = CSK_STATE_DEAD; in cxgbit_abort_rpl_rss()
1797 if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags)) in cxgbit_abort_rpl_rss()
1798 cxgbit_wake_up(&csk->com.wr_wait, __func__, in cxgbit_abort_rpl_rss()
1800 cxgbit_put_csk(csk); in cxgbit_abort_rpl_rss()
1804 __func__, csk->com.state); in cxgbit_abort_rpl_rss()
1810 static bool cxgbit_credit_err(const struct cxgbit_sock *csk) in cxgbit_credit_err() argument
1812 const struct sk_buff *skb = csk->wr_pending_head; in cxgbit_credit_err()
1815 if (unlikely(csk->wr_cred > csk->wr_max_cred)) { in cxgbit_credit_err()
1817 csk, csk->tid, csk->wr_cred, csk->wr_max_cred); in cxgbit_credit_err()
1826 if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) { in cxgbit_credit_err()
1828 csk, csk->tid, csk->wr_cred, in cxgbit_credit_err()
1829 credit, csk->wr_max_cred); in cxgbit_credit_err()
1837 static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_fw4_ack() argument
1843 csk->wr_cred += credits; in cxgbit_fw4_ack()
1844 if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred)) in cxgbit_fw4_ack()
1845 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; in cxgbit_fw4_ack()
1848 struct sk_buff *p = cxgbit_sock_peek_wr(csk); in cxgbit_fw4_ack()
1853 csk, csk->tid, credits, in cxgbit_fw4_ack()
1854 csk->wr_cred, csk->wr_una_cred); in cxgbit_fw4_ack()
1861 csk, csk->tid, in cxgbit_fw4_ack()
1862 credits, csk->wr_cred, csk->wr_una_cred, in cxgbit_fw4_ack()
1868 cxgbit_sock_dequeue_wr(csk); in cxgbit_fw4_ack()
1873 if (unlikely(cxgbit_credit_err(csk))) { in cxgbit_fw4_ack()
1874 cxgbit_queue_rx_skb(csk, skb); in cxgbit_fw4_ack()
1879 if (unlikely(before(snd_una, csk->snd_una))) { in cxgbit_fw4_ack()
1881 csk, csk->tid, snd_una, in cxgbit_fw4_ack()
1882 csk->snd_una); in cxgbit_fw4_ack()
1886 if (csk->snd_una != snd_una) { in cxgbit_fw4_ack()
1887 csk->snd_una = snd_una; in cxgbit_fw4_ack()
1888 dst_confirm(csk->dst); in cxgbit_fw4_ack()
1892 if (skb_queue_len(&csk->txq)) in cxgbit_fw4_ack()
1893 cxgbit_push_tx_frames(csk); in cxgbit_fw4_ack()
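
cxgbit_fw4_ack() is the TX completion path: firmware returns a count of 16-byte credits, the driver adds them back to wr_cred, then walks the pending-WR queue dequeuing each skb whose recorded credit cost is fully covered, and finally pushes any queued TX frames now that credits are available; cxgbit_credit_err() cross-checks that wr_cred never exceeds wr_max_cred and that outstanding costs reconcile. A runnable model of that accounting (simplified: partial acknowledgement of a WR is not modeled):

    #include <stdio.h>

    #define WR_MAX_CRED 64

    struct wr { struct wr *next; unsigned int cost; };  /* credits per WR */

    static struct wr *pending_head, *pending_tail;      /* wr_pending queue */
    static unsigned int wr_cred = WR_MAX_CRED;

    static void wr_send(struct wr *w)          /* charge credits at submit */
    {
        wr_cred -= w->cost;
        w->next = NULL;
        if (pending_tail)
            pending_tail->next = w;
        else
            pending_head = w;
        pending_tail = w;
    }

    static void fw4_ack(unsigned int credits)  /* firmware returns credits */
    {
        wr_cred += credits;
        while (pending_head && credits >= pending_head->cost) {
            credits -= pending_head->cost;     /* this WR fully completed */
            pending_head = pending_head->next;
            if (!pending_head)
                pending_tail = NULL;
        }
        if (wr_cred > WR_MAX_CRED)             /* cxgbit_credit_err() check */
            fprintf(stderr, "credit overflow: %u > %u\n",
                    wr_cred, WR_MAX_CRED);
    }

    int main(void)
    {
        struct wr a = { .cost = 4 }, b = { .cost = 6 };
        wr_send(&a);
        wr_send(&b);
        fw4_ack(10);                           /* completes both WRs */
        printf("wr_cred=%u (expect %u)\n", wr_cred, WR_MAX_CRED);
        return 0;
    }
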
1901 struct cxgbit_sock *csk; in cxgbit_set_tcb_rpl() local
1907 csk = lookup_tid(t, tid); in cxgbit_set_tcb_rpl()
1908 if (unlikely(!csk)) { in cxgbit_set_tcb_rpl()
1912 cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); in cxgbit_set_tcb_rpl()
1915 cxgbit_put_csk(csk); in cxgbit_set_tcb_rpl()
1922 struct cxgbit_sock *csk; in cxgbit_rx_data() local
1928 csk = lookup_tid(t, tid); in cxgbit_rx_data()
1929 if (unlikely(!csk)) { in cxgbit_rx_data()
1934 cxgbit_queue_rx_skb(csk, skb); in cxgbit_rx_data()
1941 __cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) in __cxgbit_process_rx_cpl() argument
1943 spin_lock(&csk->lock); in __cxgbit_process_rx_cpl()
1944 if (csk->lock_owner) { in __cxgbit_process_rx_cpl()
1945 __skb_queue_tail(&csk->backlogq, skb); in __cxgbit_process_rx_cpl()
1946 spin_unlock(&csk->lock); in __cxgbit_process_rx_cpl()
1950 cxgbit_skcb_rx_backlog_fn(skb)(csk, skb); in __cxgbit_process_rx_cpl()
1951 spin_unlock(&csk->lock); in __cxgbit_process_rx_cpl()
1954 static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_process_rx_cpl() argument
1956 cxgbit_get_csk(csk); in cxgbit_process_rx_cpl()
1957 __cxgbit_process_rx_cpl(csk, skb); in cxgbit_process_rx_cpl()
1958 cxgbit_put_csk(csk); in cxgbit_process_rx_cpl()
1963 struct cxgbit_sock *csk; in cxgbit_rx_cpl() local
1992 csk = lookup_tid(t, tid); in cxgbit_rx_cpl()
1993 if (unlikely(!csk)) { in cxgbit_rx_cpl()
1999 cxgbit_process_rx_cpl(csk, skb); in cxgbit_rx_cpl()
2001 __cxgbit_process_rx_cpl(csk, skb); in cxgbit_rx_cpl()