Lines matching refs: csk

207 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req() argument
210 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in send_act_open_req()
211 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); in send_act_open_req()
214 unsigned int qid_atid = ((unsigned int)csk->atid) | in send_act_open_req()
215 (((unsigned int)csk->rss_qid) << 14); in send_act_open_req()
219 MSS_IDX_V(csk->mss_idx) | in send_act_open_req()
220 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | in send_act_open_req()
221 TX_CHAN_V(csk->tx_chan) | in send_act_open_req()
222 SMAC_SEL_V(csk->smac_idx) | in send_act_open_req()
224 RCV_BUFSIZ_V(csk->rcv_win >> 10); in send_act_open_req()
228 RSS_QUEUE_V(csk->rss_qid); in send_act_open_req()
237 req->local_port = csk->saddr.sin_port; in send_act_open_req()
238 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
239 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
240 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
243 csk->cdev->ports[csk->port_id], in send_act_open_req()
244 csk->l2t)); in send_act_open_req()
250 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
252 csk->atid, csk->rss_qid); in send_act_open_req()
261 req->local_port = csk->saddr.sin_port; in send_act_open_req()
262 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
263 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
264 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
268 csk->cdev->ports[csk->port_id], in send_act_open_req()
269 csk->l2t))); in send_act_open_req()
278 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
280 csk->atid, csk->rss_qid); in send_act_open_req()
289 req->local_port = csk->saddr.sin_port; in send_act_open_req()
290 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
291 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
292 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
296 csk->cdev->ports[csk->port_id], in send_act_open_req()
297 csk->l2t))); in send_act_open_req()
310 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
312 csk->atid, csk->rss_qid); in send_act_open_req()
315 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); in send_act_open_req()
318 (&csk->saddr), (&csk->daddr), in send_act_open_req()
319 CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, in send_act_open_req()
320 csk->state, csk->flags, csk->atid, csk->rss_qid); in send_act_open_req()
322 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_act_open_req()
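
The qid_atid value built at lines 214-215 packs two identifiers into one word: the active-open tid in the low bits and the RSS queue id starting at bit 14, so the hardware can steer the reply to the right ingress queue. A minimal standalone sketch of that packing (plain C; the shift comes from the listing, field widths are otherwise illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* qid_atid = atid | (rss_qid << 14), as in send_act_open_req(). */
    static inline uint32_t pack_qid_atid(uint32_t atid, uint32_t rss_qid)
    {
            return atid | (rss_qid << 14);
    }

    int main(void)
    {
            printf("qid_atid = 0x%x\n", pack_qid_atid(0x123, 5)); /* 0x14123 */
            return 0;
    }
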
326 static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req6() argument
329 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in send_act_open_req6()
330 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); in send_act_open_req6()
333 unsigned int qid_atid = ((unsigned int)csk->atid) | in send_act_open_req6()
334 (((unsigned int)csk->rss_qid) << 14); in send_act_open_req6()
338 MSS_IDX_V(csk->mss_idx) | in send_act_open_req6()
339 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | in send_act_open_req6()
340 TX_CHAN_V(csk->tx_chan) | in send_act_open_req6()
341 SMAC_SEL_V(csk->smac_idx) | in send_act_open_req6()
343 RCV_BUFSIZ_V(csk->rcv_win >> 10); in send_act_open_req6()
347 RSS_QUEUE_V(csk->rss_qid); in send_act_open_req6()
356 req->local_port = csk->saddr6.sin6_port; in send_act_open_req6()
357 req->peer_port = csk->daddr6.sin6_port; in send_act_open_req6()
359 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); in send_act_open_req6()
360 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + in send_act_open_req6()
362 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); in send_act_open_req6()
363 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + in send_act_open_req6()
372 csk->cdev->ports[csk->port_id], in send_act_open_req6()
373 csk->l2t)); in send_act_open_req6()
381 req->local_port = csk->saddr6.sin6_port; in send_act_open_req6()
382 req->peer_port = csk->daddr6.sin6_port; in send_act_open_req6()
383 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); in send_act_open_req6()
384 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + in send_act_open_req6()
386 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); in send_act_open_req6()
387 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + in send_act_open_req6()
395 csk->cdev->ports[csk->port_id], in send_act_open_req6()
396 csk->l2t))); in send_act_open_req6()
404 req->local_port = csk->saddr6.sin6_port; in send_act_open_req6()
405 req->peer_port = csk->daddr6.sin6_port; in send_act_open_req6()
406 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); in send_act_open_req6()
407 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + in send_act_open_req6()
409 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); in send_act_open_req6()
410 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + in send_act_open_req6()
420 csk->cdev->ports[csk->port_id], in send_act_open_req6()
421 csk->l2t))); in send_act_open_req6()
427 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); in send_act_open_req6()
430 CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state, in send_act_open_req6()
431 csk->flags, csk->atid, in send_act_open_req6()
432 &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), in send_act_open_req6()
433 &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), in send_act_open_req6()
434 csk->rss_qid); in send_act_open_req6()
436 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_act_open_req6()
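
The IPv6 variant writes each 128-bit address as two big-endian 64-bit halves (the local_ip_hi/local_ip_lo and peer_ip_hi/peer_ip_lo fields above). A sketch of that split in plain C; memcpy stands in for the kernel's direct *(__be64 *) cast:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bytes 0..7 of the in6_addr go to the _hi field, bytes 8..15 to the
     * _lo field.  Plain byte copies keep network byte order, which is
     * all the wire format needs. */
    static void ipv6_to_hi_lo(const uint8_t addr[16], uint64_t *hi, uint64_t *lo)
    {
            memcpy(hi, addr, 8);
            memcpy(lo, addr + 8, 8);
    }

    int main(void)
    {
            uint8_t addr[16] = { 0x20, 0x01, 0x0d, 0xb8 }; /* 2001:db8:: */
            uint64_t hi, lo;

            ipv6_to_hi_lo(addr, &hi, &lo);
            /* Printed values are endian-dependent; only the in-memory
             * byte order matters on the wire. */
            printf("hi=%016" PRIx64 " lo=%016" PRIx64 "\n", hi, lo);
            return 0;
    }
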
440 static void send_close_req(struct cxgbi_sock *csk) in send_close_req() argument
442 struct sk_buff *skb = csk->cpl_close; in send_close_req()
444 unsigned int tid = csk->tid; in send_close_req()
448 csk, csk->state, csk->flags, csk->tid); in send_close_req()
449 csk->cpl_close = NULL; in send_close_req()
450 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_close_req()
455 cxgbi_sock_skb_entail(csk, skb); in send_close_req()
456 if (csk->state >= CTP_ESTABLISHED) in send_close_req()
457 push_tx_frames(csk, 1); in send_close_req()
462 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; in abort_arp_failure() local
467 csk, csk->state, csk->flags, csk->tid); in abort_arp_failure()
470 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in abort_arp_failure()
473 static void send_abort_req(struct cxgbi_sock *csk) in send_abort_req() argument
476 struct sk_buff *skb = csk->cpl_abort_req; in send_abort_req()
478 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) in send_abort_req()
481 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in send_abort_req()
482 send_tx_flowc_wr(csk); in send_abort_req()
483 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
486 cxgbi_sock_set_state(csk, CTP_ABORTING); in send_abort_req()
487 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); in send_abort_req()
488 cxgbi_sock_purge_write_queue(csk); in send_abort_req()
490 csk->cpl_abort_req = NULL; in send_abort_req()
492 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_abort_req()
494 t4_set_arp_err_handler(skb, csk, abort_arp_failure); in send_abort_req()
495 INIT_TP_WR(req, csk->tid); in send_abort_req()
496 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); in send_abort_req()
497 req->rsvd0 = htonl(csk->snd_nxt); in send_abort_req()
498 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
502 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, in send_abort_req()
505 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_abort_req()
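
send_abort_req() shows an ordering rule that recurs in do_abort_req_rss() further down: the first work request a connection ever sends must be the FLOWC WR, so the abort path checks CTPF_TX_DATA_SENT before queueing the ABORT_REQ, then moves the socket to CTP_ABORTING and purges unsent data. A standalone sketch of the send-once guard, assuming plain C bit flags in place of the kernel's cxgbi_sock_flag()/cxgbi_sock_set_flag() helpers (bit position and struct layout are illustrative):

    struct conn {
            unsigned long flags;
    };
    #define CTPF_TX_DATA_SENT (1UL << 0)

    static void send_tx_flowc_wr(struct conn *c)
    {
            (void)c; /* build and post the FLOWC WR here */
    }

    /* Mirrors the CTPF_TX_DATA_SENT checks in send_abort_req() above. */
    static void ensure_flowc_sent(struct conn *c)
    {
            if (!(c->flags & CTPF_TX_DATA_SENT)) {
                    send_tx_flowc_wr(c);
                    c->flags |= CTPF_TX_DATA_SENT;
            }
    }
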
508 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) in send_abort_rpl() argument
510 struct sk_buff *skb = csk->cpl_abort_rpl; in send_abort_rpl()
515 csk, csk->state, csk->flags, csk->tid, rst_status); in send_abort_rpl()
517 csk->cpl_abort_rpl = NULL; in send_abort_rpl()
518 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_abort_rpl()
519 INIT_TP_WR(rpl, csk->tid); in send_abort_rpl()
520 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); in send_abort_rpl()
522 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_abort_rpl()
530 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) in send_rx_credits() argument
537 csk, csk->state, csk->flags, csk->tid, credits); in send_rx_credits()
541 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); in send_rx_credits()
546 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); in send_rx_credits()
547 INIT_TP_WR(req, csk->tid); in send_rx_credits()
549 csk->tid)); in send_rx_credits()
552 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_rx_credits()
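
send_rx_credits() returns receive-window credits to the hardware. The listing only shows the transmit side; the usual bookkeeping behind the credits argument is the distance between what the hardware delivered (rcv_nxt) and the last window update point (rcv_wup). A hedged sketch of that accounting, with the threshold policy assumed rather than taken from the listing:

    #include <stdint.h>

    struct rx_state {
            uint32_t rcv_nxt; /* next byte expected from the hardware */
            uint32_t rcv_wup; /* sequence up to which credits were returned */
    };

    /* Return the credits owed once they cross a threshold; unsigned
     * subtraction handles sequence-number wraparound. */
    static uint32_t credits_to_return(const struct rx_state *rx, uint32_t thresh)
    {
            uint32_t owed = rx->rcv_nxt - rx->rcv_wup;

            return owed >= thresh ? owed : 0;
    }
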
613 static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) in send_tx_flowc_wr() argument
620 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; in send_tx_flowc_wr()
628 htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid)); in send_tx_flowc_wr()
630 flowc->mnemval[0].val = htonl(csk->cdev->pfvf); in send_tx_flowc_wr()
632 flowc->mnemval[1].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
634 flowc->mnemval[2].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
636 flowc->mnemval[3].val = htonl(csk->rss_qid); in send_tx_flowc_wr()
638 flowc->mnemval[4].val = htonl(csk->snd_nxt); in send_tx_flowc_wr()
640 flowc->mnemval[5].val = htonl(csk->rcv_nxt); in send_tx_flowc_wr()
642 flowc->mnemval[6].val = htonl(csk->snd_win); in send_tx_flowc_wr()
644 flowc->mnemval[7].val = htonl(csk->advmss); in send_tx_flowc_wr()
648 if (csk->cdev->skb_iso_txhdr) in send_tx_flowc_wr()
656 csk->tid); in send_tx_flowc_wr()
664 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_tx_flowc_wr()
668 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, in send_tx_flowc_wr()
669 csk->snd_nxt, csk->rcv_nxt, csk->snd_win, in send_tx_flowc_wr()
670 csk->advmss); in send_tx_flowc_wr()
672 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_tx_flowc_wr()
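
The FLOWC WR is a small table of (mnemonic, value) pairs; slots 0-7 above carry pfvf, channel, port (the same tx_chan value twice), the RSS ingress queue, snd_nxt, rcv_nxt, the send window, and the advertised MSS. A sketch of that layout with placeholder mnemonic codes (the real FW_FLOWC_MNEM_* constants are not reproduced here):

    #include <stdint.h>

    struct flowc_mnemval {
            uint8_t  mnemonic; /* which connection parameter this slot carries */
            uint32_t val;      /* big-endian on the wire; host order here */
    };

    static void fill_flowc(struct flowc_mnemval mv[8],
                           uint32_t pfvf, uint32_t tx_chan, uint32_t rss_qid,
                           uint32_t snd_nxt, uint32_t rcv_nxt,
                           uint32_t snd_win, uint32_t advmss)
    {
            const uint32_t vals[8] = {
                    pfvf, tx_chan, tx_chan /* port */, rss_qid,
                    snd_nxt, rcv_nxt, snd_win, advmss,
            };

            for (int i = 0; i < 8; i++) {
                    mv[i].mnemonic = (uint8_t)i; /* placeholder codes */
                    mv[i].val = vals[i];         /* kernel uses htonl() */
            }
    }
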
713 cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen, in cxgb4i_make_tx_data_wr() argument
716 struct cxgbi_device *cdev = csk->cdev; in cxgb4i_make_tx_data_wr()
742 req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | in cxgb4i_make_tx_data_wr()
757 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) in cxgb4i_make_tx_data_wr()
758 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in cxgb4i_make_tx_data_wr()
766 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) in push_tx_frames() argument
771 if (unlikely(csk->state < CTP_ESTABLISHED || in push_tx_frames()
772 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { in push_tx_frames()
776 csk, csk->state, csk->flags, csk->tid); in push_tx_frames()
780 while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) { in push_tx_frames()
807 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in push_tx_frames()
808 flowclen16 = send_tx_flowc_wr(csk); in push_tx_frames()
809 csk->wr_cred -= flowclen16; in push_tx_frames()
810 csk->wr_una_cred += flowclen16; in push_tx_frames()
811 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in push_tx_frames()
814 if (csk->wr_cred < credits_needed) { in push_tx_frames()
817 csk, skb->len, skb->data_len, in push_tx_frames()
818 credits_needed, csk->wr_cred); in push_tx_frames()
820 csk->no_tx_credits++; in push_tx_frames()
824 csk->no_tx_credits = 0; in push_tx_frames()
826 __skb_unlink(skb, &csk->write_queue); in push_tx_frames()
827 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in push_tx_frames()
829 csk->wr_cred -= credits_needed; in push_tx_frames()
830 csk->wr_una_cred += credits_needed; in push_tx_frames()
831 cxgbi_sock_enqueue_wr(csk, skb); in push_tx_frames()
835 csk, skb->len, skb->data_len, credits_needed, in push_tx_frames()
836 csk->wr_cred, csk->wr_una_cred); in push_tx_frames()
839 ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) || in push_tx_frames()
840 after(csk->write_seq, (csk->snd_una + csk->snd_win / 2)))) in push_tx_frames()
856 cxgb4i_make_tx_data_wr(csk, skb, dlen, len, in push_tx_frames()
858 csk->snd_nxt += len; in push_tx_frames()
861 (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { in push_tx_frames()
869 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); in push_tx_frames()
873 csk, csk->state, csk->flags, csk->tid, skb, len); in push_tx_frames()
874 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in push_tx_frames()
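
push_tx_frames() is the credit-flow core of the transmit path: each work request consumes credits from wr_cred and parks them on wr_una_cred until a FW4_ACK (see do_fw4_ack() below) returns them, and a completion is requested once half the pool is outstanding so acknowledgments keep arriving. A minimal sketch of that bookkeeping, with field names mirroring the listing:

    #include <stdbool.h>

    struct tx_credits {
            unsigned int wr_cred;     /* credits still available */
            unsigned int wr_una_cred; /* outstanding, awaiting FW4_ACK */
            unsigned int wr_max_cred; /* size of the credit pool */
    };

    /* Take credits for one WR; a false return means the queue must wait
     * for an ack (the no_tx_credits path in the listing). */
    static bool consume_wr_credits(struct tx_credits *tc, unsigned int need)
    {
            if (tc->wr_cred < need)
                    return false;
            tc->wr_cred -= need;
            tc->wr_una_cred += need;
            return true;
    }

    /* Ask the firmware for a completion once half the pool is in flight. */
    static bool need_completion(const struct tx_credits *tc)
    {
            return tc->wr_una_cred >= tc->wr_max_cred / 2;
    }
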
879 static inline void free_atid(struct cxgbi_sock *csk) in free_atid() argument
881 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in free_atid()
883 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { in free_atid()
884 cxgb4_free_atid(lldi->tids, csk->atid); in free_atid()
885 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); in free_atid()
886 cxgbi_sock_put(csk); in free_atid()
892 struct cxgbi_sock *csk; in do_act_establish() local
901 csk = lookup_atid(t, atid); in do_act_establish()
902 if (unlikely(!csk)) { in do_act_establish()
907 if (csk->atid != atid) { in do_act_establish()
909 atid, csk, csk->state, csk->flags, csk->tid, csk->atid); in do_act_establish()
914 (&csk->saddr), (&csk->daddr), in do_act_establish()
915 atid, tid, csk, csk->state, csk->flags, rcv_isn); in do_act_establish()
919 cxgbi_sock_get(csk); in do_act_establish()
920 csk->tid = tid; in do_act_establish()
921 cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family); in do_act_establish()
922 cxgbi_sock_set_flag(csk, CTPF_HAS_TID); in do_act_establish()
924 free_atid(csk); in do_act_establish()
926 spin_lock_bh(&csk->lock); in do_act_establish()
927 if (unlikely(csk->state != CTP_ACTIVE_OPEN)) in do_act_establish()
929 csk, csk->state, csk->flags, csk->tid); in do_act_establish()
931 if (csk->retry_timer.function) { in do_act_establish()
932 del_timer(&csk->retry_timer); in do_act_establish()
933 csk->retry_timer.function = NULL; in do_act_establish()
936 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; in do_act_establish()
941 if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10)) in do_act_establish()
942 csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10); in do_act_establish()
944 csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40; in do_act_establish()
946 csk->advmss -= 12; in do_act_establish()
947 if (csk->advmss < 128) in do_act_establish()
948 csk->advmss = 128; in do_act_establish()
952 csk, TCPOPT_MSS_G(tcp_opt), csk->advmss); in do_act_establish()
954 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); in do_act_establish()
956 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) in do_act_establish()
957 send_abort_req(csk); in do_act_establish()
959 if (skb_queue_len(&csk->write_queue)) in do_act_establish()
960 push_tx_frames(csk, 0); in do_act_establish()
961 cxgbi_conn_tx_open(csk); in do_act_establish()
963 spin_unlock_bh(&csk->lock); in do_act_establish()
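
Two numeric details in do_act_establish() are worth spelling out. First, if the configured receive window exceeds what the RCV_BUFSIZ field can encode (RCV_BUFSIZ_MASK << 10 bytes), rcv_wup is pre-decremented by the excess so credit accounting stays consistent. Second, the advertised MSS starts from the negotiated MTU table entry, drops 40 bytes of IPv4+TCP header, 12 more when TCP timestamps are in play, and is floored at 128. A sketch of the MSS arithmetic (the timestamp condition is assumed; the listing elides it):

    /* advmss derivation as in do_act_establish(); mtu is the negotiated
     * MTU table entry (lldi->mtus[TCPOPT_MSS_G(tcp_opt)] above). */
    static unsigned int derive_advmss(unsigned int mtu, int tcp_timestamps)
    {
            unsigned int advmss = mtu - 40; /* IPv4 + TCP headers */

            if (tcp_timestamps)
                    advmss -= 12;           /* timestamp option */
            if (advmss < 128)
                    advmss = 128;           /* sanity floor */
            return advmss;
    }
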
990 struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); in csk_act_open_retry_timer() local
991 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in csk_act_open_retry_timer()
998 csk, csk->state, csk->flags, csk->tid); in csk_act_open_retry_timer()
1000 cxgbi_sock_get(csk); in csk_act_open_retry_timer()
1001 spin_lock_bh(&csk->lock); in csk_act_open_retry_timer()
1011 if (csk->csk_family == AF_INET) { in csk_act_open_retry_timer()
1022 cxgbi_sock_fail_act_open(csk, -ENOMEM); in csk_act_open_retry_timer()
1024 skb->sk = (struct sock *)csk; in csk_act_open_retry_timer()
1025 t4_set_arp_err_handler(skb, csk, in csk_act_open_retry_timer()
1027 send_act_open_func(csk, skb, csk->l2t); in csk_act_open_retry_timer()
1030 spin_unlock_bh(&csk->lock); in csk_act_open_retry_timer()
1031 cxgbi_sock_put(csk); in csk_act_open_retry_timer()
1044 struct cxgbi_sock *csk; in do_act_open_rpl() local
1053 csk = lookup_atid(t, atid); in do_act_open_rpl()
1054 if (unlikely(!csk)) { in do_act_open_rpl()
1060 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr), in do_act_open_rpl()
1061 atid, tid, status, csk, csk->state, csk->flags); in do_act_open_rpl()
1071 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl), in do_act_open_rpl()
1072 csk->csk_family); in do_act_open_rpl()
1074 cxgbi_sock_get(csk); in do_act_open_rpl()
1075 spin_lock_bh(&csk->lock); in do_act_open_rpl()
1078 csk->retry_timer.function != csk_act_open_retry_timer) { in do_act_open_rpl()
1079 csk->retry_timer.function = csk_act_open_retry_timer; in do_act_open_rpl()
1080 mod_timer(&csk->retry_timer, jiffies + HZ / 2); in do_act_open_rpl()
1082 cxgbi_sock_fail_act_open(csk, in do_act_open_rpl()
1085 spin_unlock_bh(&csk->lock); in do_act_open_rpl()
1086 cxgbi_sock_put(csk); in do_act_open_rpl()
1093 struct cxgbi_sock *csk; in do_peer_close() local
1099 csk = lookup_tid(t, tid); in do_peer_close()
1100 if (unlikely(!csk)) { in do_peer_close()
1105 (&csk->saddr), (&csk->daddr), in do_peer_close()
1106 csk, csk->state, csk->flags, csk->tid); in do_peer_close()
1107 cxgbi_sock_rcv_peer_close(csk); in do_peer_close()
1114 struct cxgbi_sock *csk; in do_close_con_rpl() local
1120 csk = lookup_tid(t, tid); in do_close_con_rpl()
1121 if (unlikely(!csk)) { in do_close_con_rpl()
1126 (&csk->saddr), (&csk->daddr), in do_close_con_rpl()
1127 csk, csk->state, csk->flags, csk->tid); in do_close_con_rpl()
1128 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); in do_close_con_rpl()
1133 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, in abort_status_to_errno() argument
1139 return csk->state > CTP_ESTABLISHED ? in abort_status_to_errno()
1153 struct cxgbi_sock *csk; in do_abort_req_rss() local
1160 csk = lookup_tid(t, tid); in do_abort_req_rss()
1161 if (unlikely(!csk)) { in do_abort_req_rss()
1167 (&csk->saddr), (&csk->daddr), in do_abort_req_rss()
1168 csk, csk->state, csk->flags, csk->tid, req->status); in do_abort_req_rss()
1173 cxgbi_sock_get(csk); in do_abort_req_rss()
1174 spin_lock_bh(&csk->lock); in do_abort_req_rss()
1176 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
1178 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in do_abort_req_rss()
1179 send_tx_flowc_wr(csk); in do_abort_req_rss()
1180 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in do_abort_req_rss()
1183 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
1184 cxgbi_sock_set_state(csk, CTP_ABORTING); in do_abort_req_rss()
1186 send_abort_rpl(csk, rst_status); in do_abort_req_rss()
1188 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { in do_abort_req_rss()
1189 csk->err = abort_status_to_errno(csk, req->status, &rst_status); in do_abort_req_rss()
1190 cxgbi_sock_closed(csk); in do_abort_req_rss()
1193 spin_unlock_bh(&csk->lock); in do_abort_req_rss()
1194 cxgbi_sock_put(csk); in do_abort_req_rss()
1201 struct cxgbi_sock *csk; in do_abort_rpl_rss() local
1207 csk = lookup_tid(t, tid); in do_abort_rpl_rss()
1208 if (!csk) in do_abort_rpl_rss()
1212 (&csk->saddr), (&csk->daddr), csk, in do_abort_rpl_rss()
1213 csk->state, csk->flags, csk->tid, rpl->status); in do_abort_rpl_rss()
1218 cxgbi_sock_rcv_abort_rpl(csk); in do_abort_rpl_rss()
1225 struct cxgbi_sock *csk; in do_rx_data() local
1231 csk = lookup_tid(t, tid); in do_rx_data()
1232 if (!csk) { in do_rx_data()
1236 pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid); in do_rx_data()
1237 spin_lock_bh(&csk->lock); in do_rx_data()
1238 send_abort_req(csk); in do_rx_data()
1239 spin_unlock_bh(&csk->lock); in do_rx_data()
1246 struct cxgbi_sock *csk; in do_rx_iscsi_hdr() local
1253 csk = lookup_tid(t, tid); in do_rx_iscsi_hdr()
1254 if (unlikely(!csk)) { in do_rx_iscsi_hdr()
1261 csk, csk->state, csk->flags, csk->tid, skb, skb->len, in do_rx_iscsi_hdr()
1264 spin_lock_bh(&csk->lock); in do_rx_iscsi_hdr()
1266 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_iscsi_hdr()
1269 csk, csk->state, csk->flags, csk->tid); in do_rx_iscsi_hdr()
1270 if (csk->state != CTP_ABORTING) in do_rx_iscsi_hdr()
1283 if (!csk->skb_ulp_lhdr) { in do_rx_iscsi_hdr()
1289 csk, csk->state, csk->flags, csk->tid, skb); in do_rx_iscsi_hdr()
1290 csk->skb_ulp_lhdr = skb; in do_rx_iscsi_hdr()
1294 (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) { in do_rx_iscsi_hdr()
1296 csk->tid, cxgbi_skcb_tcp_seq(skb), in do_rx_iscsi_hdr()
1297 csk->rcv_nxt); in do_rx_iscsi_hdr()
1312 csk->tid, plen, hlen, dlen, in do_rx_iscsi_hdr()
1319 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; in do_rx_iscsi_hdr()
1320 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); in do_rx_iscsi_hdr()
1324 csk, skb, *bhs, hlen, dlen, in do_rx_iscsi_hdr()
1329 struct sk_buff *lskb = csk->skb_ulp_lhdr; in do_rx_iscsi_hdr()
1334 csk, csk->state, csk->flags, skb, lskb); in do_rx_iscsi_hdr()
1337 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_hdr()
1338 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
1342 send_abort_req(csk); in do_rx_iscsi_hdr()
1344 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
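
do_rx_iscsi_hdr() stitches a PDU together across multiple CPLs: the first skb is remembered as skb_ulp_lhdr, its TCP sequence must equal rcv_nxt, and once the full PDU length (including the csk->dcrc_len data-digest bytes) is known, rcv_nxt advances by it. A sketch of the sequence check, with stdint types standing in for the kernel's:

    #include <stdbool.h>
    #include <stdint.h>

    /* Accept a PDU header only if it lands exactly at the expected
     * sequence; on success advance rcv_nxt by the full PDU length,
     * digest bytes included.  A mismatch takes the abort path in the
     * listing. */
    static bool accept_pdu_hdr(uint32_t *rcv_nxt, uint32_t tcp_seq,
                               uint32_t pdulen)
    {
            if (tcp_seq != *rcv_nxt)
                    return false;
            *rcv_nxt += pdulen;
            return true;
    }
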
1351 struct cxgbi_sock *csk; in do_rx_iscsi_data() local
1359 csk = lookup_tid(t, tid); in do_rx_iscsi_data()
1360 if (unlikely(!csk)) { in do_rx_iscsi_data()
1367 csk, csk->state, csk->flags, csk->tid, skb, in do_rx_iscsi_data()
1370 spin_lock_bh(&csk->lock); in do_rx_iscsi_data()
1372 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_iscsi_data()
1375 csk, csk->state, csk->flags, csk->tid); in do_rx_iscsi_data()
1377 if (csk->state != CTP_ABORTING) in do_rx_iscsi_data()
1390 if (!csk->skb_ulp_lhdr) in do_rx_iscsi_data()
1391 csk->skb_ulp_lhdr = skb; in do_rx_iscsi_data()
1393 lskb = csk->skb_ulp_lhdr; in do_rx_iscsi_data()
1398 csk, csk->state, csk->flags, skb, lskb); in do_rx_iscsi_data()
1400 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_data()
1401 spin_unlock_bh(&csk->lock); in do_rx_iscsi_data()
1405 send_abort_req(csk); in do_rx_iscsi_data()
1407 spin_unlock_bh(&csk->lock); in do_rx_iscsi_data()
1413 cxgb4i_process_ddpvld(struct cxgbi_sock *csk, in cxgb4i_process_ddpvld() argument
1418 csk, skb, ddpvld, cxgbi_skcb_flags(skb)); in cxgb4i_process_ddpvld()
1424 csk, skb, ddpvld, cxgbi_skcb_flags(skb)); in cxgb4i_process_ddpvld()
1431 csk, skb, ddpvld); in cxgb4i_process_ddpvld()
1439 csk, skb, ddpvld); in cxgb4i_process_ddpvld()
1447 struct cxgbi_sock *csk; in do_rx_data_ddp() local
1455 csk = lookup_tid(t, tid); in do_rx_data_ddp()
1456 if (unlikely(!csk)) { in do_rx_data_ddp()
1463 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr); in do_rx_data_ddp()
1465 spin_lock_bh(&csk->lock); in do_rx_data_ddp()
1467 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_data_ddp()
1470 csk, csk->state, csk->flags, csk->tid); in do_rx_data_ddp()
1471 if (csk->state != CTP_ABORTING) in do_rx_data_ddp()
1477 if (!csk->skb_ulp_lhdr) { in do_rx_data_ddp()
1478 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); in do_rx_data_ddp()
1482 lskb = csk->skb_ulp_lhdr; in do_rx_data_ddp()
1483 csk->skb_ulp_lhdr = NULL; in do_rx_data_ddp()
1489 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); in do_rx_data_ddp()
1491 cxgb4i_process_ddpvld(csk, lskb, ddpvld); in do_rx_data_ddp()
1495 csk, lskb, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
1498 cxgbi_conn_pdu_ready(csk); in do_rx_data_ddp()
1499 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
1503 send_abort_req(csk); in do_rx_data_ddp()
1505 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
1513 struct cxgbi_sock *csk; in do_rx_iscsi_cmp() local
1523 csk = lookup_tid(t, tid); in do_rx_iscsi_cmp()
1524 if (unlikely(!csk)) { in do_rx_iscsi_cmp()
1532 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr, in do_rx_iscsi_cmp()
1535 spin_lock_bh(&csk->lock); in do_rx_iscsi_cmp()
1537 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_iscsi_cmp()
1540 csk, csk->state, csk->flags, csk->tid); in do_rx_iscsi_cmp()
1542 if (csk->state != CTP_ABORTING) in do_rx_iscsi_cmp()
1556 csk->rcv_nxt = seq + pdu_len_ddp; in do_rx_iscsi_cmp()
1558 if (csk->skb_ulp_lhdr) { in do_rx_iscsi_cmp()
1559 data_skb = skb_peek(&csk->receive_queue); in do_rx_iscsi_cmp()
1567 __skb_unlink(data_skb, &csk->receive_queue); in do_rx_iscsi_cmp()
1571 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_cmp()
1572 __skb_queue_tail(&csk->receive_queue, data_skb); in do_rx_iscsi_cmp()
1574 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_cmp()
1577 csk->skb_ulp_lhdr = NULL; in do_rx_iscsi_cmp()
1584 cxgb4i_process_ddpvld(csk, skb, ddpvld); in do_rx_iscsi_cmp()
1587 csk, skb, cxgbi_skcb_flags(skb)); in do_rx_iscsi_cmp()
1589 cxgbi_conn_pdu_ready(csk); in do_rx_iscsi_cmp()
1590 spin_unlock_bh(&csk->lock); in do_rx_iscsi_cmp()
1595 send_abort_req(csk); in do_rx_iscsi_cmp()
1597 spin_unlock_bh(&csk->lock); in do_rx_iscsi_cmp()
1604 struct cxgbi_sock *csk; in do_fw4_ack() local
1610 csk = lookup_tid(t, tid); in do_fw4_ack()
1611 if (unlikely(!csk)) in do_fw4_ack()
1616 csk, csk->state, csk->flags, csk->tid); in do_fw4_ack()
1617 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), in do_fw4_ack()
1629 struct cxgbi_sock *csk; in do_set_tcb_rpl() local
1631 csk = lookup_tid(t, tid); in do_set_tcb_rpl()
1632 if (!csk) { in do_set_tcb_rpl()
1639 csk, csk->state, csk->flags, csk->tid, rpl->status); in do_set_tcb_rpl()
1643 csk, tid, rpl->status); in do_set_tcb_rpl()
1644 csk->err = -EINVAL; in do_set_tcb_rpl()
1647 complete(&csk->cmpl); in do_set_tcb_rpl()
1652 static int alloc_cpls(struct cxgbi_sock *csk) in alloc_cpls() argument
1654 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), in alloc_cpls()
1656 if (!csk->cpl_close) in alloc_cpls()
1659 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), in alloc_cpls()
1661 if (!csk->cpl_abort_req) in alloc_cpls()
1664 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), in alloc_cpls()
1666 if (!csk->cpl_abort_rpl) in alloc_cpls()
1671 cxgbi_sock_free_cpl_skbs(csk); in alloc_cpls()
1675 static inline void l2t_put(struct cxgbi_sock *csk) in l2t_put() argument
1677 if (csk->l2t) { in l2t_put()
1678 cxgb4_l2t_release(csk->l2t); in l2t_put()
1679 csk->l2t = NULL; in l2t_put()
1680 cxgbi_sock_put(csk); in l2t_put()
1684 static void release_offload_resources(struct cxgbi_sock *csk) in release_offload_resources() argument
1688 struct net_device *ndev = csk->cdev->ports[csk->port_id]; in release_offload_resources()
1693 csk, csk->state, csk->flags, csk->tid); in release_offload_resources()
1695 cxgbi_sock_free_cpl_skbs(csk); in release_offload_resources()
1696 cxgbi_sock_purge_write_queue(csk); in release_offload_resources()
1697 if (csk->wr_cred != csk->wr_max_cred) { in release_offload_resources()
1698 cxgbi_sock_purge_wr_queue(csk); in release_offload_resources()
1699 cxgbi_sock_reset_wr_list(csk); in release_offload_resources()
1702 l2t_put(csk); in release_offload_resources()
1704 if (csk->csk_family == AF_INET6) in release_offload_resources()
1706 (const u32 *)&csk->saddr6.sin6_addr, 1); in release_offload_resources()
1709 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) in release_offload_resources()
1710 free_atid(csk); in release_offload_resources()
1711 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { in release_offload_resources()
1712 lldi = cxgbi_cdev_priv(csk->cdev); in release_offload_resources()
1713 cxgb4_remove_tid(lldi->tids, 0, csk->tid, in release_offload_resources()
1714 csk->csk_family); in release_offload_resources()
1715 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); in release_offload_resources()
1716 cxgbi_sock_put(csk); in release_offload_resources()
1718 csk->dst = NULL; in release_offload_resources()
1765 static int init_act_open(struct cxgbi_sock *csk) in init_act_open() argument
1767 struct cxgbi_device *cdev = csk->cdev; in init_act_open()
1769 struct net_device *ndev = cdev->ports[csk->port_id]; in init_act_open()
1783 csk, csk->state, csk->flags, csk->tid); in init_act_open()
1785 if (csk->csk_family == AF_INET) in init_act_open()
1786 daddr = &csk->daddr.sin_addr.s_addr; in init_act_open()
1788 else if (csk->csk_family == AF_INET6) in init_act_open()
1789 daddr = &csk->daddr6.sin6_addr; in init_act_open()
1792 pr_err("address family 0x%x not supported\n", csk->csk_family); in init_act_open()
1796 n = dst_neigh_lookup(csk->dst, daddr); in init_act_open()
1806 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); in init_act_open()
1807 if (csk->atid < 0) { in init_act_open()
1811 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); in init_act_open()
1812 cxgbi_sock_get(csk); in init_act_open()
1818 csk->dcb_priority = priority; in init_act_open()
1819 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority); in init_act_open()
1821 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); in init_act_open()
1823 if (!csk->l2t) { in init_act_open()
1827 cxgbi_sock_get(csk); in init_act_open()
1830 if (csk->csk_family == AF_INET6) in init_act_open()
1831 cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1); in init_act_open()
1845 if (csk->csk_family == AF_INET) in init_act_open()
1854 skb->sk = (struct sock *)csk; in init_act_open()
1855 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); in init_act_open()
1857 if (!csk->mtu) in init_act_open()
1858 csk->mtu = dst_mtu(csk->dst); in init_act_open()
1859 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); in init_act_open()
1860 csk->tx_chan = cxgb4_port_chan(ndev); in init_act_open()
1861 csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx; in init_act_open()
1863 csk->txq_idx = cxgb4_port_idx(ndev) * step; in init_act_open()
1867 csk->rss_qid = lldi->rxq_ids[rxq_idx]; in init_act_open()
1869 csk->snd_win = cxgb4i_snd_win; in init_act_open()
1870 csk->rcv_win = cxgb4i_rcv_win; in init_act_open()
1872 csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN; in init_act_open()
1875 csk->rcv_win *= rcv_winf; in init_act_open()
1878 csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN; in init_act_open()
1881 csk->snd_win *= snd_winf; in init_act_open()
1883 csk->wr_cred = lldi->wr_cred - in init_act_open()
1885 csk->wr_max_cred = csk->wr_cred; in init_act_open()
1886 csk->wr_una_cred = 0; in init_act_open()
1887 cxgbi_sock_reset_wr_list(csk); in init_act_open()
1888 csk->err = 0; in init_act_open()
1891 (&csk->saddr), (&csk->daddr), csk, csk->state, in init_act_open()
1892 csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid, in init_act_open()
1893 csk->mtu, csk->mss_idx, csk->smac_idx); in init_act_open()
1901 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); in init_act_open()
1902 if (csk->csk_family == AF_INET) in init_act_open()
1903 send_act_open_req(csk, skb, csk->l2t); in init_act_open()
1906 send_act_open_req6(csk, skb, csk->l2t); in init_act_open()
1914 if (csk->csk_family == AF_INET6) in init_act_open()
1916 (const u32 *)&csk->saddr6.sin6_addr, 1); in init_act_open()
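
init_act_open() sizes the send and receive windows from the module defaults, falling back to the 10G defaults and scaling by a link-speed factor; the guards around those assignments are elided in the listing, so the sketch below assumes the obvious shape (unset value falls back, nonzero factor scales):

    /* Window sizing sketch; dflt would be e.g. CXGB4I_DEFAULT_10G_RCV_WIN
     * and winf the link-speed factor (rcv_winf/snd_winf above). */
    static unsigned int scale_window(unsigned int win, unsigned int dflt,
                                     unsigned int winf)
    {
            if (!win)
                    win = dflt;
            if (winf)
                    win *= winf;
            return win;
    }
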
2015 static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, in ddp_ppod_write_idata() argument
2021 struct cxgbi_device *cdev = csk->cdev; in ddp_ppod_write_idata()
2023 csk->tid); in ddp_ppod_write_idata()
2041 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in ddp_ppod_write_idata()
2043 spin_lock_bh(&csk->lock); in ddp_ppod_write_idata()
2044 cxgbi_sock_skb_entail(csk, skb); in ddp_ppod_write_idata()
2045 spin_unlock_bh(&csk->lock); in ddp_ppod_write_idata()
2050 static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, in ddp_set_map() argument
2060 ttinfo->cid = csk->port_id; in ddp_set_map()
2067 err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt, in ddp_set_map()
2076 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_pgidx() argument
2091 INIT_TP_WR(req, csk->tid); in ddp_setup_conn_pgidx()
2092 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); in ddp_setup_conn_pgidx()
2093 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); in ddp_setup_conn_pgidx()
2097 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_pgidx()
2100 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); in ddp_setup_conn_pgidx()
2102 reinit_completion(&csk->cmpl); in ddp_setup_conn_pgidx()
2103 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_pgidx()
2104 wait_for_completion(&csk->cmpl); in ddp_setup_conn_pgidx()
2106 return csk->err; in ddp_setup_conn_pgidx()
2109 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_digest() argument
2122 csk->hcrc_len = (hcrc ? 4 : 0); in ddp_setup_conn_digest()
2123 csk->dcrc_len = (dcrc ? 4 : 0); in ddp_setup_conn_digest()
2128 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); in ddp_setup_conn_digest()
2133 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_digest()
2136 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); in ddp_setup_conn_digest()
2138 reinit_completion(&csk->cmpl); in ddp_setup_conn_digest()
2139 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_digest()
2140 wait_for_completion(&csk->cmpl); in ddp_setup_conn_digest()
2142 return csk->err; in ddp_setup_conn_digest()
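
Both ddp_setup_conn_pgidx() and ddp_setup_conn_digest() use the same synchronous pattern: reinit_completion(), post the SET_TCB_FIELD work request, wait_for_completion(), then return csk->err as filled in by do_set_tcb_rpl(). The digest lengths themselves are simple: iSCSI header and data digests are CRC32C, so each enabled digest contributes exactly 4 bytes, which do_rx_iscsi_hdr() later adds to the received PDU length via csk->dcrc_len. A trivial sketch:

    /* 4 bytes per enabled CRC32C digest, as cached in csk->hcrc_len and
     * csk->dcrc_len above. */
    static inline unsigned int iscsi_digest_len(int enabled)
    {
            return enabled ? 4 : 0;
    }
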
2428 struct cxgbi_sock *csk = pmap->port_csk[i]; in cxgb4_dcb_change_notify() local
2430 if (csk->dcb_priority != priority) { in cxgb4_dcb_change_notify()
2431 iscsi_conn_failure(csk->user_data, in cxgb4_dcb_change_notify()
2434 "priority %u->%u.\n", csk, in cxgb4_dcb_change_notify()
2435 csk->dcb_priority, priority); in cxgb4_dcb_change_notify()