Lines matching refs:cf_sk (net/caif/caif_socket.c)
Each entry gives the source line number, the matching line, and the enclosing function; "argument"/"local" marks whether cf_sk is a parameter or a local variable at that site.
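For orientation, here is a sketch of struct caifsock with the fields the matches below touch; the exact layout varies by kernel version:

struct caifsock {
	struct sock sk;		/* must be first, so container_of(sk, ...) works */
	struct cflayer layer;	/* CAIF stack layer; ->dn points downward */
	u32 flow_state;		/* TX/RX flow-on bits, see helpers below */
	struct caif_connect_request conn_req;
	struct mutex readlock;	/* serializes readers */
	struct dentry *debugfs_socket_dir;
	int headroom, tailroom, maxframe;	/* filled in at connect time */
};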
56 static int rx_flow_is_on(struct caifsock *cf_sk) in rx_flow_is_on() argument
59 (void *) &cf_sk->flow_state); in rx_flow_is_on()
62 static int tx_flow_is_on(struct caifsock *cf_sk) in tx_flow_is_on() argument
65 (void *) &cf_sk->flow_state); in tx_flow_is_on()
68 static void set_rx_flow_off(struct caifsock *cf_sk) in set_rx_flow_off() argument
71 (void *) &cf_sk->flow_state); in set_rx_flow_off()
74 static void set_rx_flow_on(struct caifsock *cf_sk) in set_rx_flow_on() argument
77 (void *) &cf_sk->flow_state); in set_rx_flow_on()
80 static void set_tx_flow_off(struct caifsock *cf_sk) in set_tx_flow_off() argument
83 (void *) &cf_sk->flow_state); in set_tx_flow_off()
86 static void set_tx_flow_on(struct caifsock *cf_sk) in set_tx_flow_on() argument
89 (void *) &cf_sk->flow_state); in set_tx_flow_on()
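The six flow-state helpers above are thin atomic-bitop wrappers over cf_sk->flow_state; a sketch assuming the mainline TX_FLOW_ON_BIT/RX_FLOW_ON_BIT definitions (only the RX pair is shown, the TX variants swap the bit):

#define TX_FLOW_ON_BIT	1
#define RX_FLOW_ON_BIT	2

static int rx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void set_rx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

/* set_rx_flow_on() uses set_bit(); tx_flow_is_on() and the
 * set_tx_flow_*() pair are identical with TX_FLOW_ON_BIT. */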
94 struct caifsock *cf_sk; in caif_read_lock() local
95 cf_sk = container_of(sk, struct caifsock, sk); in caif_read_lock()
96 mutex_lock(&cf_sk->readlock); in caif_read_lock()
101 struct caifsock *cf_sk; in caif_read_unlock() local
102 cf_sk = container_of(sk, struct caifsock, sk); in caif_read_unlock()
103 mutex_unlock(&cf_sk->readlock); in caif_read_unlock()
106 static int sk_rcvbuf_lowwater(struct caifsock *cf_sk) in sk_rcvbuf_lowwater() argument
109 return cf_sk->sk.sk_rcvbuf / 4; in sk_rcvbuf_lowwater()
114 struct caifsock *cf_sk; in caif_flow_ctrl() local
115 cf_sk = container_of(sk, struct caifsock, sk); in caif_flow_ctrl()
116 if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) in caif_flow_ctrl()
117 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); in caif_flow_ctrl()
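Filled out, caif_flow_ctrl() forwards a flow command to the layer below; a sketch of the whole helper, assuming the mainline signature:

static void caif_flow_ctrl(struct sock *sk, int mode)
{
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
	/* mode is e.g. CAIF_MODEMCMD_FLOW_ON_REQ or _FLOW_OFF_REQ */
	if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
		cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}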
129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_queue_rcv_skb() local
133 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { in caif_queue_rcv_skb()
135 atomic_read(&cf_sk->sk.sk_rmem_alloc), in caif_queue_rcv_skb()
136 sk_rcvbuf_lowwater(cf_sk)); in caif_queue_rcv_skb()
137 set_rx_flow_off(cf_sk); in caif_queue_rcv_skb()
145 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { in caif_queue_rcv_skb()
146 set_rx_flow_off(cf_sk); in caif_queue_rcv_skb()
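The matches at 129-146 are the receive-side flow control; a condensed sketch of caif_queue_rcv_skb() with the locking and final queueing elided:

static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	/* Receive buffer nearly full: ask the modem to stop sending. */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
		set_rx_flow_off(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}

	if (sk_filter(sk, skb)) {	/* socket filter rejected the packet */
		kfree_skb(skb);
		return;
	}

	/* No rmem accounting headroom left: also switch RX flow off. */
	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
		set_rx_flow_off(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}

	/* ... then charge the skb to the socket, queue it on
	 * sk_receive_queue and call sk->sk_data_ready() ... */
}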
167 struct caifsock *cf_sk; in caif_sktrecv_cb() local
170 cf_sk = container_of(layr, struct caifsock, layer); in caif_sktrecv_cb()
173 if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) { in caif_sktrecv_cb()
177 caif_queue_rcv_skb(&cf_sk->sk, skb); in caif_sktrecv_cb()
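caif_sktrecv_cb() is the receive entry point from the CAIF stack; a sketch of the whole callback, assuming the mainline cfpkt_tonative() helper:

static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
	struct caifsock *cf_sk;
	struct sk_buff *skb;

	cf_sk = container_of(layr, struct caifsock, layer);
	skb = cfpkt_tonative(pkt);	/* unwrap the CAIF packet into an skb */

	if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
		kfree_skb(skb);
		return 0;
	}
	caif_queue_rcv_skb(&cf_sk->sk, skb);
	return 0;
}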
183 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); in cfsk_hold() local
184 sock_hold(&cf_sk->sk); in cfsk_hold()
189 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); in cfsk_put() local
190 sock_put(&cf_sk->sk); in cfsk_put()
198 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); in caif_ctrl_cb() local
202 set_tx_flow_on(cf_sk); in caif_ctrl_cb()
203 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
208 set_tx_flow_off(cf_sk); in caif_ctrl_cb()
209 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
214 caif_client_register_refcnt(&cf_sk->layer, in caif_ctrl_cb()
216 cf_sk->sk.sk_state = CAIF_CONNECTED; in caif_ctrl_cb()
217 set_tx_flow_on(cf_sk); in caif_ctrl_cb()
218 cf_sk->sk.sk_shutdown = 0; in caif_ctrl_cb()
219 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
224 cf_sk->sk.sk_state = CAIF_DISCONNECTED; in caif_ctrl_cb()
225 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
230 cf_sk->sk.sk_err = ECONNREFUSED; in caif_ctrl_cb()
231 cf_sk->sk.sk_state = CAIF_DISCONNECTED; in caif_ctrl_cb()
232 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; in caif_ctrl_cb()
237 set_tx_flow_on(cf_sk); in caif_ctrl_cb()
238 cf_sk->sk.sk_state_change(&cf_sk->sk); in caif_ctrl_cb()
243 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK; in caif_ctrl_cb()
244 cf_sk->sk.sk_err = ECONNRESET; in caif_ctrl_cb()
245 set_rx_flow_on(cf_sk); in caif_ctrl_cb()
246 cf_sk->sk.sk_error_report(&cf_sk->sk); in caif_ctrl_cb()
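The matches at 198-246 are the arms of a single switch in caif_ctrl_cb(), the control callback from the stack; a condensed sketch of how they map onto the mainline CAIF_CTRLCMD_* events:

static void caif_ctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
			 int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);

	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:		/* modem: OK to send again */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_FLOW_OFF_IND:		/* modem: stop sending */
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_INIT_RSP:		/* connect completed */
		caif_client_register_refcnt(&cf_sk->layer, cfsk_hold, cfsk_put);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_shutdown = 0;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_DEINIT_RSP:		/* disconnect completed */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_INIT_FAIL_RSP:	/* connect refused */
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		set_tx_flow_on(cf_sk);	/* poll() must report writable on failure */
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;
	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:	/* peer shut down */
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		cf_sk->sk.sk_err = ECONNRESET;
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;
	default:
		pr_debug("Unexpected flow command %d\n", flow);
	}
}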
256 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_check_flow_release() local
258 if (rx_flow_is_on(cf_sk)) in caif_check_flow_release()
261 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { in caif_check_flow_release()
262 set_rx_flow_on(cf_sk); in caif_check_flow_release()
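caif_check_flow_release() is the counterpart of the flow-off logic in caif_queue_rcv_skb(); a sketch of the full helper:

static void caif_check_flow_release(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (rx_flow_is_on(cf_sk))
		return;

	/* Readers drained below the low-water mark: re-open RX flow. */
	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
		set_rx_flow_on(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
	}
}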
464 static long caif_wait_for_flow_on(struct caifsock *cf_sk, in caif_wait_for_flow_on() argument
467 struct sock *sk = &cf_sk->sk; in caif_wait_for_flow_on()
471 if (tx_flow_is_on(cf_sk) && in caif_wait_for_flow_on()
472 (!wait_writeable || sock_writeable(&cf_sk->sk))) in caif_wait_for_flow_on()
488 if (cf_sk->sk.sk_state != CAIF_CONNECTED) in caif_wait_for_flow_on()
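Lines 464-488 belong to the sender's wait loop; a sketch of caif_wait_for_flow_on() condensed from mainline (the order of the error checks may differ across versions):

static long caif_wait_for_flow_on(struct caifsock *cf_sk,
				  int wait_writeable, long timeo, int *err)
{
	struct sock *sk = &cf_sk->sk;
	DEFINE_WAIT(wait);

	for (;;) {
		*err = 0;
		if (tx_flow_is_on(cf_sk) &&
		    (!wait_writeable || sock_writeable(&cf_sk->sk)))
			break;
		*err = -ETIMEDOUT;
		if (!timeo)
			break;
		*err = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		*err = -ECONNRESET;
		if (sk->sk_shutdown & SHUTDOWN_MASK)
			break;
		*err = -sk->sk_err;
		if (sk->sk_err)
			break;
		*err = -EPIPE;
		if (cf_sk->sk.sk_state != CAIF_CONNECTED)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}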
500 static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, in transmit_skb() argument
507 cfpkt_set_prio(pkt, cf_sk->sk.sk_priority); in transmit_skb()
509 if (cf_sk->layer.dn == NULL) { in transmit_skb()
514 return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); in transmit_skb()
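transmit_skb() hands a fully built skb down the stack; a sketch assuming the mainline cfpkt_fromnative() conversion (control-buffer housekeeping elided):

static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo)
{
	struct cfpkt *pkt;

	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);	/* wrap skb as a CAIF packet */
	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);

	if (cf_sk->layer.dn == NULL) {
		kfree_skb(skb);		/* no layer below: drop */
		return -EINVAL;
	}

	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}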
522 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_seqpkt_sendmsg() local
528 caif_assert(cf_sk); in caif_seqpkt_sendmsg()
554 if (cf_sk->sk.sk_state != CAIF_CONNECTED || in caif_seqpkt_sendmsg()
561 if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM) in caif_seqpkt_sendmsg()
564 buffer_size = len + cf_sk->headroom + cf_sk->tailroom; in caif_seqpkt_sendmsg()
572 skb_reserve(skb, cf_sk->headroom); in caif_seqpkt_sendmsg()
578 ret = transmit_skb(skb, cf_sk, noblock, timeo); in caif_seqpkt_sendmsg()
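Within caif_seqpkt_sendmsg() these matches form the datagram send path; a condensed sketch (flag and iovec validation elided, variables as declared by the function):

	/* Inside caif_seqpkt_sendmsg(), once flow control is on: */
	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
	    sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto err;			/* -EPIPE */

	/* One frame per call; only RFM may exceed the link frame size. */
	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
		goto err;			/* -EMSGSIZE */

	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
	if (!skb || skb_tailroom(skb) < buffer_size)
		goto err;

	skb_reserve(skb, cf_sk->headroom);	/* room for lower-layer headers */
	ret = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (ret)
		goto err;
	ret = transmit_skb(skb, cf_sk, noblock, timeo);
	if (ret < 0)
		return ret;			/* skb already freed */
	return len;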
598 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_stream_sendmsg() local
612 timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err); in caif_stream_sendmsg()
621 if (size > cf_sk->maxframe) in caif_stream_sendmsg()
622 size = cf_sk->maxframe; in caif_stream_sendmsg()
632 size + cf_sk->headroom + in caif_stream_sendmsg()
633 cf_sk->tailroom, in caif_stream_sendmsg()
639 skb_reserve(skb, cf_sk->headroom); in caif_stream_sendmsg()
654 err = transmit_skb(skb, cf_sk, in caif_stream_sendmsg()
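caif_stream_sendmsg() repeats the same allocate/copy/transmit step in a loop, slicing the stream into frames of at most cf_sk->maxframe bytes; a condensed sketch of the loop body (sndbuf clamping elided):

	/* Inside caif_stream_sendmsg()'s send loop: */
	while (sent < len) {
		size = len - sent;
		if (size > cf_sk->maxframe)
			size = cf_sk->maxframe;	/* one link frame at a time */

		skb = sock_alloc_send_skb(sk,
					  size + cf_sk->headroom + cf_sk->tailroom,
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (skb == NULL)
			goto out_err;

		skb_reserve(skb, cf_sk->headroom);
		if (memcpy_from_msg(skb_put(skb, size), msg, size)) {
			kfree_skb(skb);
			goto out_err;
		}
		err = transmit_skb(skb, cf_sk,
				   msg->msg_flags & MSG_DONTWAIT, timeo);
		if (err < 0)
			goto out_err;	/* skb already freed by transmit_skb() */
		sent += size;
	}
	return sent;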
677 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in setsockopt() local
680 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) in setsockopt()
691 lock_sock(&(cf_sk->sk)); in setsockopt()
692 cf_sk->conn_req.link_selector = linksel; in setsockopt()
693 release_sock(&cf_sk->sk); in setsockopt()
699 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) in setsockopt()
701 lock_sock(&(cf_sk->sk)); in setsockopt()
702 if (ol > sizeof(cf_sk->conn_req.param.data) || in setsockopt()
703 copy_from_sockptr(&cf_sk->conn_req.param.data, ov, ol)) { in setsockopt()
704 release_sock(&cf_sk->sk); in setsockopt()
707 cf_sk->conn_req.param.size = ol; in setsockopt()
708 release_sock(&cf_sk->sk); in setsockopt()
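The setsockopt() matches implement the two SOL_CAIF options; a condensed sketch of the switch, assuming the mainline CAIFSO_LINK_SELECT and CAIFSO_REQ_PARAM option names (level checks elided):

	/* Inside setsockopt(): options only apply before connecting. */
	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
		return -ENOPROTOOPT;

	switch (opt) {
	case CAIFSO_LINK_SELECT:	/* low-latency vs high-bandwidth link */
		if (ol < sizeof(int) ||
		    copy_from_sockptr(&linksel, ov, sizeof(int)))
			return -EINVAL;
		lock_sock(&(cf_sk->sk));
		cf_sk->conn_req.link_selector = linksel;
		release_sock(&cf_sk->sk);
		return 0;

	case CAIFSO_REQ_PARAM:		/* utility-link request parameters */
		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
			return -ENOPROTOOPT;
		lock_sock(&(cf_sk->sk));
		if (ol > sizeof(cf_sk->conn_req.param.data) ||
		    copy_from_sockptr(&cf_sk->conn_req.param.data, ov, ol)) {
			release_sock(&cf_sk->sk);
			return -EINVAL;
		}
		cf_sk->conn_req.param.size = ol;
		release_sock(&cf_sk->sk);
		return 0;

	default:
		return -ENOPROTOOPT;
	}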
749 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_connect() local
792 caif_disconnect_client(sock_net(sk), &cf_sk->layer); in caif_connect()
793 caif_free_client(&cf_sk->layer); in caif_connect()
806 sk_stream_kill_queues(&cf_sk->sk); in caif_connect()
812 memcpy(&cf_sk->conn_req.sockaddr, uaddr, in caif_connect()
821 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) in caif_connect()
822 cf_sk->conn_req.priority = CAIF_PRIO_MAX; in caif_connect()
823 else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN) in caif_connect()
824 cf_sk->conn_req.priority = CAIF_PRIO_MIN; in caif_connect()
826 cf_sk->conn_req.priority = cf_sk->sk.sk_priority; in caif_connect()
829 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; in caif_connect()
831 cf_sk->layer.receive = caif_sktrecv_cb; in caif_connect()
833 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, in caif_connect()
834 &cf_sk->layer, &ifindex, &headroom, &tailroom); in caif_connect()
837 cf_sk->sk.sk_socket->state = SS_UNCONNECTED; in caif_connect()
838 cf_sk->sk.sk_state = CAIF_DISCONNECTED; in caif_connect()
849 cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); in caif_connect()
853 cf_sk->tailroom = tailroom; in caif_connect()
854 cf_sk->maxframe = mtu - (headroom + tailroom); in caif_connect()
855 if (cf_sk->maxframe < 1) { in caif_connect()
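The caif_connect() matches cover priority clamping and the MTU/headroom negotiation that yields cf_sk->maxframe; a condensed sketch of that tail end, assuming mainline helpers:

	/* Clamp sk_priority into the CAIF range before connecting. */
	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
		cf_sk->conn_req.priority = CAIF_PRIO_MAX;
	else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
		cf_sk->conn_req.priority = CAIF_PRIO_MIN;
	else
		cf_sk->conn_req.priority = cf_sk->sk.sk_priority;

	cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
	cf_sk->layer.receive = caif_sktrecv_cb;

	err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
				  &cf_sk->layer, &ifindex, &headroom, &tailroom);
	if (err < 0) {
		cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		goto out;
	}

	/* Look up the bound device to size per-frame reservations. */
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
	if (!dev) {
		rcu_read_unlock();
		err = -ENODEV;
		goto out;
	}
	cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
	mtu = dev->mtu;
	rcu_read_unlock();

	cf_sk->tailroom = tailroom;
	cf_sk->maxframe = mtu - (headroom + tailroom);
	if (cf_sk->maxframe < 1)
		goto out;	/* interface MTU too small to carry a frame */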
902 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_release() local
907 set_tx_flow_off(cf_sk); in caif_release()
919 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir)); in caif_release()
920 debugfs_remove_recursive(cf_sk->debugfs_socket_dir); in caif_release()
922 lock_sock(&(cf_sk->sk)); in caif_release()
926 caif_disconnect_client(sock_net(sk), &cf_sk->layer); in caif_release()
927 cf_sk->sk.sk_socket->state = SS_DISCONNECTING; in caif_release()
931 sk_stream_kill_queues(&cf_sk->sk); in caif_release()
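caif_release() tears the socket down in roughly the reverse order of setup; a condensed sketch (locking details vary by version):

static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (!sk)
		return 0;

	set_tx_flow_off(cf_sk);		/* stop senders immediately */

	/* Detach from the struct socket and drop debugfs entries. */
	sock_set_flag(sk, SOCK_DEAD);
	sock->sk = NULL;
	WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
	debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

	lock_sock(&(cf_sk->sk));
	sk->sk_state = CAIF_DISCONNECTED;
	sk->sk_shutdown = SHUTDOWN_MASK;

	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
	wake_up_interruptible_poll(sk_sleep(sk), EPOLLERR | EPOLLHUP);

	sock_orphan(sk);
	sk_stream_kill_queues(&cf_sk->sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}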
943 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_poll() local
965 if (sock_writeable(sk) && tx_flow_is_on(cf_sk)) in caif_poll()
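In caif_poll(), writability is gated on the TX flow state, so a flow-off from the modem backpressures userspace through poll(); a sketch of the mask computation:

static __poll_t caif_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	__poll_t mask = 0;

	sock_poll_wait(file, sock, wait);

	if (sk->sk_err)
		mask |= EPOLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP;
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Writable only while the modem has TX flow switched on. */
	if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}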
1014 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); in caif_sock_destructor() local
1022 sk_stream_kill_queues(&cf_sk->sk); in caif_sock_destructor()
1023 caif_free_client(&cf_sk->layer); in caif_sock_destructor()
1030 struct caifsock *cf_sk = NULL; in caif_create() local
1064 cf_sk = container_of(sk, struct caifsock, sk); in caif_create()
1085 lock_sock(&(cf_sk->sk)); in caif_create()
1091 mutex_init(&cf_sk->readlock); /* single task reading lock */ in caif_create()
1092 cf_sk->layer.ctrlcmd = caif_ctrl_cb; in caif_create()
1093 cf_sk->sk.sk_socket->state = SS_UNCONNECTED; in caif_create()
1094 cf_sk->sk.sk_state = CAIF_DISCONNECTED; in caif_create()
1096 set_tx_flow_off(cf_sk); in caif_create()
1097 set_rx_flow_on(cf_sk); in caif_create()
1100 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; in caif_create()
1101 cf_sk->conn_req.protocol = protocol; in caif_create()
1102 release_sock(&cf_sk->sk); in caif_create()
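Finally, the caif_create() matches are the per-socket initialization; a condensed sketch of that block, assuming mainline defaults (sk allocation, sock_init_data() and priority defaults happen just before):

	/* Inside caif_create(), once sk has been allocated: */
	cf_sk = container_of(sk, struct caifsock, sk);

	lock_sock(&(cf_sk->sk));	/* keep the socket closed during setup */
	mutex_init(&cf_sk->readlock);	/* single task reading lock */
	cf_sk->layer.ctrlcmd = caif_ctrl_cb;	/* stack events -> caif_ctrl_cb() */
	cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
	cf_sk->sk.sk_state = CAIF_DISCONNECTED;

	/* Defaults: nothing may be sent yet, but we are ready to receive. */
	set_tx_flow_off(cf_sk);
	set_rx_flow_on(cf_sk);

	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
	cf_sk->conn_req.protocol = protocol;
	release_sock(&cf_sk->sk);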