Lines Matching refs:xprt
70 static void xprt_init(struct rpc_xprt *xprt, struct net *net);
71 static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
72 static void xprt_destroy(struct rpc_xprt *xprt);
216 static void xprt_clear_locked(struct rpc_xprt *xprt) in xprt_clear_locked() argument
218 xprt->snd_task = NULL; in xprt_clear_locked()
219 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { in xprt_clear_locked()
221 clear_bit(XPRT_LOCKED, &xprt->state); in xprt_clear_locked()
224 queue_work(xprtiod_workqueue, &xprt->task_cleanup); in xprt_clear_locked()
236 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_reserve_xprt() argument
240 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { in xprt_reserve_xprt()
241 if (task == xprt->snd_task) in xprt_reserve_xprt()
245 if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) in xprt_reserve_xprt()
247 xprt->snd_task = task; in xprt_reserve_xprt()
250 trace_xprt_reserve_xprt(xprt, task); in xprt_reserve_xprt()
254 xprt_clear_locked(xprt); in xprt_reserve_xprt()
258 rpc_sleep_on_timeout(&xprt->sending, task, NULL, in xprt_reserve_xprt()
261 rpc_sleep_on(&xprt->sending, task, NULL); in xprt_reserve_xprt()
267 xprt_need_congestion_window_wait(struct rpc_xprt *xprt) in xprt_need_congestion_window_wait() argument
269 return test_bit(XPRT_CWND_WAIT, &xprt->state); in xprt_need_congestion_window_wait()
273 xprt_set_congestion_window_wait(struct rpc_xprt *xprt) in xprt_set_congestion_window_wait() argument
275 if (!list_empty(&xprt->xmit_queue)) { in xprt_set_congestion_window_wait()
277 if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst, in xprt_set_congestion_window_wait()
281 set_bit(XPRT_CWND_WAIT, &xprt->state); in xprt_set_congestion_window_wait()
285 xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt) in xprt_test_and_clear_congestion_window_wait() argument
287 if (!RPCXPRT_CONGESTED(xprt)) in xprt_test_and_clear_congestion_window_wait()
288 clear_bit(XPRT_CWND_WAIT, &xprt->state); in xprt_test_and_clear_congestion_window_wait()
300 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_reserve_xprt_cong() argument
304 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { in xprt_reserve_xprt_cong()
305 if (task == xprt->snd_task) in xprt_reserve_xprt_cong()
310 xprt->snd_task = task; in xprt_reserve_xprt_cong()
313 if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) in xprt_reserve_xprt_cong()
315 if (!xprt_need_congestion_window_wait(xprt)) { in xprt_reserve_xprt_cong()
316 xprt->snd_task = task; in xprt_reserve_xprt_cong()
320 xprt_clear_locked(xprt); in xprt_reserve_xprt_cong()
324 rpc_sleep_on_timeout(&xprt->sending, task, NULL, in xprt_reserve_xprt_cong()
327 rpc_sleep_on(&xprt->sending, task, NULL); in xprt_reserve_xprt_cong()
330 trace_xprt_reserve_cong(xprt, task); in xprt_reserve_xprt_cong()
335 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_lock_write() argument
339 if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task) in xprt_lock_write()
341 spin_lock(&xprt->transport_lock); in xprt_lock_write()
342 retval = xprt->ops->reserve_xprt(xprt, task); in xprt_lock_write()
343 spin_unlock(&xprt->transport_lock); in xprt_lock_write()
349 struct rpc_xprt *xprt = data; in __xprt_lock_write_func() local
351 xprt->snd_task = task; in __xprt_lock_write_func()
355 static void __xprt_lock_write_next(struct rpc_xprt *xprt) in __xprt_lock_write_next() argument
357 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) in __xprt_lock_write_next()
359 if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) in __xprt_lock_write_next()
361 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending, in __xprt_lock_write_next()
362 __xprt_lock_write_func, xprt)) in __xprt_lock_write_next()
365 xprt_clear_locked(xprt); in __xprt_lock_write_next()
368 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) in __xprt_lock_write_next_cong() argument
370 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) in __xprt_lock_write_next_cong()
372 if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) in __xprt_lock_write_next_cong()
374 if (xprt_need_congestion_window_wait(xprt)) in __xprt_lock_write_next_cong()
376 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending, in __xprt_lock_write_next_cong()
377 __xprt_lock_write_func, xprt)) in __xprt_lock_write_next_cong()
380 xprt_clear_locked(xprt); in __xprt_lock_write_next_cong()
390 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_release_xprt() argument
392 if (xprt->snd_task == task) { in xprt_release_xprt()
393 xprt_clear_locked(xprt); in xprt_release_xprt()
394 __xprt_lock_write_next(xprt); in xprt_release_xprt()
396 trace_xprt_release_xprt(xprt, task); in xprt_release_xprt()
408 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_release_xprt_cong() argument
410 if (xprt->snd_task == task) { in xprt_release_xprt_cong()
411 xprt_clear_locked(xprt); in xprt_release_xprt_cong()
412 __xprt_lock_write_next_cong(xprt); in xprt_release_xprt_cong()
414 trace_xprt_release_cong(xprt, task); in xprt_release_xprt_cong()
418 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_release_write() argument
420 if (xprt->snd_task != task) in xprt_release_write()
422 spin_lock(&xprt->transport_lock); in xprt_release_write()
423 xprt->ops->release_xprt(xprt, task); in xprt_release_write()
424 spin_unlock(&xprt->transport_lock); in xprt_release_write()
432 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) in __xprt_get_cong() argument
436 trace_xprt_get_cong(xprt, req->rq_task); in __xprt_get_cong()
437 if (RPCXPRT_CONGESTED(xprt)) { in __xprt_get_cong()
438 xprt_set_congestion_window_wait(xprt); in __xprt_get_cong()
442 xprt->cong += RPC_CWNDSCALE; in __xprt_get_cong()
451 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) in __xprt_put_cong() argument
456 xprt->cong -= RPC_CWNDSCALE; in __xprt_put_cong()
457 xprt_test_and_clear_congestion_window_wait(xprt); in __xprt_put_cong()
458 trace_xprt_put_cong(xprt, req->rq_task); in __xprt_put_cong()
459 __xprt_lock_write_next_cong(xprt); in __xprt_put_cong()
470 xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_request_get_cong() argument
476 spin_lock(&xprt->transport_lock); in xprt_request_get_cong()
477 ret = __xprt_get_cong(xprt, req) != 0; in xprt_request_get_cong()
478 spin_unlock(&xprt->transport_lock); in xprt_request_get_cong()
497 static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt) in xprt_clear_congestion_window_wait_locked() argument
499 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) in xprt_clear_congestion_window_wait_locked()
500 __xprt_lock_write_next_cong(xprt); in xprt_clear_congestion_window_wait_locked()
508 xprt_clear_congestion_window_wait(struct rpc_xprt *xprt) in xprt_clear_congestion_window_wait() argument
510 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) { in xprt_clear_congestion_window_wait()
511 spin_lock(&xprt->transport_lock); in xprt_clear_congestion_window_wait()
512 __xprt_lock_write_next_cong(xprt); in xprt_clear_congestion_window_wait()
513 spin_unlock(&xprt->transport_lock); in xprt_clear_congestion_window_wait()
533 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result) in xprt_adjust_cwnd() argument
536 unsigned long cwnd = xprt->cwnd; in xprt_adjust_cwnd()
538 if (result >= 0 && cwnd <= xprt->cong) { in xprt_adjust_cwnd()
542 if (cwnd > RPC_MAXCWND(xprt)) in xprt_adjust_cwnd()
543 cwnd = RPC_MAXCWND(xprt); in xprt_adjust_cwnd()
544 __xprt_lock_write_next_cong(xprt); in xprt_adjust_cwnd()
551 xprt->cong, xprt->cwnd, cwnd); in xprt_adjust_cwnd()
552 xprt->cwnd = cwnd; in xprt_adjust_cwnd()
553 __xprt_put_cong(xprt, req); in xprt_adjust_cwnd()
563 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) in xprt_wake_pending_tasks() argument
566 rpc_wake_up_status(&xprt->pending, status); in xprt_wake_pending_tasks()
568 rpc_wake_up(&xprt->pending); in xprt_wake_pending_tasks()
580 void xprt_wait_for_buffer_space(struct rpc_xprt *xprt) in xprt_wait_for_buffer_space() argument
582 set_bit(XPRT_WRITE_SPACE, &xprt->state); in xprt_wait_for_buffer_space()
587 xprt_clear_write_space_locked(struct rpc_xprt *xprt) in xprt_clear_write_space_locked() argument
589 if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) { in xprt_clear_write_space_locked()
590 __xprt_lock_write_next(xprt); in xprt_clear_write_space_locked()
592 "xprt %p\n", xprt); in xprt_clear_write_space_locked()
604 bool xprt_write_space(struct rpc_xprt *xprt) in xprt_write_space() argument
608 if (!test_bit(XPRT_WRITE_SPACE, &xprt->state)) in xprt_write_space()
610 spin_lock(&xprt->transport_lock); in xprt_write_space()
611 ret = xprt_clear_write_space_locked(xprt); in xprt_write_space()
612 spin_unlock(&xprt->transport_lock); in xprt_write_space()
652 struct rpc_xprt *xprt = req->rq_xprt; in xprt_init_majortimeo() local
654 if (likely(xprt && xprt_connected(xprt))) in xprt_init_majortimeo()
670 struct rpc_xprt *xprt = req->rq_xprt; in xprt_adjust_timeout() local
689 spin_lock(&xprt->transport_lock); in xprt_adjust_timeout()
691 spin_unlock(&xprt->transport_lock); in xprt_adjust_timeout()
705 struct rpc_xprt *xprt = in xprt_autoclose() local
709 trace_xprt_disconnect_auto(xprt); in xprt_autoclose()
710 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); in xprt_autoclose()
711 xprt->ops->close(xprt); in xprt_autoclose()
712 xprt_release_write(xprt, NULL); in xprt_autoclose()
713 wake_up_bit(&xprt->state, XPRT_LOCKED); in xprt_autoclose()
722 void xprt_disconnect_done(struct rpc_xprt *xprt) in xprt_disconnect_done() argument
724 trace_xprt_disconnect_done(xprt); in xprt_disconnect_done()
725 spin_lock(&xprt->transport_lock); in xprt_disconnect_done()
726 xprt_clear_connected(xprt); in xprt_disconnect_done()
727 xprt_clear_write_space_locked(xprt); in xprt_disconnect_done()
728 xprt_clear_congestion_window_wait_locked(xprt); in xprt_disconnect_done()
729 xprt_wake_pending_tasks(xprt, -ENOTCONN); in xprt_disconnect_done()
730 spin_unlock(&xprt->transport_lock); in xprt_disconnect_done()
738 static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt) in xprt_schedule_autoclose_locked() argument
740 if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state)) in xprt_schedule_autoclose_locked()
742 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) in xprt_schedule_autoclose_locked()
743 queue_work(xprtiod_workqueue, &xprt->task_cleanup); in xprt_schedule_autoclose_locked()
744 else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state)) in xprt_schedule_autoclose_locked()
745 rpc_wake_up_queued_task_set_status(&xprt->pending, in xprt_schedule_autoclose_locked()
746 xprt->snd_task, -ENOTCONN); in xprt_schedule_autoclose_locked()
754 void xprt_force_disconnect(struct rpc_xprt *xprt) in xprt_force_disconnect() argument
756 trace_xprt_disconnect_force(xprt); in xprt_force_disconnect()
759 spin_lock(&xprt->transport_lock); in xprt_force_disconnect()
760 xprt_schedule_autoclose_locked(xprt); in xprt_force_disconnect()
761 spin_unlock(&xprt->transport_lock); in xprt_force_disconnect()
766 xprt_connect_cookie(struct rpc_xprt *xprt) in xprt_connect_cookie() argument
768 return READ_ONCE(xprt->connect_cookie); in xprt_connect_cookie()
775 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_retransmit_after_disconnect() local
777 return req->rq_connect_cookie != xprt_connect_cookie(xprt) || in xprt_request_retransmit_after_disconnect()
778 !xprt_connected(xprt); in xprt_request_retransmit_after_disconnect()
792 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) in xprt_conditional_disconnect() argument
795 spin_lock(&xprt->transport_lock); in xprt_conditional_disconnect()
796 if (cookie != xprt->connect_cookie) in xprt_conditional_disconnect()
798 if (test_bit(XPRT_CLOSING, &xprt->state)) in xprt_conditional_disconnect()
800 xprt_schedule_autoclose_locked(xprt); in xprt_conditional_disconnect()
802 spin_unlock(&xprt->transport_lock); in xprt_conditional_disconnect()
806 xprt_has_timer(const struct rpc_xprt *xprt) in xprt_has_timer() argument
808 return xprt->idle_timeout != 0; in xprt_has_timer()
812 xprt_schedule_autodisconnect(struct rpc_xprt *xprt) in xprt_schedule_autodisconnect() argument
813 __must_hold(&xprt->transport_lock) in xprt_schedule_autodisconnect()
815 xprt->last_used = jiffies; in xprt_schedule_autodisconnect()
816 if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt)) in xprt_schedule_autodisconnect()
817 mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); in xprt_schedule_autodisconnect()
823 struct rpc_xprt *xprt = from_timer(xprt, t, timer); in xprt_init_autodisconnect() local
825 if (!RB_EMPTY_ROOT(&xprt->recv_queue)) in xprt_init_autodisconnect()
828 xprt->last_used = jiffies; in xprt_init_autodisconnect()
829 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) in xprt_init_autodisconnect()
831 queue_work(xprtiod_workqueue, &xprt->task_cleanup); in xprt_init_autodisconnect()
834 bool xprt_lock_connect(struct rpc_xprt *xprt, in xprt_lock_connect() argument
840 spin_lock(&xprt->transport_lock); in xprt_lock_connect()
841 if (!test_bit(XPRT_LOCKED, &xprt->state)) in xprt_lock_connect()
843 if (xprt->snd_task != task) in xprt_lock_connect()
845 set_bit(XPRT_SND_IS_COOKIE, &xprt->state); in xprt_lock_connect()
846 xprt->snd_task = cookie; in xprt_lock_connect()
849 spin_unlock(&xprt->transport_lock); in xprt_lock_connect()
854 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) in xprt_unlock_connect() argument
856 spin_lock(&xprt->transport_lock); in xprt_unlock_connect()
857 if (xprt->snd_task != cookie) in xprt_unlock_connect()
859 if (!test_bit(XPRT_LOCKED, &xprt->state)) in xprt_unlock_connect()
861 xprt->snd_task = NULL; in xprt_unlock_connect()
862 clear_bit(XPRT_SND_IS_COOKIE, &xprt->state); in xprt_unlock_connect()
863 xprt->ops->release_xprt(xprt, NULL); in xprt_unlock_connect()
864 xprt_schedule_autodisconnect(xprt); in xprt_unlock_connect()
866 spin_unlock(&xprt->transport_lock); in xprt_unlock_connect()
867 wake_up_bit(&xprt->state, XPRT_LOCKED); in xprt_unlock_connect()
878 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; in xprt_connect() local
880 trace_xprt_connect(xprt); in xprt_connect()
882 if (!xprt_bound(xprt)) { in xprt_connect()
886 if (!xprt_lock_write(xprt, task)) in xprt_connect()
889 if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { in xprt_connect()
890 task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; in xprt_connect()
891 rpc_sleep_on_timeout(&xprt->pending, task, NULL, in xprt_connect()
894 if (test_bit(XPRT_CLOSING, &xprt->state)) in xprt_connect()
896 if (xprt_test_and_set_connecting(xprt)) in xprt_connect()
899 if (!xprt_connected(xprt)) { in xprt_connect()
900 xprt->stat.connect_start = jiffies; in xprt_connect()
901 xprt->ops->connect(xprt, task); in xprt_connect()
903 xprt_clear_connecting(xprt); in xprt_connect()
905 rpc_wake_up_queued_task(&xprt->pending, task); in xprt_connect()
908 xprt_release_write(xprt, task); in xprt_connect()
916 unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt) in xprt_reconnect_delay() argument
920 start = xprt->stat.connect_start + xprt->reestablish_timeout; in xprt_reconnect_delay()
933 void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to) in xprt_reconnect_backoff() argument
935 xprt->reestablish_timeout <<= 1; in xprt_reconnect_backoff()
936 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout) in xprt_reconnect_backoff()
937 xprt->reestablish_timeout = xprt->max_reconnect_timeout; in xprt_reconnect_backoff()
938 if (xprt->reestablish_timeout < init_to) in xprt_reconnect_backoff()
939 xprt->reestablish_timeout = init_to; in xprt_reconnect_backoff()
959 xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid) in xprt_request_rb_find() argument
961 struct rb_node *n = xprt->recv_queue.rb_node; in xprt_request_rb_find()
981 xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new) in xprt_request_rb_insert() argument
983 struct rb_node **p = &xprt->recv_queue.rb_node; in xprt_request_rb_insert()
1003 rb_insert_color(&new->rq_recv, &xprt->recv_queue); in xprt_request_rb_insert()
1007 xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_request_rb_remove() argument
1009 rb_erase(&req->rq_recv, &xprt->recv_queue); in xprt_request_rb_remove()
1019 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) in xprt_lookup_rqst() argument
1023 entry = xprt_request_rb_find(xprt, xid); in xprt_lookup_rqst()
1025 trace_xprt_lookup_rqst(xprt, xid, 0); in xprt_lookup_rqst()
1032 trace_xprt_lookup_rqst(xprt, xid, -ENOENT); in xprt_lookup_rqst()
1033 xprt->stat.bad_xids++; in xprt_lookup_rqst()
1102 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_enqueue_receive() local
1108 spin_lock(&xprt->queue_lock); in xprt_request_enqueue_receive()
1115 xprt_request_rb_insert(xprt, req); in xprt_request_enqueue_receive()
1117 spin_unlock(&xprt->queue_lock); in xprt_request_enqueue_receive()
1120 del_singleshot_timer_sync(&xprt->timer); in xprt_request_enqueue_receive()
1169 struct rpc_xprt *xprt = req->rq_xprt; in xprt_complete_rqst() local
1171 xprt->stat.recvs++; in xprt_complete_rqst()
1179 rpc_wake_up_queued_task(&xprt->pending, task); in xprt_complete_rqst()
1186 struct rpc_xprt *xprt = req->rq_xprt; in xprt_timer() local
1191 trace_xprt_timer(xprt, req->rq_xid, task->tk_status); in xprt_timer()
1193 if (xprt->ops->timer) in xprt_timer()
1194 xprt->ops->timer(xprt, task); in xprt_timer()
1250 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_wait_receive() local
1259 spin_lock(&xprt->queue_lock); in xprt_request_wait_receive()
1261 xprt->ops->wait_for_reply_request(task); in xprt_request_wait_receive()
1268 rpc_wake_up_queued_task_set_status(&xprt->pending, in xprt_request_wait_receive()
1271 spin_unlock(&xprt->queue_lock); in xprt_request_wait_receive()
1290 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_enqueue_transmit() local
1294 spin_lock(&xprt->queue_lock); in xprt_request_enqueue_transmit()
1300 xprt_clear_congestion_window_wait(xprt); in xprt_request_enqueue_transmit()
1301 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { in xprt_request_enqueue_transmit()
1310 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { in xprt_request_enqueue_transmit()
1318 list_add_tail(&req->rq_xmit, &xprt->xmit_queue); in xprt_request_enqueue_transmit()
1322 spin_unlock(&xprt->queue_lock); in xprt_request_enqueue_transmit()
1362 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_dequeue_transmit() local
1364 spin_lock(&xprt->queue_lock); in xprt_request_dequeue_transmit()
1366 spin_unlock(&xprt->queue_lock); in xprt_request_dequeue_transmit()
1380 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_dequeue_xprt() local
1385 spin_lock(&xprt->queue_lock); in xprt_request_dequeue_xprt()
1390 spin_unlock(&xprt->queue_lock); in xprt_request_dequeue_xprt()
1392 spin_lock(&xprt->queue_lock); in xprt_request_dequeue_xprt()
1395 spin_unlock(&xprt->queue_lock); in xprt_request_dequeue_xprt()
1409 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_prepare() local
1411 if (xprt->ops->prepare_request) in xprt_request_prepare()
1412 xprt->ops->prepare_request(req); in xprt_request_prepare()
1435 struct rpc_xprt *xprt = req->rq_xprt; in xprt_prepare_transmit() local
1437 if (!xprt_lock_write(xprt, task)) { in xprt_prepare_transmit()
1440 rpc_wake_up_queued_task_set_status(&xprt->sending, in xprt_prepare_transmit()
1450 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; in xprt_end_transmit() local
1452 xprt_inject_disconnect(xprt); in xprt_end_transmit()
1453 xprt_release_write(xprt, task); in xprt_end_transmit()
1469 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_transmit() local
1499 connect_cookie = xprt->connect_cookie; in xprt_request_transmit()
1500 status = xprt->ops->send_request(req); in xprt_request_transmit()
1510 xprt_inject_disconnect(xprt); in xprt_request_transmit()
1513 spin_lock(&xprt->transport_lock); in xprt_request_transmit()
1515 xprt->stat.sends++; in xprt_request_transmit()
1516 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs; in xprt_request_transmit()
1517 xprt->stat.bklog_u += xprt->backlog.qlen; in xprt_request_transmit()
1518 xprt->stat.sending_u += xprt->sending.qlen; in xprt_request_transmit()
1519 xprt->stat.pending_u += xprt->pending.qlen; in xprt_request_transmit()
1520 spin_unlock(&xprt->transport_lock); in xprt_request_transmit()
1526 rpc_wake_up_queued_task_set_status(&xprt->sending, task, status); in xprt_request_transmit()
1543 struct rpc_xprt *xprt = req->rq_xprt; in xprt_transmit() local
1546 spin_lock(&xprt->queue_lock); in xprt_transmit()
1548 next = list_first_entry_or_null(&xprt->xmit_queue, in xprt_transmit()
1553 spin_unlock(&xprt->queue_lock); in xprt_transmit()
1557 spin_lock(&xprt->queue_lock); in xprt_transmit()
1568 cond_resched_lock(&xprt->queue_lock); in xprt_transmit()
1570 spin_unlock(&xprt->queue_lock); in xprt_transmit()
1579 void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_add_backlog() argument
1581 set_bit(XPRT_CONGESTED, &xprt->state); in xprt_add_backlog()
1582 rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init); in xprt_add_backlog()
1598 bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_wake_up_backlog() argument
1600 if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) { in xprt_wake_up_backlog()
1601 clear_bit(XPRT_CONGESTED, &xprt->state); in xprt_wake_up_backlog()
1608 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_throttle_congested() argument
1612 if (!test_bit(XPRT_CONGESTED, &xprt->state)) in xprt_throttle_congested()
1614 spin_lock(&xprt->reserve_lock); in xprt_throttle_congested()
1615 if (test_bit(XPRT_CONGESTED, &xprt->state)) { in xprt_throttle_congested()
1616 xprt_add_backlog(xprt, task); in xprt_throttle_congested()
1619 spin_unlock(&xprt->reserve_lock); in xprt_throttle_congested()
1624 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt) in xprt_dynamic_alloc_slot() argument
1629 if (xprt->num_reqs >= xprt->max_reqs) in xprt_dynamic_alloc_slot()
1631 ++xprt->num_reqs; in xprt_dynamic_alloc_slot()
1632 spin_unlock(&xprt->reserve_lock); in xprt_dynamic_alloc_slot()
1636 spin_lock(&xprt->reserve_lock); in xprt_dynamic_alloc_slot()
1639 --xprt->num_reqs; in xprt_dynamic_alloc_slot()
1645 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_dynamic_free_slot() argument
1647 if (xprt->num_reqs > xprt->min_reqs) { in xprt_dynamic_free_slot()
1648 --xprt->num_reqs; in xprt_dynamic_free_slot()
1655 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_alloc_slot() argument
1659 spin_lock(&xprt->reserve_lock); in xprt_alloc_slot()
1660 if (!list_empty(&xprt->free)) { in xprt_alloc_slot()
1661 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); in xprt_alloc_slot()
1665 req = xprt_dynamic_alloc_slot(xprt); in xprt_alloc_slot()
1675 xprt_add_backlog(xprt, task); in xprt_alloc_slot()
1681 spin_unlock(&xprt->reserve_lock); in xprt_alloc_slot()
1684 xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots, in xprt_alloc_slot()
1685 xprt->num_reqs); in xprt_alloc_slot()
1686 spin_unlock(&xprt->reserve_lock); in xprt_alloc_slot()
1693 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_free_slot() argument
1695 spin_lock(&xprt->reserve_lock); in xprt_free_slot()
1696 if (!xprt_wake_up_backlog(xprt, req) && in xprt_free_slot()
1697 !xprt_dynamic_free_slot(xprt, req)) { in xprt_free_slot()
1699 list_add(&req->rq_list, &xprt->free); in xprt_free_slot()
1701 spin_unlock(&xprt->reserve_lock); in xprt_free_slot()
1705 static void xprt_free_all_slots(struct rpc_xprt *xprt) in xprt_free_all_slots() argument
1708 while (!list_empty(&xprt->free)) { in xprt_free_all_slots()
1709 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); in xprt_free_all_slots()
1719 struct rpc_xprt *xprt; in xprt_alloc() local
1723 xprt = kzalloc(size, GFP_KERNEL); in xprt_alloc()
1724 if (xprt == NULL) in xprt_alloc()
1727 xprt_init(xprt, net); in xprt_alloc()
1733 list_add(&req->rq_list, &xprt->free); in xprt_alloc()
1736 xprt->max_reqs = max_alloc; in xprt_alloc()
1738 xprt->max_reqs = num_prealloc; in xprt_alloc()
1739 xprt->min_reqs = num_prealloc; in xprt_alloc()
1740 xprt->num_reqs = num_prealloc; in xprt_alloc()
1742 return xprt; in xprt_alloc()
1745 xprt_free(xprt); in xprt_alloc()
1751 void xprt_free(struct rpc_xprt *xprt) in xprt_free() argument
1753 put_net(xprt->xprt_net); in xprt_free()
1754 xprt_free_all_slots(xprt); in xprt_free()
1755 kfree_rcu(xprt, rcu); in xprt_free()
1760 xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt) in xprt_init_connect_cookie() argument
1762 req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1; in xprt_init_connect_cookie()
1766 xprt_alloc_xid(struct rpc_xprt *xprt) in xprt_alloc_xid() argument
1770 spin_lock(&xprt->reserve_lock); in xprt_alloc_xid()
1771 xid = (__force __be32)xprt->xid++; in xprt_alloc_xid()
1772 spin_unlock(&xprt->reserve_lock); in xprt_alloc_xid()
1777 xprt_init_xid(struct rpc_xprt *xprt) in xprt_init_xid() argument
1779 xprt->xid = prandom_u32(); in xprt_init_xid()
1785 struct rpc_xprt *xprt = task->tk_xprt; in xprt_request_init() local
1789 req->rq_xprt = xprt; in xprt_request_init()
1791 req->rq_xid = xprt_alloc_xid(xprt); in xprt_request_init()
1792 xprt_init_connect_cookie(req, xprt); in xprt_request_init()
1806 xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_do_reserve() argument
1808 xprt->ops->alloc_slot(xprt, task); in xprt_do_reserve()
1823 struct rpc_xprt *xprt = task->tk_xprt; in xprt_reserve() local
1830 if (!xprt_throttle_congested(xprt, task)) in xprt_reserve()
1831 xprt_do_reserve(xprt, task); in xprt_reserve()
1845 struct rpc_xprt *xprt = task->tk_xprt; in xprt_retry_reserve() local
1852 xprt_do_reserve(xprt, task); in xprt_retry_reserve()
1862 struct rpc_xprt *xprt; in xprt_release() local
1867 xprt = task->tk_xprt; in xprt_release()
1868 xprt_release_write(xprt, task); in xprt_release()
1873 xprt = req->rq_xprt; in xprt_release()
1875 spin_lock(&xprt->transport_lock); in xprt_release()
1876 xprt->ops->release_xprt(xprt, task); in xprt_release()
1877 if (xprt->ops->release_request) in xprt_release()
1878 xprt->ops->release_request(task); in xprt_release()
1879 xprt_schedule_autodisconnect(xprt); in xprt_release()
1880 spin_unlock(&xprt->transport_lock); in xprt_release()
1882 xprt->ops->buf_free(task); in xprt_release()
1892 xprt->ops->free_slot(xprt, req); in xprt_release()
1915 static void xprt_init(struct rpc_xprt *xprt, struct net *net) in xprt_init() argument
1917 kref_init(&xprt->kref); in xprt_init()
1919 spin_lock_init(&xprt->transport_lock); in xprt_init()
1920 spin_lock_init(&xprt->reserve_lock); in xprt_init()
1921 spin_lock_init(&xprt->queue_lock); in xprt_init()
1923 INIT_LIST_HEAD(&xprt->free); in xprt_init()
1924 xprt->recv_queue = RB_ROOT; in xprt_init()
1925 INIT_LIST_HEAD(&xprt->xmit_queue); in xprt_init()
1927 spin_lock_init(&xprt->bc_pa_lock); in xprt_init()
1928 INIT_LIST_HEAD(&xprt->bc_pa_list); in xprt_init()
1930 INIT_LIST_HEAD(&xprt->xprt_switch); in xprt_init()
1932 xprt->last_used = jiffies; in xprt_init()
1933 xprt->cwnd = RPC_INITCWND; in xprt_init()
1934 xprt->bind_index = 0; in xprt_init()
1936 rpc_init_wait_queue(&xprt->binding, "xprt_binding"); in xprt_init()
1937 rpc_init_wait_queue(&xprt->pending, "xprt_pending"); in xprt_init()
1938 rpc_init_wait_queue(&xprt->sending, "xprt_sending"); in xprt_init()
1939 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); in xprt_init()
1941 xprt_init_xid(xprt); in xprt_init()
1943 xprt->xprt_net = get_net(net); in xprt_init()
1953 struct rpc_xprt *xprt; in xprt_create_transport() local
1968 xprt = t->setup(args); in xprt_create_transport()
1969 if (IS_ERR(xprt)) in xprt_create_transport()
1972 xprt->idle_timeout = 0; in xprt_create_transport()
1973 INIT_WORK(&xprt->task_cleanup, xprt_autoclose); in xprt_create_transport()
1974 if (xprt_has_timer(xprt)) in xprt_create_transport()
1975 timer_setup(&xprt->timer, xprt_init_autodisconnect, 0); in xprt_create_transport()
1977 timer_setup(&xprt->timer, NULL, 0); in xprt_create_transport()
1980 xprt_destroy(xprt); in xprt_create_transport()
1983 xprt->servername = kstrdup(args->servername, GFP_KERNEL); in xprt_create_transport()
1984 if (xprt->servername == NULL) { in xprt_create_transport()
1985 xprt_destroy(xprt); in xprt_create_transport()
1989 rpc_xprt_debugfs_register(xprt); in xprt_create_transport()
1991 trace_xprt_create(xprt); in xprt_create_transport()
1993 return xprt; in xprt_create_transport()
1998 struct rpc_xprt *xprt = in xprt_destroy_cb() local
2001 trace_xprt_destroy(xprt); in xprt_destroy_cb()
2003 rpc_xprt_debugfs_unregister(xprt); in xprt_destroy_cb()
2004 rpc_destroy_wait_queue(&xprt->binding); in xprt_destroy_cb()
2005 rpc_destroy_wait_queue(&xprt->pending); in xprt_destroy_cb()
2006 rpc_destroy_wait_queue(&xprt->sending); in xprt_destroy_cb()
2007 rpc_destroy_wait_queue(&xprt->backlog); in xprt_destroy_cb()
2008 kfree(xprt->servername); in xprt_destroy_cb()
2012 xprt_destroy_backchannel(xprt, UINT_MAX); in xprt_destroy_cb()
2017 xprt->ops->destroy(xprt); in xprt_destroy_cb()
2025 static void xprt_destroy(struct rpc_xprt *xprt) in xprt_destroy() argument
2030 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); in xprt_destroy()
2037 spin_lock(&xprt->transport_lock); in xprt_destroy()
2038 del_timer_sync(&xprt->timer); in xprt_destroy()
2039 spin_unlock(&xprt->transport_lock); in xprt_destroy()
2045 INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb); in xprt_destroy()
2046 schedule_work(&xprt->task_cleanup); in xprt_destroy()
2059 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt) in xprt_get() argument
2061 if (xprt != NULL && kref_get_unless_zero(&xprt->kref)) in xprt_get()
2062 return xprt; in xprt_get()
2072 void xprt_put(struct rpc_xprt *xprt) in xprt_put() argument
2074 if (xprt != NULL) in xprt_put()
2075 kref_put(&xprt->kref, xprt_destroy_kref); in xprt_put()