Lines matching full:incoming (whole-identifier matches in DRBD's request-submission path):
In submit_fast_path():

    1428  static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
    1434          list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
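The fast path skims off requests that can be submitted immediately, without waiting for an activity-log transaction, and leaves everything else on incoming. The sketch below is a minimal userspace model of that filtering loop; the request type and the needs_al_transaction() predicate are made-up stand-ins for DRBD's real criteria (empty flushes, already-active AL extents), not the kernel structures.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for struct drbd_request; not the kernel layout. */
    struct request {
        int sector;
        bool al_extent_hot;             /* pretend: AL extent already active */
        struct request *next;
    };

    /* Hypothetical predicate: only requests touching a cold extent need
     * an activity-log transaction before they may be submitted. */
    static bool needs_al_transaction(const struct request *req)
    {
        return !req->al_extent_hot;
    }

    static void submit(struct request *req)
    {
        printf("fast-path submit: sector %d\n", req->sector);
        free(req);
    }

    /* Model of submit_fast_path(): walk incoming with a safe iterator,
     * unlink and submit what we can, keep the rest on the list. */
    static void submit_fast_path(struct request **incoming)
    {
        struct request **link = incoming;
        while (*link) {
            struct request *req = *link;
            if (needs_al_transaction(req)) {
                link = &req->next;          /* stays on incoming */
            } else {
                *link = req->next;          /* unlink, like list_del_init() */
                submit(req);
            }
        }
    }

    int main(void)
    {
        struct request *incoming = NULL, **tail = &incoming;
        for (int i = 0; i < 4; i++) {
            struct request *req = calloc(1, sizeof(*req));
            req->sector = i * 8;
            req->al_extent_hot = (i % 2 == 0);
            *tail = req;
            tail = &req->next;
        }
        submit_fast_path(&incoming);
        for (struct request *req = incoming; req; req = req->next)
            printf("still on incoming: sector %d\n", req->sector);
        return 0;
    }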
In prepare_al_transaction_nonblock():

    1455                                       struct list_head *incoming,
    1464          while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
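prepare_al_transaction_nonblock() drains incoming one request at a time and tries to reserve each request's activity-log extent without blocking: requests whose extents can be activated move to pending, the rest move to busy for a later retry. A simplified model, again with hypothetical types and a fake al_begin_io_nonblock() in place of the real nonblocking reservation:

    #include <stdbool.h>
    #include <stdio.h>

    struct request { int sector; bool extent_free; struct request *next; };

    /* Fake nonblocking AL reservation: succeeds only when the extent can
     * be activated without waiting (no disk I/O, no contention). */
    static bool al_begin_io_nonblock(const struct request *req)
    {
        return req->extent_free;
    }

    static void push(struct request **list, struct request *req)
    {
        req->next = *list;
        *list = req;
    }

    /* Model of prepare_al_transaction_nonblock(): pop the first entry,
     * like list_first_entry_or_null(), and sort it onto pending or busy. */
    static void prepare_al_transaction_nonblock(struct request **incoming,
                                                struct request **pending,
                                                struct request **busy)
    {
        struct request *req;
        while ((req = *incoming) != NULL) {
            *incoming = req->next;
            if (al_begin_io_nonblock(req))
                push(pending, req);
            else
                push(busy, req);
        }
    }

    int main(void)
    {
        struct request a = { 0, true, NULL }, b = { 8, false, &a };
        struct request *incoming = &b, *pending = NULL, *busy = NULL;

        prepare_al_transaction_nonblock(&incoming, &pending, &busy);
        printf("pending sector: %d, busy sector: %d\n",
               pending ? pending->sector : -1, busy ? busy->sector : -1);
        return 0;
    }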
In do_submit():

    1500          LIST_HEAD(incoming);    /* from drbd_make_request() */
    1504          /* grab new incoming requests */
    1506          list_splice_tail_init(&device->submit.writes, &incoming);
    1512          /* move used-to-be-busy back to front of incoming */
    1513          list_splice_init(&busy, &incoming);
    1514          submit_fast_path(device, &incoming);
    1515          if (list_empty(&incoming))
    1521          list_splice_init(&busy, &incoming);
    1522          prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
    1529           * incoming requests, we still must not totally starve new
    1531           * Something left on &incoming means there had not been
    1539          if (!list_empty(&incoming))
    1543           * on incoming: all moved to busy!
    1546          list_splice_tail_init(&device->submit.writes, &incoming);
    1551          /* If the transaction was full, before all incoming requests
    1553           * without splicing in more incoming requests from upper layers.
    1555           * Else, if all incoming have been processed,
    1567          while (list_empty(&incoming)) {
    1587          list_splice_tail_init(&more_incoming, &incoming);
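Taken together, the do_submit() fragments describe the submit worker's batching loop: splice newly queued writes from device->submit.writes onto incoming under the device lock, give the fast path a chance, then repeatedly retry the used-to-be-busy requests, prepare one activity-log transaction's worth of pending requests without blocking, and commit and submit them. The starvation comments state the key rule: once a transaction fills up before all of incoming is consumed, commit and submit what is already queued rather than splicing in more incoming writes from upper layers. The skeleton below models only that control flow, with counters instead of request lists and stubs for the DRBD internals; every name beyond the matched ones is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    /* Counter stand-ins for the kernel's list_head lists. */
    struct list { int n; };
    #define LIST_HEAD(name) struct list name = { 0 }

    static void splice(struct list *from, struct list *to)
    {
        to->n += from->n;
        from->n = 0;
    }

    static bool list_empty(const struct list *l) { return l->n == 0; }

    static void submit_fast_path(struct list *incoming)
    {
        if (incoming->n > 0) {          /* pretend one request qualifies */
            printf("fast path: 1 request\n");
            incoming->n--;
        }
    }

    static void prepare_al_transaction_nonblock(struct list *incoming,
                                                struct list *pending,
                                                struct list *busy)
    {
        if (incoming->n > 1) {          /* pretend one extent is congested */
            busy->n += 1;
            incoming->n -= 1;
        }
        pending->n += incoming->n;      /* the rest fit this transaction */
        incoming->n = 0;
    }

    static void commit_and_submit(struct list *pending)
    {
        printf("AL commit + submit: %d request(s)\n", pending->n);
        pending->n = 0;
    }

    int main(void)
    {
        LIST_HEAD(incoming);
        LIST_HEAD(pending);
        LIST_HEAD(busy);

        for (int round = 0; round < 3; round++) {
            incoming.n += 3;            /* grab new writes (under lock) */
            splice(&busy, &incoming);   /* used-to-be-busy retried first */
            submit_fast_path(&incoming);
            if (list_empty(&incoming))
                continue;               /* nothing needs an AL update */

            prepare_al_transaction_nonblock(&incoming, &pending, &busy);
            commit_and_submit(&pending);
            /* anything left on busy waits for the next cycle; no new
             * writes are spliced in mid-cycle, so already-queued requests
             * are not starved by a continuous stream of fresh ones */
        }
        return 0;
    }

The point of the nonblocking prepare step is that the worker never sleeps on activity-log I/O while a backlog sits behind it: requests that would block simply park on busy and are retried at the front of incoming on the next pass.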