Lines matching refs:fiq

Every match below touches the FUSE input queue (struct fuse_iqueue); the fragments appear to come from the FUSE device code (fs/fuse/dev.c in mainline). Each entry gives the source line number, the matching line, and the enclosing function; the "argument" and "local" tags mark lines where fiq is declared rather than merely used.

196 u64 fuse_get_unique(struct fuse_iqueue *fiq)  in fuse_get_unique()  argument
198 fiq->reqctr += FUSE_REQ_ID_STEP; in fuse_get_unique()
199 return fiq->reqctr; in fuse_get_unique()
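The two matched lines are the whole body. A minimal reconstruction; the caller is expected to hold fiq->lock, since reqctr is a plain counter, and FUSE_REQ_ID_STEP (2 in mainline) keeps the low bit of the ID free for tagging interrupt replies:

u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
        /* Caller holds fiq->lock: reqctr is not atomic. */
        fiq->reqctr += FUSE_REQ_ID_STEP;
        return fiq->reqctr;
}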
211 static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq, bool sync) in fuse_dev_wake_and_unlock() argument
212 __releases(fiq->lock) in fuse_dev_wake_and_unlock()
215 wake_up_sync(&fiq->waitq); in fuse_dev_wake_and_unlock()
217 wake_up(&fiq->waitq); in fuse_dev_wake_and_unlock()
218 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); in fuse_dev_wake_and_unlock()
219 spin_unlock(&fiq->lock); in fuse_dev_wake_and_unlock()
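Reconstructed from the fragments (the if/else shape is implied by the two wake_up variants on adjacent lines): a sync wakeup is used when the waker is about to sleep itself, async watchers get SIGIO, and the queue lock is only dropped at the end:

static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq, bool sync)
__releases(fiq->lock)
{
        if (sync)
                wake_up_sync(&fiq->waitq);      /* waker will block right after */
        else
                wake_up(&fiq->waitq);
        kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
        spin_unlock(&fiq->lock);
}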
229 static void queue_request_and_unlock(struct fuse_iqueue *fiq, in queue_request_and_unlock() argument
231 __releases(fiq->lock) in queue_request_and_unlock()
236 list_add_tail(&req->list, &fiq->pending); in queue_request_and_unlock()
237 fiq->ops->wake_pending_and_unlock(fiq, sync); in queue_request_and_unlock()
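A sketch of the enqueue path. The header-length setup before list_add_tail() is assumed from mainline; the indirection through fiq->ops is what lets transports such as virtiofs supply their own wakeup:

static void queue_request_and_unlock(struct fuse_iqueue *fiq,
                                     struct fuse_req *req, bool sync)
__releases(fiq->lock)
{
        /* Assumed from mainline: finalize the request header length. */
        req->in.h.len = sizeof(struct fuse_in_header) +
                fuse_len_args(req->args->in_numargs,
                              (struct fuse_arg *) req->args->in_args);
        list_add_tail(&req->list, &fiq->pending);
        fiq->ops->wake_pending_and_unlock(fiq, sync);
}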
243 struct fuse_iqueue *fiq = &fc->iq; in fuse_queue_forget() local
248 spin_lock(&fiq->lock); in fuse_queue_forget()
249 if (fiq->connected) { in fuse_queue_forget()
250 fiq->forget_list_tail->next = forget; in fuse_queue_forget()
251 fiq->forget_list_tail = forget; in fuse_queue_forget()
252 fiq->ops->wake_forget_and_unlock(fiq, false); in fuse_queue_forget()
255 spin_unlock(&fiq->lock); in fuse_queue_forget()
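The forget list is a singly linked list appended through a tail pointer, so queueing is O(1). A sketch with the disconnected branch (kfree of the unqueued forget) assumed from mainline:

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
                       u64 nodeid, u64 nlookup)
{
        struct fuse_iqueue *fiq = &fc->iq;

        forget->forget_one.nodeid = nodeid;     /* assumed field names */
        forget->forget_one.nlookup = nlookup;

        spin_lock(&fiq->lock);
        if (fiq->connected) {
                fiq->forget_list_tail->next = forget;
                fiq->forget_list_tail = forget;
                fiq->ops->wake_forget_and_unlock(fiq, false);
        } else {
                kfree(forget);
                spin_unlock(&fiq->lock);
        }
}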
261 struct fuse_iqueue *fiq = &fc->iq; in flush_bg_queue() local
270 spin_lock(&fiq->lock); in flush_bg_queue()
271 req->in.h.unique = fuse_get_unique(fiq); in flush_bg_queue()
272 queue_request_and_unlock(fiq, req, false); in flush_bg_queue()
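flush_bg_queue() promotes queued background requests into the input queue; each one gets its unique ID and is handed over under fiq->lock. A sketch, with the background accounting (fc->active_background vs. fc->max_background, caller holding fc->bg_lock) assumed from mainline:

static void flush_bg_queue(struct fuse_conn *fc)
{
        struct fuse_iqueue *fiq = &fc->iq;

        /* Assumed: caller holds fc->bg_lock. */
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;

                req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
                spin_lock(&fiq->lock);
                req->in.h.unique = fuse_get_unique(fiq);
                queue_request_and_unlock(fiq, req, false);
        }
}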
288 struct fuse_iqueue *fiq = &fc->iq; in fuse_request_end() local
299 spin_lock(&fiq->lock); in fuse_request_end()
301 spin_unlock(&fiq->lock); in fuse_request_end()
344 struct fuse_iqueue *fiq = &req->fm->fc->iq; in queue_interrupt() local
346 spin_lock(&fiq->lock); in queue_interrupt()
349 spin_unlock(&fiq->lock); in queue_interrupt()
354 list_add_tail(&req->intr_entry, &fiq->interrupts); in queue_interrupt()
362 spin_unlock(&fiq->lock); in queue_interrupt()
365 fiq->ops->wake_interrupt_and_unlock(fiq, false); in queue_interrupt()
367 spin_unlock(&fiq->lock); in queue_interrupt()
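The matched lines outline queue_interrupt(): bail out early if the request was never marked interrupted, enqueue it on fiq->interrupts only once, and re-check "already finished" after a barrier so an interrupt is never left queued for a completed request. The flag names and the barrier pairing with fuse_request_end() are assumed from mainline:

static int queue_interrupt(struct fuse_req *req)
{
        struct fuse_iqueue *fiq = &req->fm->fc->iq;

        spin_lock(&fiq->lock);
        if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
                spin_unlock(&fiq->lock);
                return -EINVAL;
        }
        if (list_empty(&req->intr_entry)) {
                list_add_tail(&req->intr_entry, &fiq->interrupts);
                /* Pairs with the barrier in fuse_request_end(). */
                smp_mb();
                if (test_bit(FR_FINISHED, &req->flags)) {
                        list_del_init(&req->intr_entry);
                        spin_unlock(&fiq->lock);
                        return 0;
                }
                fiq->ops->wake_interrupt_and_unlock(fiq, false);
        } else {
                spin_unlock(&fiq->lock);
        }
        return 0;
}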
375 struct fuse_iqueue *fiq = &fc->iq; in request_wait_answer() local
399 spin_lock(&fiq->lock); in request_wait_answer()
403 spin_unlock(&fiq->lock); in request_wait_answer()
408 spin_unlock(&fiq->lock); in request_wait_answer()
420 struct fuse_iqueue *fiq = &req->fm->fc->iq; in __fuse_request_send() local
423 spin_lock(&fiq->lock); in __fuse_request_send()
424 if (!fiq->connected) { in __fuse_request_send()
425 spin_unlock(&fiq->lock); in __fuse_request_send()
428 req->in.h.unique = fuse_get_unique(fiq); in __fuse_request_send()
432 queue_request_and_unlock(fiq, req, true); in __fuse_request_send()
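Sketch of the synchronous send: ID assignment and queueing happen in one fiq->lock section, and sync == true tells the wakeup that the sender is about to block in request_wait_answer(). The -ENOTCONN error path is assumed:

static void __fuse_request_send(struct fuse_req *req)
{
        struct fuse_iqueue *fiq = &req->fm->fc->iq;

        spin_lock(&fiq->lock);
        if (!fiq->connected) {
                spin_unlock(&fiq->lock);
                req->out.h.error = -ENOTCONN;   /* assumed */
        } else {
                req->in.h.unique = fuse_get_unique(fiq);
                queue_request_and_unlock(fiq, req, true);
                request_wait_answer(req);
        }
}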
593 struct fuse_iqueue *fiq = &fm->fc->iq; in fuse_simple_notify_reply() local
605 spin_lock(&fiq->lock); in fuse_simple_notify_reply()
606 if (fiq->connected) { in fuse_simple_notify_reply()
607 queue_request_and_unlock(fiq, req, false); in fuse_simple_notify_reply()
610 spin_unlock(&fiq->lock); in fuse_simple_notify_reply()
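Notify replies take the same queueing path but reuse a caller-supplied unique ID instead of calling fuse_get_unique(). A trimmed sketch of the queueing step only; the surrounding request setup and the -ENODEV disconnect handling are assumed:

        /* req->in.h.unique was set to the notification's ID by the caller. */
        spin_lock(&fiq->lock);
        if (fiq->connected) {
                queue_request_and_unlock(fiq, req, false);
        } else {
                err = -ENODEV;          /* assumed error code */
                spin_unlock(&fiq->lock);
                fuse_put_request(req);  /* assumed cleanup */
        }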
1047 static int forget_pending(struct fuse_iqueue *fiq) in forget_pending() argument
1049 return fiq->forget_list_head.next != NULL; in forget_pending()
1052 static int request_pending(struct fuse_iqueue *fiq) in request_pending() argument
1054 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) || in request_pending()
1055 forget_pending(fiq); in request_pending()
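Both predicates are visible in full; assembled, they read as below. request_pending() is the wait and poll condition used later in fuse_dev_do_read() and fuse_dev_poll(), and must be evaluated under fiq->lock:

static int forget_pending(struct fuse_iqueue *fiq)
{
        return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
        return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
                forget_pending(fiq);
}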
1066 static int fuse_read_interrupt(struct fuse_iqueue *fiq, in fuse_read_interrupt() argument
1069 __releases(fiq->lock) in fuse_read_interrupt()
1084 spin_unlock(&fiq->lock); in fuse_read_interrupt()
1096 struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, in fuse_dequeue_forget() argument
1100 struct fuse_forget_link *head = fiq->forget_list_head.next; in fuse_dequeue_forget()
1107 fiq->forget_list_head.next = *newhead; in fuse_dequeue_forget()
1109 if (fiq->forget_list_head.next == NULL) in fuse_dequeue_forget()
1110 fiq->forget_list_tail = &fiq->forget_list_head; in fuse_dequeue_forget()
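fuse_dequeue_forget() cuts up to max entries off the head of the singly linked forget list and resets the tail pointer when the list drains. The walk and the count out-parameter are assumed from mainline:

struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
                                             unsigned int max,
                                             unsigned int *countp)
{
        struct fuse_forget_link *head = fiq->forget_list_head.next;
        struct fuse_forget_link **newhead = &head;
        unsigned int count;

        /* Advance at most max links; *newhead ends up at the cut point. */
        for (count = 0; *newhead != NULL && count < max; count++)
                newhead = &(*newhead)->next;

        fiq->forget_list_head.next = *newhead;
        *newhead = NULL;        /* detach the chain being returned */
        if (fiq->forget_list_head.next == NULL)
                fiq->forget_list_tail = &fiq->forget_list_head;

        if (countp != NULL)
                *countp = count;
        return head;
}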
1119 static int fuse_read_single_forget(struct fuse_iqueue *fiq, in fuse_read_single_forget() argument
1122 __releases(fiq->lock) in fuse_read_single_forget()
1125 struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL); in fuse_read_single_forget()
1132 .unique = fuse_get_unique(fiq), in fuse_read_single_forget()
1136 spin_unlock(&fiq->lock); in fuse_read_single_forget()
1152 static int fuse_read_batch_forget(struct fuse_iqueue *fiq, in fuse_read_batch_forget() argument
1154 __releases(fiq->lock) in fuse_read_batch_forget()
1163 .unique = fuse_get_unique(fiq), in fuse_read_batch_forget()
1168 spin_unlock(&fiq->lock); in fuse_read_batch_forget()
1173 head = fuse_dequeue_forget(fiq, max_forgets, &count); in fuse_read_batch_forget()
1174 spin_unlock(&fiq->lock); in fuse_read_batch_forget()
1201 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, in fuse_read_forget() argument
1204 __releases(fiq->lock) in fuse_read_forget()
1206 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) in fuse_read_forget()
1207 return fuse_read_single_forget(fiq, cs, nbytes); in fuse_read_forget()
1209 return fuse_read_batch_forget(fiq, cs, nbytes); in fuse_read_forget()
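The dispatch is fully visible across the matched lines: peers older than protocol minor 16 (which introduced FUSE_BATCH_FORGET) and single-entry lists get one forget per message, everything else is batched:

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
                            struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->lock)
{
        if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
                return fuse_read_single_forget(fiq, cs, nbytes);
        else
                return fuse_read_batch_forget(fiq, cs, nbytes);
}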
1226 struct fuse_iqueue *fiq = &fc->iq; in fuse_dev_do_read() local
1253 spin_lock(&fiq->lock); in fuse_dev_do_read()
1254 if (!fiq->connected || request_pending(fiq)) in fuse_dev_do_read()
1256 spin_unlock(&fiq->lock); in fuse_dev_do_read()
1260 err = wait_event_interruptible_exclusive(fiq->waitq, in fuse_dev_do_read()
1261 !fiq->connected || request_pending(fiq)); in fuse_dev_do_read()
1266 if (!fiq->connected) { in fuse_dev_do_read()
1271 if (!list_empty(&fiq->interrupts)) { in fuse_dev_do_read()
1272 req = list_entry(fiq->interrupts.next, struct fuse_req, in fuse_dev_do_read()
1274 return fuse_read_interrupt(fiq, cs, nbytes, req); in fuse_dev_do_read()
1277 if (forget_pending(fiq)) { in fuse_dev_do_read()
1278 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0) in fuse_dev_do_read()
1279 return fuse_read_forget(fc, fiq, cs, nbytes); in fuse_dev_do_read()
1281 if (fiq->forget_batch <= -8) in fuse_dev_do_read()
1282 fiq->forget_batch = 16; in fuse_dev_do_read()
1285 req = list_entry(fiq->pending.next, struct fuse_req, list); in fuse_dev_do_read()
1288 spin_unlock(&fiq->lock); in fuse_dev_do_read()
1355 spin_unlock(&fiq->lock); in fuse_dev_do_read()
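The matched lines trace the reader's wait-and-dispatch loop. Priority order: interrupts first, then forgets (rate limited so they cannot starve regular traffic), then the pending list. A sketch of that portion, with the O_NONBLOCK check and the FR_PENDING handling assumed from mainline:

        for (;;) {
                spin_lock(&fiq->lock);
                if (!fiq->connected || request_pending(fiq))
                        break;
                spin_unlock(&fiq->lock);

                if (file->f_flags & O_NONBLOCK)         /* assumed */
                        return -EAGAIN;
                err = wait_event_interruptible_exclusive(fiq->waitq,
                                !fiq->connected || request_pending(fiq));
                if (err)
                        return err;
        }

        if (!fiq->connected) {
                err = fc->aborted ? -ECONNABORTED : -ENODEV;    /* assumed */
                goto err_unlock;
        }

        /* Interrupts preempt everything else. */
        if (!list_empty(&fiq->interrupts)) {
                req = list_entry(fiq->interrupts.next, struct fuse_req,
                                 intr_entry);
                return fuse_read_interrupt(fiq, cs, nbytes, req);
        }

        /*
         * Forgets next, but throttled: forget_batch counts down per forget,
         * and once it has run 8 past zero with requests still pending it is
         * re-armed to 16, letting regular requests through in between.
         */
        if (forget_pending(fiq)) {
                if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
                        return fuse_read_forget(fc, fiq, cs, nbytes);

                if (fiq->forget_batch <= -8)
                        fiq->forget_batch = 16;
        }

        req = list_entry(fiq->pending.next, struct fuse_req, list);
        clear_bit(FR_PENDING, &req->flags);     /* assumed */
        list_del_init(&req->list);
        spin_unlock(&fiq->lock);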
2085 struct fuse_iqueue *fiq; in fuse_dev_poll() local
2091 fiq = &fud->fc->iq; in fuse_dev_poll()
2092 poll_wait(file, &fiq->waitq, wait); in fuse_dev_poll()
2094 spin_lock(&fiq->lock); in fuse_dev_poll()
2095 if (!fiq->connected) in fuse_dev_poll()
2097 else if (request_pending(fiq)) in fuse_dev_poll()
2099 spin_unlock(&fiq->lock); in fuse_dev_poll()
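Assembled sketch of fuse_dev_poll(); the default mask (always writable), the fuse_get_dev() lookup, and the exact EPOLL bits are assumed from mainline:

static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
        __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* always writable */
        struct fuse_iqueue *fiq;
        struct fuse_dev *fud = fuse_get_dev(file);

        if (!fud)
                return EPOLLERR;

        fiq = &fud->fc->iq;
        poll_wait(file, &fiq->waitq, wait);

        spin_lock(&fiq->lock);
        if (!fiq->connected)
                mask = EPOLLERR;
        else if (request_pending(fiq))
                mask |= EPOLLIN | EPOLLRDNORM;
        spin_unlock(&fiq->lock);

        return mask;
}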
2152 struct fuse_iqueue *fiq = &fc->iq; in fuse_abort_conn() local
2194 spin_lock(&fiq->lock); in fuse_abort_conn()
2195 fiq->connected = 0; in fuse_abort_conn()
2196 list_for_each_entry(req, &fiq->pending, list) in fuse_abort_conn()
2198 list_splice_tail_init(&fiq->pending, &to_end); in fuse_abort_conn()
2199 while (forget_pending(fiq)) in fuse_abort_conn()
2200 kfree(fuse_dequeue_forget(fiq, 1, NULL)); in fuse_abort_conn()
2201 wake_up_all(&fiq->waitq); in fuse_abort_conn()
2202 spin_unlock(&fiq->lock); in fuse_abort_conn()
2203 kill_fasync(&fiq->fasync, SIGIO, POLL_IN); in fuse_abort_conn()
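Teardown of the input queue in fuse_abort_conn(): mark it disconnected under fiq->lock, splice still-pending requests onto a local list (to_end) to be completed with an error outside the lock, free queued forgets one link at a time, and wake all readers; SIGIO is sent only after the lock is dropped. Sketch of that excerpt, with the FR_PENDING clearing assumed:

        spin_lock(&fiq->lock);
        fiq->connected = 0;
        list_for_each_entry(req, &fiq->pending, list)
                clear_bit(FR_PENDING, &req->flags);     /* assumed */
        list_splice_tail_init(&fiq->pending, &to_end);
        while (forget_pending(fiq))
                kfree(fuse_dequeue_forget(fiq, 1, NULL));
        wake_up_all(&fiq->waitq);
        spin_unlock(&fiq->lock);
        kill_fasync(&fiq->fasync, SIGIO, POLL_IN);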