Lines matching references to fq (block/blk-flush.c in the Linux kernel source):
97 struct blk_flush_queue *fq, unsigned int flags);
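The line above is apparently the tail of the blk_kick_flush() forward declaration; every later hit in this listing dereferences the same structure. Reconstructed purely from the field accesses below, struct blk_flush_queue (defined in block/blk.h) looks roughly like this; field order and any members not referenced in this listing may differ by kernel version:

    struct blk_flush_queue {
        unsigned int          flush_pending_idx:1;  /* staging list taking new requests */
        unsigned int          flush_running_idx:1;  /* staging list the in-flight flush serves */
        blk_status_t          rq_status;            /* deferred error, lines 227 and 239-241 */
        unsigned long         flush_pending_since;  /* jiffies stamp, line 186 */
        struct list_head      flush_queue[2];       /* double-buffered staging lists, lines 487-488 */
        struct list_head      flush_data_in_flight; /* data writes between flushes, line 489 */
        struct request        *flush_rq;            /* preallocated flush request, line 483 */
        spinlock_t            mq_flush_lock;        /* protects all of the above */
        struct lock_class_key key;                  /* per-queue lockdep class, lines 491-492 */
    };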
165 struct blk_flush_queue *fq, in blk_flush_complete_seq() argument
169 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_flush_complete_seq()
186 fq->flush_pending_since = jiffies; in blk_flush_complete_seq()
191 list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); in blk_flush_complete_seq()
212 blk_kick_flush(q, fq, cmd_flags); in blk_flush_complete_seq()
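blk_flush_complete_seq() is the state machine's single step: a request arriving at a flush stage is queued on the current pending list (stamping flush_pending_since, line 186), moved to flush_data_in_flight for its data stage (line 191), and the machinery is then re-kicked (line 212). The stages are the REQ_FSEQ_* bits also used at lines 370 and 443; as I read blk-flush.c they are defined as:

    enum {
        REQ_FSEQ_PREFLUSH  = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA      = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE      = (1 << 3),

        /* the three stages a request may still need to execute */
        REQ_FSEQ_ACTIONS   = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                             REQ_FSEQ_POSTFLUSH,
    };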
221 struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); in flush_end_io() local
224 spin_lock_irqsave(&fq->mq_flush_lock, flags); in flush_end_io()
227 fq->rq_status = error; in flush_end_io()
228 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in flush_end_io()
239 if (fq->rq_status != BLK_STS_OK) { in flush_end_io()
240 error = fq->rq_status; in flush_end_io()
241 fq->rq_status = BLK_STS_OK; in flush_end_io()
251 running = &fq->flush_queue[fq->flush_running_idx]; in flush_end_io()
252 BUG_ON(fq->flush_pending_idx == fq->flush_running_idx); in flush_end_io()
255 fq->flush_running_idx ^= 1; in flush_end_io()
262 blk_flush_complete_seq(rq, fq, seq, error); in flush_end_io()
265 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in flush_end_io()
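flush_end_io() retires one generation: it latches any deferred error out of rq_status (lines 239-241), captures the staging list the finished flush served (line 251), toggles flush_running_idx to free that slot (line 255), and advances every request that was waiting on the flush (line 262). The XOR toggle between two staging lists is the core idiom; here is a minimal userspace sketch of the same double buffering, with all names invented for illustration:

    #include <stdio.h>

    /* Two staging lists: new work lands on [pending], one hardware flush
     * serves everything on [running]. Toggling an index is cheaper than
     * moving entries between lists. */
    struct demo_fq {
        unsigned int pending_idx : 1;
        unsigned int running_idx : 1;
        int queue_len[2]; /* stand-in for struct list_head flush_queue[2] */
    };

    static void demo_kick(struct demo_fq *fq)
    {
        /* Mirrors line 295: only issue when no flush is in flight and
         * there is pending work. */
        if (fq->pending_idx != fq->running_idx ||
            fq->queue_len[fq->pending_idx] == 0)
            return;
        fq->pending_idx ^= 1; /* line 308: pending becomes the served list */
        printf("issuing flush for %d request(s)\n",
               fq->queue_len[fq->running_idx]);
    }

    static void demo_end_io(struct demo_fq *fq)
    {
        fq->queue_len[fq->running_idx] = 0; /* everyone on this list is done */
        fq->running_idx ^= 1;               /* line 255: retire this generation */
        demo_kick(fq);                      /* start the next one, if any */
    }

    int main(void)
    {
        struct demo_fq fq = { 0 };
        fq.queue_len[fq.pending_idx] = 3; /* three requests waiting on a flush */
        demo_kick(&fq);
        fq.queue_len[fq.pending_idx] = 2; /* arrivals while the flush runs */
        demo_end_io(&fq);                 /* completes gen 1, kicks gen 2 */
        return 0;
    }

Run it and the first kick issues a flush covering three requests while two more accumulate on the other list; completing that flush immediately kicks a second one, which is exactly the batching the real code aims for.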
286 static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq, in blk_kick_flush() argument
289 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_kick_flush()
292 struct request *flush_rq = fq->flush_rq; in blk_kick_flush()
295 if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending)) in blk_kick_flush()
299 if (!list_empty(&fq->flush_data_in_flight) && in blk_kick_flush()
301 fq->flush_pending_since + FLUSH_PENDING_TIMEOUT)) in blk_kick_flush()
308 fq->flush_pending_idx ^= 1; in blk_kick_flush()
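blk_kick_flush() only issues when no flush is in flight and something is pending (line 295), and it deliberately holds back while ordinary data writes are outstanding, so one flush can cover more requests, unless the oldest pending request has already waited past FLUSH_PENDING_TIMEOUT (lines 299-301; 5 * HZ jiffies in the source as I read it). A hedged sketch of just that decision, with invented names and seconds standing in for jiffies:

    #include <stdbool.h>
    #include <time.h>

    #define FLUSH_PENDING_TIMEOUT 5 /* seconds; the kernel uses 5 * HZ jiffies */

    /* Decide whether to issue a flush now or keep batching. */
    static bool should_kick_flush(bool flush_in_flight, int pending,
                                  int data_in_flight, time_t pending_since)
    {
        if (flush_in_flight || pending == 0)
            return false; /* line 295: busy, or nothing to do */

        /* Lines 299-301: while data writes are outstanding, wait for more
         * requests to pile up behind one flush, but never longer than the
         * timeout, or an unlucky flush could starve. */
        if (data_in_flight > 0 &&
            time(NULL) < pending_since + FLUSH_PENDING_TIMEOUT)
            return false;

        return true;
    }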
358 struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx); in mq_flush_data_end_io() local
369 spin_lock_irqsave(&fq->mq_flush_lock, flags); in mq_flush_data_end_io()
370 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error); in mq_flush_data_end_io()
371 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); in mq_flush_data_end_io()
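mq_flush_data_end_io() is the completion hook for the data stage in the middle of a sequence. End_io handlers may run in hard-irq context, hence the irqsave lock variant around the sequence step. A skeleton reconstructed only from the references above, with the elided parts marked:

    static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
    {
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
        unsigned long flags;

        /* ... driver-tag and scheduler bookkeeping elided ... */

        /* May run in hard-irq context, hence irqsave (lines 369-371). */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
    }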
390 struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); in blk_insert_flush() local
442 spin_lock_irq(&fq->mq_flush_lock); in blk_insert_flush()
443 blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); in blk_insert_flush()
444 spin_unlock_irq(&fq->mq_flush_lock); in blk_insert_flush()
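blk_insert_flush() first computes a policy mask of the stages this particular request needs (a request without data skips REQ_FSEQ_DATA; hardware with native FUA needs no post-flush; no volatile cache needs no flushes at all), then line 443 pre-completes everything the policy excludes, so the state machine only walks the remaining stages. The mask arithmetic, assuming the REQ_FSEQ_* enum sketched earlier; the feature checks that derive the policy are paraphrased here, not quoted:

    /* Data write plus preflush plus FUA emulation on a write-back cache
     * device without native FUA: every stage is needed. */
    unsigned int policy, skip;
    policy = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH;

    /* The same request on hardware with native FUA: drop the post-flush. */
    policy = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA;

    /* Line 443 pre-completes whatever the policy excludes, so the state
     * machine never visits those stages. */
    skip = REQ_FSEQ_ACTIONS & ~policy; /* == REQ_FSEQ_POSTFLUSH here */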
473 struct blk_flush_queue *fq; in blk_alloc_flush_queue() local
476 fq = kzalloc_node(sizeof(*fq), flags, node); in blk_alloc_flush_queue()
477 if (!fq) in blk_alloc_flush_queue()
480 spin_lock_init(&fq->mq_flush_lock); in blk_alloc_flush_queue()
483 fq->flush_rq = kzalloc_node(rq_sz, flags, node); in blk_alloc_flush_queue()
484 if (!fq->flush_rq) in blk_alloc_flush_queue()
487 INIT_LIST_HEAD(&fq->flush_queue[0]); in blk_alloc_flush_queue()
488 INIT_LIST_HEAD(&fq->flush_queue[1]); in blk_alloc_flush_queue()
489 INIT_LIST_HEAD(&fq->flush_data_in_flight); in blk_alloc_flush_queue()
491 lockdep_register_key(&fq->key); in blk_alloc_flush_queue()
492 lockdep_set_class(&fq->mq_flush_lock, &fq->key); in blk_alloc_flush_queue()
494 return fq; in blk_alloc_flush_queue()
497 kfree(fq); in blk_alloc_flush_queue()
502 void blk_free_flush_queue(struct blk_flush_queue *fq) in blk_free_flush_queue() argument
505 if (!fq) in blk_free_flush_queue()
508 lockdep_unregister_key(&fq->key); in blk_free_flush_queue()
509 kfree(fq->flush_rq); in blk_free_flush_queue()
510 kfree(fq); in blk_free_flush_queue()
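Allocation and teardown bracket the listing: blk_alloc_flush_queue() allocates the container, then the embedded flush request, and unwinds through the kfree at line 497 if the second allocation fails; blk_free_flush_queue() releases in reverse order and tolerates NULL (line 505). The same shape as a self-contained userspace sketch, all names invented:

    #include <stdlib.h>

    struct demo_fq {
        void *flush_rq; /* stands in for the preallocated struct request */
    };

    /* Two-step allocation with goto unwinding, as in blk_alloc_flush_queue(). */
    static struct demo_fq *demo_alloc_fq(size_t rq_sz)
    {
        struct demo_fq *fq = calloc(1, sizeof(*fq)); /* kzalloc_node analogue */
        if (!fq)
            return NULL;

        fq->flush_rq = calloc(1, rq_sz);
        if (!fq->flush_rq)
            goto fail_rq; /* unwind the container, as at line 497 */

        return fq;

    fail_rq:
        free(fq);
        return NULL;
    }

    /* Free in reverse order of allocation, as blk_free_flush_queue() does;
     * tolerating NULL mirrors the check at line 505. */
    static void demo_free_fq(struct demo_fq *fq)
    {
        if (!fq)
            return;
        free(fq->flush_rq);
        free(fq);
    }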