Searched refs:rq_flags (Results 1 – 25 of 67) sorted by relevance

/OK3568_Linux_fs/kernel/block/
blk-pm.h
18 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
26 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_requeue_request()
35 if (q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_add_request()
43 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_put_request()
blk-zoned.c
92 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in blk_req_zone_write_trylock()
93 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in blk_req_zone_write_trylock()
105 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in __blk_req_zone_write_lock()
106 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_lock()
112 rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED; in __blk_req_zone_write_unlock()
blk-flush.c
131 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
331 flush_rq->rq_flags |= RQF_MQ_INFLIGHT; in blk_kick_flush()
337 flush_rq->rq_flags |= RQF_FLUSH_SEQ; in blk_kick_flush()
437 rq->rq_flags |= RQF_FLUSH_SEQ; in blk_insert_flush()
blk-core.c
251 if (unlikely(rq->rq_flags & RQF_QUIET)) in req_bio_endio()
268 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) in req_bio_endio()
1245 if (!(rq->rq_flags & RQF_MIXED_MERGE)) in blk_rq_err_bytes()
1303 !(req->rq_flags & RQF_FLUSH_SEQ)) { in blk_account_io_done()
1456 !(req->rq_flags & RQF_QUIET))) in blk_update_request()
1500 if (req->rq_flags & RQF_MIXED_MERGE) { in blk_update_request()
1505 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { in blk_update_request()
1636 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { in blk_rq_prep_clone()
1637 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; in blk_rq_prep_clone()
blk-mq.c
275 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; in blk_mq_need_time_stamp()
296 rq->rq_flags = 0; in blk_mq_rq_ctx_init()
299 rq->rq_flags |= RQF_PM; in blk_mq_rq_ctx_init()
301 rq->rq_flags |= RQF_IO_STAT; in blk_mq_rq_ctx_init()
341 rq->rq_flags |= RQF_ELVPRIV; in blk_mq_rq_ctx_init()
517 if (rq->rq_flags & RQF_ELVPRIV) { in blk_mq_free_request()
527 if (rq->rq_flags & RQF_MQ_INFLIGHT) in blk_mq_free_request()
548 if (rq->rq_flags & RQF_STATS) { in __blk_mq_end_request()
745 rq->rq_flags |= RQF_STATS; in blk_mq_start_request()
772 rq->rq_flags &= ~RQF_TIMED_OUT; in __blk_mq_requeue_request()
[all …]
blk-merge.c
530 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in __blk_rq_map_sg()
687 if (rq->rq_flags & RQF_MIXED_MERGE) in blk_rq_set_mixed_merge()
700 rq->rq_flags |= RQF_MIXED_MERGE; in blk_rq_set_mixed_merge()
783 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) || in attempt_merge()
blk-mq.h
233 if (rq->rq_flags & RQF_MQ_INFLIGHT) { in __blk_mq_put_driver_tag()
234 rq->rq_flags &= ~RQF_MQ_INFLIGHT; in __blk_mq_put_driver_tag()
blk-mq-sched.c
417 if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq)) in blk_mq_sched_bypass_insert()
421 rq->rq_flags |= RQF_SORTED; in blk_mq_sched_bypass_insert()
458 at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head; in blk_mq_sched_insert_request()
blk-mq-sched.h
66 if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request) in blk_mq_sched_requeue_request()
blk-timeout.c
140 req->rq_flags &= ~RQF_TIMED_OUT; in blk_add_timer()
blk.h
200 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
256 return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT); in blk_do_io_stat()
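
All of the block-layer hits above touch the same field: rq->rq_flags is the req_flags_t bitmask in struct request (declared in blkdev.h, listed further down), and it is read and updated with ordinary bitwise operations. A minimal sketch of that idiom, using only flags that appear in these results; the helper name is invented for illustration:

    #include <linux/blkdev.h>

    /* Illustrative only: test a flag with &, set it with |=, clear it with &= ~. */
    static void example_mark_quiet(struct request *rq)
    {
            if (rq->rq_flags & RQF_QUIET)   /* already marked quiet? */
                    return;
            rq->rq_flags |= RQF_QUIET;      /* suppress error reporting for this rq */
    }
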
/OK3568_Linux_fs/kernel/kernel/sched/
sched.h
1247 struct rq_flags { struct
1270 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock()
1280 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock()
1290 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock()
1302 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1305 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1309 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock()
1317 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock()
1327 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave()
1335 rq_lock_irq(struct rq *rq, struct rq_flags *rf) in rq_lock_irq()
[all …]
core.c
194 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
219 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
366 struct rq_flags rf; in hrtick()
394 struct rq_flags rf; in __hrtick_start()
1064 struct rq_flags rf; in uclamp_update_util_min_rt_default()
1358 struct rq_flags rf; in uclamp_update_active()
1841 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task()
1886 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task()
1909 struct rq_flags rf; in migration_cpu_stop()
1990 struct rq_flags *rf) in __set_cpus_allowed_ptr_locked()
[all …]
stop_task.c
20 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop()
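
Under kernel/sched/ the same identifier names something unrelated: struct rq_flags is a small on-stack cookie that carries saved IRQ flags and lockdep pin state across a runqueue-lock critical section. The usual shape follows the task_rq_lock()/task_rq_unlock() pairs listed above; the function below and the work done inside the lock are hypothetical, and these helpers are private to kernel/sched/:

    #include "sched.h"      /* kernel/sched/ private header declaring the helpers */

    /* Hypothetical user of the scheduler-internal lock helpers. */
    static void example_inspect_task(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;

            rq = task_rq_lock(p, &rf);      /* locks and pins p's runqueue, fills rf */
            /* ... examine or adjust p while its runqueue lock is held ... */
            task_rq_unlock(rq, p, &rf);     /* unpins and unlocks using the state saved in rf */
    }
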
/OK3568_Linux_fs/kernel/drivers/ide/
ide-cd.c
103 if (!sense || !rq || (rq->rq_flags & RQF_QUIET)) in cdrom_log_sense()
304 rq->rq_flags |= RQF_FAILED; in cdrom_decode_status()
324 !(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
359 if (!(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
368 if (!(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
375 if (!(rq->rq_flags & RQF_QUIET)) in cdrom_decode_status()
393 rq->rq_flags |= RQF_FAILED; in cdrom_decode_status()
435 req_flags_t rq_flags) in ide_cd_queue_pc() argument
444 cmd[0], write, timeout, rq_flags); in ide_cd_queue_pc()
459 rq->rq_flags |= rq_flags; in ide_cd_queue_pc()
[all …]
ide-io.c
330 rq->rq_flags |= RQF_FAILED; in start_request()
466 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) { in ide_issue_rq()
467 rq->rq_flags |= RQF_DONTPREP; in ide_issue_rq()
521 (rq->rq_flags & RQF_PM) == 0) { in ide_issue_rq()
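
The ide-io.c hit in ide_issue_rq() is the usual RQF_DONTPREP "prepare once" idiom: a driver performs its one-time setup the first time it sees a request and sets the flag so a requeued request is not prepared twice; the scsi_lib.c and mmc queue.c entries below clear the flag again when a request is requeued or cleaned up. A sketch of both halves with invented callback names:

    #include <linux/blkdev.h>

    /* Illustrative driver callbacks showing the prepare-once idiom. */
    static void example_prepare(struct request *rq)
    {
            if (!(rq->rq_flags & RQF_DONTPREP)) {
                    /* one-time per-request setup would go here */
                    rq->rq_flags |= RQF_DONTPREP;
            }
    }

    static void example_requeue(struct request *rq)
    {
            /* drop the flag so the setup runs again when the rq is re-issued */
            rq->rq_flags &= ~RQF_DONTPREP;
    }
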
/OK3568_Linux_fs/kernel/drivers/scsi/
scsi_lib.c
156 if (cmd->request->rq_flags & RQF_DONTPREP) { in scsi_mq_requeue_cmd()
157 cmd->request->rq_flags &= ~RQF_DONTPREP; in scsi_mq_requeue_cmd()
243 int timeout, int retries, u64 flags, req_flags_t rq_flags, in __scsi_execute() argument
253 rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0); in __scsi_execute()
267 req->rq_flags |= rq_flags | RQF_QUIET; in __scsi_execute()
804 if (!(req->rq_flags & RQF_QUIET)) { in scsi_io_completion_action()
893 else if (req->rq_flags & RQF_QUIET) in scsi_io_completion_nz_result()
1127 if (rq->rq_flags & RQF_DONTPREP) { in scsi_cleanup_rq()
1129 rq->rq_flags &= ~RQF_DONTPREP; in scsi_cleanup_rq()
1237 if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) in scsi_device_state_check()
[all …]
/OK3568_Linux_fs/kernel/net/sunrpc/
svc.c
612 __set_bit(RQ_BUSY, &rqstp->rq_flags); in svc_rqst_alloc()
697 set_bit(RQ_VICTIM, &rqstp->rq_flags); in choose_victim()
860 if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags)) in svc_exit_thread()
1163 set_bit(RQ_AUTHERR, &rqstp->rq_flags); in svc_return_autherr()
1171 if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags)) in svc_get_autherr()
1196 test_bit(RQ_DROPME, &rqstp->rq_flags)) in svc_generic_dispatch()
1199 if (test_bit(RQ_AUTHERR, &rqstp->rq_flags)) in svc_generic_dispatch()
1290 set_bit(RQ_SPLICE_OK, &rqstp->rq_flags); in svc_process_common()
1292 set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags); in svc_process_common()
1293 clear_bit(RQ_DROPME, &rqstp->rq_flags); in svc_process_common()
svc_xprt.c
362 if (!test_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_reserve_slot()
366 set_bit(RQ_DATA, &rqstp->rq_flags); in svc_xprt_reserve_slot()
374 if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) { in svc_xprt_release_slot()
438 if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) in svc_xprt_do_enqueue()
561 if (test_bit(RQ_BUSY, &rqstp->rq_flags)) in svc_wake_up()
729 clear_bit(RQ_BUSY, &rqstp->rq_flags); in svc_get_next_xprt()
739 set_bit(RQ_BUSY, &rqstp->rq_flags); in svc_get_next_xprt()
1175 if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags)) in svc_defer()
1205 set_bit(RQ_DROPME, &rqstp->rq_flags); in svc_defer()
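
The sunrpc results are a third, unrelated rq_flags: struct svc_rqst's rq_flags is an unsigned long used as an atomic bit field, so the RQ_* bits (RQ_BUSY, RQ_DROPME, RQ_DATA, ...) are flipped with set_bit()/clear_bit()/test_and_set_bit() rather than with |= and &=. A minimal sketch in that style; the function is invented:

    #include <linux/sunrpc/svc.h>

    /* Hypothetical: claim a server thread using the atomic-bitop style above. */
    static bool example_claim_thread(struct svc_rqst *rqstp)
    {
            /* test_and_set_bit() returns the old bit, so 0 means we won the race */
            if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
                    return false;
            clear_bit(RQ_DROPME, &rqstp->rq_flags);
            return true;
    }
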
/OK3568_Linux_fs/kernel/include/trace/hooks/
sched.h
100 struct rq_flags;
102 TP_PROTO(struct rq *this_rq, struct rq_flags *rf,
121 TP_PROTO(struct rq *rq, struct rq_flags *rf,
345 struct rq_flags;
/OK3568_Linux_fs/kernel/include/scsi/
scsi_device.h
452 req_flags_t rq_flags, int *resid);
455 sshdr, timeout, retries, flags, rq_flags, resid) \ argument
460 sense, sshdr, timeout, retries, flags, rq_flags, \
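
scsi_device.h is where the rq_flags argument reaches callers: the scsi_execute() wrapper forwards a req_flags_t to __scsi_execute(), which (per the scsi_lib.c hits above) allocates the request with BLK_MQ_REQ_PM when RQF_PM is set and then ORs the flags, plus RQF_QUIET, into the new request. A caller-side sketch assuming the wrapper signature shown here; the function itself is made up:

    #include <linux/blkdev.h>
    #include <linux/dma-direction.h>
    #include <scsi/scsi_common.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_proto.h>

    /* Hypothetical: issue TEST UNIT READY with RQF_PM so it is still allowed
     * while the device is runtime-suspending or resuming. */
    static int example_test_unit_ready(struct scsi_device *sdev)
    {
            unsigned char cmd[6] = { TEST_UNIT_READY };
            struct scsi_sense_hdr sshdr;

            return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
                                10 * HZ, 3, 0, RQF_PM, NULL);
    }
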
/OK3568_Linux_fs/kernel/drivers/mmc/core/
queue.c
262 req->rq_flags |= RQF_QUIET; in mmc_mq_queue_rq()
314 if (!(req->rq_flags & RQF_DONTPREP)) { in mmc_mq_queue_rq()
316 req->rq_flags |= RQF_DONTPREP; in mmc_mq_queue_rq()
/OK3568_Linux_fs/kernel/include/linux/
blkdev.h
143 req_flags_t rq_flags; member
696 return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); in blk_account_rq()
830 if (rq->rq_flags & RQF_NOMERGE_FLAGS) in rq_mergeable()
1055 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_payload_bytes()
1066 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in req_bvec()
1209 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in blk_rq_nr_phys_segments()
1926 if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) in blk_req_zone_write_unlock()
/OK3568_Linux_fs/kernel/drivers/md/
dm-rq.c
269 if (rq->rq_flags & RQF_FAILED) in dm_softirq_done()
296 rq->rq_flags |= RQF_FAILED; in dm_kill_unmapped_request()
312 clone->rq_flags |= RQF_IO_STAT; in dm_dispatch_clone_request()
/OK3568_Linux_fs/kernel/drivers/scsi/device_handler/
scsi_dh_hp_sw.c
167 req->rq_flags |= RQF_QUIET; in hp_sw_prep_fn()
