/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>

#define RWBS_LEN	8

DECLARE_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  size_t,	size			)
	),

	TP_fast_assign(
		__entry->dev		= bh->b_bdev->bd_dev;
		__entry->sector		= bh->b_blocknr;
		__entry->size		= bh->b_size;
	),

	TP_printk("%d,%d sector=%llu size=%zu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->sector, __entry->size
	)
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);
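/*
 * Illustrative sketch (editor's note, not part of the upstream header):
 * every event defined in this file gets register_trace_<event>() /
 * unregister_trace_<event>() helpers generated by the tracepoint
 * machinery, so a module can attach a probe. The probe's first
 * argument is the opaque data pointer passed at registration; the
 * remaining arguments mirror TP_PROTO. A minimal probe for
 * block_touch_buffer might look like:
 *
 *	static void probe_touch_buffer(void *data, struct buffer_head *bh)
 *	{
 *		pr_info("touched buffer at block %llu (%zu bytes)\n",
 *			(unsigned long long)bh->b_blocknr, bh->b_size);
 *	}
 *
 *	register_trace_block_touch_buffer(probe_touch_buffer, NULL);
 *	unregister_trace_block_touch_buffer(probe_touch_buffer, NULL);
 */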
/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. The request was not completed and needs to be put back in the
 * queue.
 */
TRACE_EVENT(block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__array(  char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	1		)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, 0)
);
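/*
 * Reader's note (illustrative, not upstream text): the rwbs string
 * filled in by blk_fill_rwbs() encodes the operation as short flag
 * characters, e.g. 'R' read, 'W' write, 'D' discard, 'F' flush/FUA,
 * 'S' sync, 'M' metadata, 'A' readahead, 'N' none of these. With the
 * TP_printk format above, a requeued synchronous 4 KiB write might
 * render roughly as:
 *
 *	8,0 WS () 7629952 + 8 [0]
 *
 * i.e. major,minor rwbs (cmd) sector + nr_sector [error]. The device
 * numbers and sector values here are made up for illustration.
 */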
/**
 * block_rq_complete - block IO operation completed by device driver
 * @rq: block operations request
 * @error: status code
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work
 * to do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,

	TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),

	TP_ARGS(rq, error, nr_bytes),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  int,		error			)
		__array(  char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	1		)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_pos(rq);
		__entry->nr_sector = nr_bytes >> 9;
		__entry->error     = error;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  unsigned int,	bytes			)
		__array(  char,		rwbs,	RWBS_LEN	)
		__array(  char,		comm,	TASK_COMM_LEN	)
		__dynamic_array( char,	cmd,	1		)
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
		__entry->bytes     = blk_rq_bytes(rq);

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		__get_str(cmd)[0] = '\0';
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
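/*
 * Reader's note (illustrative): the nr_sector fields in these events
 * count 512-byte kernel sectors. block_rq_complete derives its value
 * by shifting the completed byte count right by 9, so a 4096-byte
 * completion reports nr_sector = 4096 >> 9 = 8, regardless of the
 * device's physical block size.
 */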
/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_merge - merge request with another one in the elevator
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is merged to
 * another request queued in the elevator.
 */
DEFINE_EVENT(block_rq, block_rq_merge,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);
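/*
 * Illustrative sketch (editor's note, not upstream): a probe for one
 * of the block_rq events above. The probe arguments follow the class
 * TP_PROTO, preceded by the registration data pointer:
 *
 *	static void probe_rq_issue(void *data, struct request_queue *q,
 *				   struct request *rq)
 *	{
 *		pr_debug("issue %llu + %u\n",
 *			 (unsigned long long)blk_rq_pos(rq),
 *			 blk_rq_sectors(rq));
 *	}
 *
 *	register_trace_block_rq_issue(probe_rq_issue, NULL);
 */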
/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__array(  char,		rwbs,	RWBS_LEN	)
		__array(  char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned,	nr_sector		)
		__field(  int,		error			)
		__array(  char,		rwbs,	RWBS_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->error		= blk_status_to_errno(bio->bi_status);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);
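/*
 * Illustrative sketch (editor's note, not upstream): the error field
 * above is converted to a negative errno by blk_status_to_errno(), so
 * a probe can compare it directly against standard codes:
 *
 *	static void probe_bio_done(void *data, struct request_queue *q,
 *				   struct bio *bio)
 *	{
 *		if (blk_status_to_errno(bio->bi_status) == -EIO)
 *			pr_warn("I/O error at sector %llu\n",
 *				(unsigned long long)bio->bi_iter.bi_sector);
 *	}
 *
 *	register_trace_block_bio_complete(probe_bio_done, NULL);
 */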
DECLARE_EVENT_CLASS(block_bio_merge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__array(  char,		rwbs,	RWBS_LEN	)
		__array(  char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);
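/*
 * Reader's note (illustrative): a back merge extends a request whose
 * end sector equals the new bio's start sector; a front merge is the
 * mirror case. For example, a request covering sectors 1000 + 8 back
 * merges with a bio at 1008 + 8 to yield a request covering 1000 + 16.
 */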
/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__array(  char,		rwbs,	RWBS_LEN	)
		__array(  char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__array(  char,		rwbs,	RWBS_LEN	)
		__array(  char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio ? bio_dev(bio) : 0;
		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
		blk_fill_rwbs(__entry->rwbs,
			      bio ? bio->bi_opf : 0, __entry->nr_sector);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
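/*
 * Reader's note (illustrative): on a kernel built with tracing
 * support, each event in this file appears under tracefs, e.g.
 *
 *	/sys/kernel/tracing/events/block/block_bio_queue/enable
 *
 * Writing '1' to that file enables the event; the rendered TP_printk
 * lines then show up in the trace ring buffer.
 */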
/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation (can be %NULL)
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation (can be %NULL)
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q,
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become
 * available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array(  char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),

	TP_STRUCT__entry(
		__field(  int,		nr_rq			)
		__array(  char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);
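/*
 * Reader's note (illustrative): plug and unplug events pair up in the
 * trace. A submitter typically logs block_plug, queues one or more
 * requests, then logs block_unplug with the accumulated depth, e.g.:
 *
 *	[kworker/0:1]		<- block_plug
 *	[kworker/0:1] 4		<- block_unplug, depth of 4 requests
 *
 * The depth printed is the nr_rq field recorded above; the task name
 * here is made up for illustration.
 */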
/**
 * block_unplug - release of operation requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because the device driver is scheduled to
 * work on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
);

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as an operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  sector_t,	new_sector		)
		__array(  char,		rwbs,	RWBS_LEN	)
		__array(  char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->new_sector	= new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);
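/*
 * Reader's note (illustrative): the split format "%llu / %llu" prints
 * the original start sector, then the start sector of the newly
 * created second bio. A write bio at sector 2048 split at sector 3072
 * might render roughly as:
 *
 *	8,0 W 2048 / 3072 [fio]
 *
 * Device numbers, sectors, and the task name are made up for
 * illustration.
 */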
/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		 sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  dev_t,	old_dev			)
		__field(  sector_t,	old_sector		)
		__array(  char,		rwbs,	RWBS_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);
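/*
 * Reader's note (illustrative): the remap format reads new <- old. A
 * bio remapped by a stacking driver such as device-mapper from dm-0
 * sector 0 onto sda sector 2048 might render roughly as:
 *
 *	8,0 W 2048 + 8 <- (253,0) 0
 *
 * i.e. the bio now targets (8,0) at sector 2048, having originally
 * been submitted to (253,0) at sector 0. Values are made up for
 * illustration.
 */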
/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		 sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field(  dev_t,	dev			)
		__field(  sector_t,	sector			)
		__field(  unsigned int,	nr_sector		)
		__field(  dev_t,	old_dev			)
		__field(  sector_t,	old_sector		)
		__field(  unsigned int,	nr_bios			)
		__array(  char,		rwbs,	RWBS_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= disk_devt(rq->rq_disk);
		__entry->sector		= blk_rq_pos(rq);
		__entry->nr_sector	= blk_rq_sectors(rq);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		__entry->nr_bios	= blk_rq_count_bios(rq);
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>