Lines Matching refs:bdev

373 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
376 extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
382 extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
384 extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
394 static inline int blkdev_report_zones_ioctl(struct block_device *bdev, in blkdev_report_zones_ioctl() argument
401 static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev, in blkdev_zone_mgmt_ioctl() argument
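
The zone-report entry point above takes a report_zones_cb callback that is invoked once per zone. A minimal sketch of using it, assuming the signatures shown in this listing; count_seq_zone_cb() and count_seq_zones() are hypothetical names, not part of blkdev.h:

    #include <linux/blkdev.h>
    #include <linux/blkzoned.h>

    /* Count the sequential-write-required zones of a zoned block device. */
    static int count_seq_zone_cb(struct blk_zone *zone, unsigned int idx,
                                 void *data)
    {
            unsigned int *nr_seq = data;

            if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
                    (*nr_seq)++;
            return 0;
    }

    static int count_seq_zones(struct block_device *bdev, unsigned int *nr_seq)
    {
            *nr_seq = 0;
            /*
             * Walk every zone starting at sector 0; the return value is the
             * number of zones reported or a negative errno.
             */
            return blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
                                       count_seq_zone_cb, nr_seq);
    }
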
972 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) in bdev_get_queue() argument
974 return bdev->bd_disk->queue; /* this is never NULL */ in bdev_get_queue()
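
bdev_get_queue() is the pivot for most of the bdev_*() wrappers that follow: map a block_device to its request_queue, then read one queue limit. A minimal sketch of that pattern; bdev_max_hw_sectors() is a hypothetical name used only for illustration:

    static inline unsigned int bdev_max_hw_sectors(struct block_device *bdev)
    {
            /* queue_max_hw_sectors() reads q->limits.max_hw_sectors. */
            return queue_max_hw_sectors(bdev_get_queue(bdev));
    }
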
1184 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1326 static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask) in blkdev_issue_flush() argument
1339 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1344 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1346 extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1353 extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1356 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
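
A hedged sketch of combining the discard and zero-out helpers above, assuming the signatures shown in this listing; zap_range() is a hypothetical helper and the fallback policy is illustrative, not the kernel's own:

    /*
     * Discard a sector range if the queue advertises discard support,
     * otherwise write explicit zeroes over it.
     */
    static int zap_range(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            if (blk_queue_discard(q))
                    return blkdev_issue_discard(bdev, sector, nr_sects,
                                                GFP_KERNEL, 0);

            /* BLKDEV_ZERO_NOUNMAP asks for real zeroing, not unmapping. */
            return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
                                        BLKDEV_ZERO_NOUNMAP);
    }
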
1382 static inline bool bdev_is_partition(struct block_device *bdev) in bdev_is_partition() argument
1384 return bdev->bd_partno; in bdev_is_partition()
1448 static inline unsigned int bdev_logical_block_size(struct block_device *bdev) in bdev_logical_block_size() argument
1450 return queue_logical_block_size(bdev_get_queue(bdev)); in bdev_logical_block_size()
1458 static inline unsigned int bdev_physical_block_size(struct block_device *bdev) in bdev_physical_block_size() argument
1460 return queue_physical_block_size(bdev_get_queue(bdev)); in bdev_physical_block_size()
1468 static inline int bdev_io_min(struct block_device *bdev) in bdev_io_min() argument
1470 return queue_io_min(bdev_get_queue(bdev)); in bdev_io_min()
1478 static inline int bdev_io_opt(struct block_device *bdev) in bdev_io_opt() argument
1480 return queue_io_opt(bdev_get_queue(bdev)); in bdev_io_opt()
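
The helpers above expose the I/O topology a device reports, the same limits userspace reads under /sys/block/<dev>/queue/. A minimal sketch that logs them, assuming the signatures in this listing; print_bdev_topology() is a hypothetical name:

    static void print_bdev_topology(struct block_device *bdev)
    {
            char name[BDEVNAME_SIZE];

            pr_info("%s: logical=%u physical=%u io_min=%d io_opt=%d\n",
                    bdevname(bdev, name),
                    bdev_logical_block_size(bdev),
                    bdev_physical_block_size(bdev),
                    bdev_io_min(bdev),
                    bdev_io_opt(bdev));
    }
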
1500 static inline int bdev_alignment_offset(struct block_device *bdev) in bdev_alignment_offset() argument
1502 struct request_queue *q = bdev_get_queue(bdev); in bdev_alignment_offset()
1506 if (bdev_is_partition(bdev)) in bdev_alignment_offset()
1508 bdev->bd_part->start_sect); in bdev_alignment_offset()
1559 static inline int bdev_discard_alignment(struct block_device *bdev) in bdev_discard_alignment() argument
1561 struct request_queue *q = bdev_get_queue(bdev); in bdev_discard_alignment()
1563 if (bdev_is_partition(bdev)) in bdev_discard_alignment()
1565 bdev->bd_part->start_sect); in bdev_discard_alignment()
1569 static inline unsigned int bdev_write_same(struct block_device *bdev) in bdev_write_same() argument
1571 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_same()
1579 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) in bdev_write_zeroes_sectors() argument
1581 struct request_queue *q = bdev_get_queue(bdev); in bdev_write_zeroes_sectors()
1589 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) in bdev_zoned_model() argument
1591 struct request_queue *q = bdev_get_queue(bdev); in bdev_zoned_model()
1599 static inline bool bdev_is_zoned(struct block_device *bdev) in bdev_is_zoned() argument
1601 struct request_queue *q = bdev_get_queue(bdev); in bdev_is_zoned()
1609 static inline sector_t bdev_zone_sectors(struct block_device *bdev) in bdev_zone_sectors() argument
1611 struct request_queue *q = bdev_get_queue(bdev); in bdev_zone_sectors()
1618 static inline unsigned int bdev_max_open_zones(struct block_device *bdev) in bdev_max_open_zones() argument
1620 struct request_queue *q = bdev_get_queue(bdev); in bdev_max_open_zones()
1627 static inline unsigned int bdev_max_active_zones(struct block_device *bdev) in bdev_max_active_zones() argument
1629 struct request_queue *q = bdev_get_queue(bdev); in bdev_max_active_zones()
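
A hedged sketch of gating zoned-only setup on the predicates above, assuming the signatures in this listing; setup_zoned() is a hypothetical helper, and the open/active zone limits are assumed to read back as 0 when the device does not advertise one:

    static int setup_zoned(struct block_device *bdev)
    {
            if (!bdev_is_zoned(bdev))
                    return -EINVAL;

            pr_info("zone_sectors=%llu max_open=%u max_active=%u\n",
                    (unsigned long long)bdev_zone_sectors(bdev),
                    bdev_max_open_zones(bdev),
                    bdev_max_active_zones(bdev));
            return 0;
    }
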
1659 static inline unsigned int block_size(struct block_device *bdev) in block_size() argument
1661 return 1 << bdev->bd_inode->i_blkbits; in block_size()
1720 struct blk_integrity *bdev_get_integrity(struct block_device *bdev) in bdev_get_integrity() argument
1722 return blk_get_integrity(bdev->bd_disk); in bdev_get_integrity()
2010 int bdev_read_only(struct block_device *bdev);
2011 int set_blocksize(struct block_device *bdev, int size);
2013 const char *bdevname(struct block_device *bdev, char *buffer);
2029 int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
2031 void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
2033 void blkdev_put(struct block_device *bdev, fmode_t mode);
2037 struct block_device *bdgrab(struct block_device *bdev);
2041 void invalidate_bdev(struct block_device *bdev);
2042 int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
2044 int sync_blockdev(struct block_device *bdev);
2046 static inline void invalidate_bdev(struct block_device *bdev) in invalidate_bdev() argument
2049 static inline int truncate_bdev_range(struct block_device *bdev, fmode_t mode, in truncate_bdev_range() argument
2054 static inline int sync_blockdev(struct block_device *bdev) in sync_blockdev() argument
2059 int fsync_bdev(struct block_device *bdev);
2061 int freeze_bdev(struct block_device *bdev);
2062 int thaw_bdev(struct block_device *bdev);
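
A hedged sketch of pairing the freeze/thaw entry points above, assuming the single-argument thaw_bdev() shown in this listing; with_bdev_frozen() is a hypothetical helper:

    /* Run fn() against a device whose filesystem is frozen, then thaw it. */
    static int with_bdev_frozen(struct block_device *bdev,
                                int (*fn)(struct block_device *))
    {
            int ret = freeze_bdev(bdev);

            if (ret)
                    return ret;
            ret = fn(bdev);
            thaw_bdev(bdev);
            return ret;
    }
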