mirror of
https://github.com/fail0verflow/switch-linux.git
synced 2025-05-04 02:34:21 -04:00
md: Use new topology calls to indicate alignment and I/O sizes
Switch MD over to the new disk_stack_limits() function which checks for alignment and adjusts preferred I/O sizes when stacking. Also indicate preferred I/O sizes where applicable. Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com> Signed-off-by: NeilBrown <neilb@suse.de>
This commit is contained in:
parent
5a4f13fad1
commit
8f6c2e4b32
6 changed files with 39 additions and 19 deletions
|
@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
|
||||||
rdev->sectors = sectors * mddev->chunk_sectors;
|
rdev->sectors = sectors * mddev->chunk_sectors;
|
||||||
}
|
}
|
||||||
|
|
||||||
blk_queue_stack_limits(mddev->queue,
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
||||||
rdev->bdev->bd_disk->queue);
|
rdev->data_offset << 9);
|
||||||
/* as we don't honour merge_bvec_fn, we must never risk
|
/* as we don't honour merge_bvec_fn, we must never risk
|
||||||
* violating it, so limit ->max_sector to one PAGE, as
|
* violating it, so limit ->max_sector to one PAGE, as
|
||||||
* a one page request is never in violation.
|
* a one page request is never in violation.
|
||||||
|
|
|
@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||||
for (path = first; path <= last; path++)
|
for (path = first; path <= last; path++)
|
||||||
if ((p=conf->multipaths+path)->rdev == NULL) {
|
if ((p=conf->multipaths+path)->rdev == NULL) {
|
||||||
q = rdev->bdev->bd_disk->queue;
|
q = rdev->bdev->bd_disk->queue;
|
||||||
blk_queue_stack_limits(mddev->queue, q);
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
||||||
|
rdev->data_offset << 9);
|
||||||
|
|
||||||
/* as we don't honour merge_bvec_fn, we must never risk
|
/* as we don't honour merge_bvec_fn, we must never risk
|
||||||
* violating it, so limit ->max_sector to one PAGE, as
|
* violating it, so limit ->max_sector to one PAGE, as
|
||||||
|
@ -463,9 +464,9 @@ static int multipath_run (mddev_t *mddev)
|
||||||
|
|
||||||
disk = conf->multipaths + disk_idx;
|
disk = conf->multipaths + disk_idx;
|
||||||
disk->rdev = rdev;
|
disk->rdev = rdev;
|
||||||
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
||||||
|
rdev->data_offset << 9);
|
||||||
|
|
||||||
blk_queue_stack_limits(mddev->queue,
|
|
||||||
rdev->bdev->bd_disk->queue);
|
|
||||||
/* as we don't honour merge_bvec_fn, we must never risk
|
/* as we don't honour merge_bvec_fn, we must never risk
|
||||||
* violating it, not that we ever expect a device with
|
* violating it, not that we ever expect a device with
|
||||||
* a merge_bvec_fn to be involved in multipath */
|
* a merge_bvec_fn to be involved in multipath */
|
||||||
|
|
|
@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev)
|
||||||
}
|
}
|
||||||
dev[j] = rdev1;
|
dev[j] = rdev1;
|
||||||
|
|
||||||
blk_queue_stack_limits(mddev->queue,
|
disk_stack_limits(mddev->gendisk, rdev1->bdev,
|
||||||
rdev1->bdev->bd_disk->queue);
|
rdev1->data_offset << 9);
|
||||||
/* as we don't honour merge_bvec_fn, we must never risk
|
/* as we don't honour merge_bvec_fn, we must never risk
|
||||||
* violating it, so limit ->max_sector to one PAGE, as
|
* violating it, so limit ->max_sector to one PAGE, as
|
||||||
* a one page request is never in violation.
|
* a one page request is never in violation.
|
||||||
|
@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev)
|
||||||
mddev->chunk_sectors << 9);
|
mddev->chunk_sectors << 9);
|
||||||
goto abort;
|
goto abort;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
|
||||||
|
blk_queue_io_opt(mddev->queue,
|
||||||
|
(mddev->chunk_sectors << 9) * mddev->raid_disks);
|
||||||
|
|
||||||
printk(KERN_INFO "raid0: done.\n");
|
printk(KERN_INFO "raid0: done.\n");
|
||||||
mddev->private = conf;
|
mddev->private = conf;
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||||
for (mirror = first; mirror <= last; mirror++)
|
for (mirror = first; mirror <= last; mirror++)
|
||||||
if ( !(p=conf->mirrors+mirror)->rdev) {
|
if ( !(p=conf->mirrors+mirror)->rdev) {
|
||||||
|
|
||||||
blk_queue_stack_limits(mddev->queue,
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
||||||
rdev->bdev->bd_disk->queue);
|
rdev->data_offset << 9);
|
||||||
/* as we don't honour merge_bvec_fn, we must never risk
|
/* as we don't honour merge_bvec_fn, we must never risk
|
||||||
* violating it, so limit ->max_sector to one PAGE, as
|
* violating it, so limit ->max_sector to one PAGE, as
|
||||||
* a one page request is never in violation.
|
* a one page request is never in violation.
|
||||||
|
@ -1988,9 +1988,8 @@ static int run(mddev_t *mddev)
|
||||||
disk = conf->mirrors + disk_idx;
|
disk = conf->mirrors + disk_idx;
|
||||||
|
|
||||||
disk->rdev = rdev;
|
disk->rdev = rdev;
|
||||||
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
||||||
blk_queue_stack_limits(mddev->queue,
|
rdev->data_offset << 9);
|
||||||
rdev->bdev->bd_disk->queue);
|
|
||||||
/* as we don't honour merge_bvec_fn, we must never risk
|
/* as we don't honour merge_bvec_fn, we must never risk
|
||||||
* violating it, so limit ->max_sector to one PAGE, as
|
* violating it, so limit ->max_sector to one PAGE, as
|
||||||
* a one page request is never in violation.
|
* a one page request is never in violation.
|
||||||
|
|
|
@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
|
||||||
for ( ; mirror <= last ; mirror++)
|
for ( ; mirror <= last ; mirror++)
|
||||||
if ( !(p=conf->mirrors+mirror)->rdev) {
|
if ( !(p=conf->mirrors+mirror)->rdev) {
|
||||||
|
|
||||||
blk_queue_stack_limits(mddev->queue,
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
||||||
rdev->bdev->bd_disk->queue);
|
rdev->data_offset << 9);
|
||||||
/* as we don't honour merge_bvec_fn, we must never risk
|
/* as we don't honour merge_bvec_fn, we must never risk
|
||||||
* violating it, so limit ->max_sector to one PAGE, as
|
* violating it, so limit ->max_sector to one PAGE, as
|
||||||
* a one page request is never in violation.
|
* a one page request is never in violation.
|
||||||
|
@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
||||||
static int run(mddev_t *mddev)
|
static int run(mddev_t *mddev)
|
||||||
{
|
{
|
||||||
conf_t *conf;
|
conf_t *conf;
|
||||||
int i, disk_idx;
|
int i, disk_idx, chunk_size;
|
||||||
mirror_info_t *disk;
|
mirror_info_t *disk;
|
||||||
mdk_rdev_t *rdev;
|
mdk_rdev_t *rdev;
|
||||||
int nc, fc, fo;
|
int nc, fc, fo;
|
||||||
|
@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev)
|
||||||
spin_lock_init(&conf->device_lock);
|
spin_lock_init(&conf->device_lock);
|
||||||
mddev->queue->queue_lock = &conf->device_lock;
|
mddev->queue->queue_lock = &conf->device_lock;
|
||||||
|
|
||||||
|
chunk_size = mddev->chunk_sectors << 9;
|
||||||
|
blk_queue_io_min(mddev->queue, chunk_size);
|
||||||
|
if (conf->raid_disks % conf->near_copies)
|
||||||
|
blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
|
||||||
|
else
|
||||||
|
blk_queue_io_opt(mddev->queue, chunk_size *
|
||||||
|
(conf->raid_disks / conf->near_copies));
|
||||||
|
|
||||||
list_for_each_entry(rdev, &mddev->disks, same_set) {
|
list_for_each_entry(rdev, &mddev->disks, same_set) {
|
||||||
disk_idx = rdev->raid_disk;
|
disk_idx = rdev->raid_disk;
|
||||||
if (disk_idx >= mddev->raid_disks
|
if (disk_idx >= mddev->raid_disks
|
||||||
|
@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev)
|
||||||
disk = conf->mirrors + disk_idx;
|
disk = conf->mirrors + disk_idx;
|
||||||
|
|
||||||
disk->rdev = rdev;
|
disk->rdev = rdev;
|
||||||
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
||||||
blk_queue_stack_limits(mddev->queue,
|
rdev->data_offset << 9);
|
||||||
rdev->bdev->bd_disk->queue);
|
|
||||||
/* as we don't honour merge_bvec_fn, we must never risk
|
/* as we don't honour merge_bvec_fn, we must never risk
|
||||||
* violating it, so limit ->max_sector to one PAGE, as
|
* violating it, so limit ->max_sector to one PAGE, as
|
||||||
* a one page request is never in violation.
|
* a one page request is never in violation.
|
||||||
|
|
|
@ -4452,7 +4452,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
|
||||||
static int run(mddev_t *mddev)
|
static int run(mddev_t *mddev)
|
||||||
{
|
{
|
||||||
raid5_conf_t *conf;
|
raid5_conf_t *conf;
|
||||||
int working_disks = 0;
|
int working_disks = 0, chunk_size;
|
||||||
mdk_rdev_t *rdev;
|
mdk_rdev_t *rdev;
|
||||||
|
|
||||||
if (mddev->recovery_cp != MaxSector)
|
if (mddev->recovery_cp != MaxSector)
|
||||||
|
@ -4607,6 +4607,14 @@ static int run(mddev_t *mddev)
|
||||||
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
|
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
|
||||||
|
|
||||||
blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
|
blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
|
||||||
|
chunk_size = mddev->chunk_sectors << 9;
|
||||||
|
blk_queue_io_min(mddev->queue, chunk_size);
|
||||||
|
blk_queue_io_opt(mddev->queue, chunk_size *
|
||||||
|
(conf->raid_disks - conf->max_degraded));
|
||||||
|
|
||||||
|
list_for_each_entry(rdev, &mddev->disks, same_set)
|
||||||
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
||||||
|
rdev->data_offset << 9);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
abort:
|
abort:
|
||||||
|
|
Loading…
Add table
Reference in a new issue