./drivers/md/raid10.c~current~ (-47 / +66 lines)

Lines 194-218 static inline void free_r10bio(r10bio_t
 	mempool_free(r10_bio, conf->r10bio_pool);
 }
 
-static inline void put_buf(r10bio_t *r10_bio)
+
+/*
+ * Throttle resync depth, so that we can both get proper overlapping of
+ * requests, but are still able to handle normal requests quickly.
+ */
+#define RESYNC_DEPTH 32
+
+static void device_barrier(conf_t *conf, sector_t sect)
 {
-	conf_t *conf = mddev_to_conf(r10_bio->mddev);
-	unsigned long flags;
+	spin_lock_irq(&conf->resync_lock);
+	wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume),
+			    conf->resync_lock, unplug_slaves(conf->mddev));
 
-	mempool_free(r10_bio, conf->r10buf_pool);
+	if (!conf->barrier++) {
+		wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
+				    conf->resync_lock, unplug_slaves(conf->mddev));
+		if (conf->nr_pending)
+			BUG();
+	}
+	wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH,
+			    conf->resync_lock, unplug_slaves(conf->mddev));
+	conf->next_resync = sect;
+	spin_unlock_irq(&conf->resync_lock);
+}
 
+static void drop_barrier(conf_t *conf)
+{
+	unsigned long flags;
 	spin_lock_irqsave(&conf->resync_lock, flags);
 	if (!conf->barrier)
 		BUG();
 	--conf->barrier;
 	wake_up(&conf->wait_resume);
 	wake_up(&conf->wait_idle);
+	spin_unlock_irqrestore(&conf->resync_lock, flags);
+}
 
-	if (!--conf->nr_pending) {
-		wake_up(&conf->wait_idle);
-		wake_up(&conf->wait_resume);
-	}
+static inline void put_buf(r10bio_t *r10_bio)
+{
+	conf_t *conf = mddev_to_conf(r10_bio->mddev);
+	unsigned long flags;
+
+	mempool_free(r10_bio, conf->r10buf_pool);
+
+	spin_lock_irqsave(&conf->resync_lock, flags);
+	--conf->nr_pending;
 	spin_unlock_irqrestore(&conf->resync_lock, flags);
+
+	drop_barrier(conf);
 }
 
 static void reschedule_retry(r10bio_t *r10_bio)
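
Editor's note: this hunk splits the barrier release out of put_buf() into the new drop_barrier(), so a resync buffer can be freed and the barrier slot dropped independently, while device_barrier() (moved up from later in the file) is what resync uses to hold off normal I/O, throttled to RESYNC_DEPTH outstanding requests. The userspace sketch below mirrors only that accounting, using a plain mutex and condition variables; it is illustrative, not the kernel code (which uses wait_event_lock_irq() on conf->resync_lock and wait queues), and every name in it is a stand-in rather than part of the patch.

/* Illustrative userspace model of the barrier accounting above:
 * barrier counts resync requests holding the barrier (capped at
 * RESYNC_DEPTH); nr_pending counts normal requests in flight.
 */
#include <pthread.h>
#include <stdio.h>

#define RESYNC_DEPTH 32

static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_idle   = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  wait_resume = PTHREAD_COND_INITIALIZER;
static int barrier;
static int nr_pending;

static void device_barrier(void)
{
	pthread_mutex_lock(&resync_lock);
	/* the first resync request waits for normal I/O to drain */
	if (!barrier++) {
		while (nr_pending)
			pthread_cond_wait(&wait_idle, &resync_lock);
	}
	/* throttle resync depth */
	while (barrier >= RESYNC_DEPTH)
		pthread_cond_wait(&wait_resume, &resync_lock);
	pthread_mutex_unlock(&resync_lock);
}

static void drop_barrier(void)
{
	pthread_mutex_lock(&resync_lock);
	--barrier;
	pthread_cond_broadcast(&wait_resume);
	pthread_cond_broadcast(&wait_idle);
	pthread_mutex_unlock(&resync_lock);
}

int main(void)
{
	device_barrier();	/* as sync_request() does before issuing reads */
	drop_barrier();		/* as put_buf() now does, via drop_barrier() */
	printf("barrier=%d nr_pending=%d\n", barrier, nr_pending);
	return 0;
}

The point of the split is visible even in the sketch: drop_barrier() only decrements and wakes waiters, so it can be called both when a resync buffer is freed and when sync_request() bails out without ever submitting I/O.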

Lines 634-663 static int raid10_issue_flush(request_qu
 	return ret;
 }
 
-/*
- * Throttle resync depth, so that we can both get proper overlapping of
- * requests, but are still able to handle normal requests quickly.
- */
-#define RESYNC_DEPTH 32
-
-static void device_barrier(conf_t *conf, sector_t sect)
-{
-	spin_lock_irq(&conf->resync_lock);
-	wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume),
-			    conf->resync_lock, unplug_slaves(conf->mddev));
-
-	if (!conf->barrier++) {
-		wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
-				    conf->resync_lock, unplug_slaves(conf->mddev));
-		if (conf->nr_pending)
-			BUG();
-	}
-	wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock, unplug_slaves(conf->mddev));
-	conf->next_resync = sect;
-	spin_unlock_irq(&conf->resync_lock);
-}
-
 static int make_request(request_queue_t *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;

Lines 1396-1402 static sector_t sync_request(mddev_t *md
 				r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
 				spin_lock_irq(&conf->resync_lock);
 				conf->nr_pending++;
-				if (rb2) conf->barrier++;
+				conf->barrier++;
 				spin_unlock_irq(&conf->resync_lock);
 				atomic_set(&r10_bio->remaining, 0);
 

Lines 1445-1462 static sector_t sync_request(mddev_t *md
 					}
 				}
 				if (j == conf->copies) {
-					BUG();
+					/* Cannot recover, so abort
+					 * the recovery
+					 */
+					put_buf(r10_bio);
+					r10_bio = rb2;
+					if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery))
+						printk(KERN_INFO "raid10: %s: insufficent working devices for recovery.\n",
+						       mdname(mddev));
 				}
 			}
-		if (biolist == NULL) {
-			while (r10_bio) {
-				r10bio_t *rb2 = r10_bio;
-				r10_bio = (r10bio_t*) rb2->master_bio;
-				rb2->master_bio = NULL;
-				put_buf(rb2);
-			}
+		drop_barrier(conf);
+		if (biolist == NULL)
 			goto giveup;
-		}
 	} else {
 		/* resync. Schedule a read for every block at this virt offset */
 		int count = 0;

Lines 1593-1598 static int run(mddev_t *mddev)
 	struct list_head *tmp;
 	int nc, fc;
 	sector_t stride, size;
+	int first;
 
 	if (mddev->level != 10) {
 		printk(KERN_ERR "raid10: %s: raid level not set correctly... (%d)\n",

Lines 1678-1688 static int run(mddev_t *mddev)
 	init_waitqueue_head(&conf->wait_idle);
 	init_waitqueue_head(&conf->wait_resume);
 
-	if (!conf->working_disks) {
-		printk(KERN_ERR "raid10: no operational mirrors for %s\n",
-			mdname(mddev));
-		goto out_free_conf;
-	}
+	/* need to check that every block has at least one working mirror */
+	first = 0;
+	do {
+		int n = conf->copies;
+		int cnt = 0;
+		while (n--) {
+			if (conf->mirrors[first].rdev)
+				cnt++;
+			first = (first+1) % conf->raid_disks;
+		}
+		if (cnt == 0) {
+			printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
+			       mdname(mddev));
+			goto out_free_conf;
+		}
+	} while (first != 0);
 
 	mddev->degraded = 0;
 	for (i = 0; i < conf->raid_disks; i++) {
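
Editor's note: the run() hunk replaces the old "any working disk at all" test with a walk over every group of copies: starting at disk 0 and wrapping modulo raid_disks, each group of conf->copies consecutive slots must contain at least one live rdev, otherwise the array cannot be started. A standalone sketch of that coverage check follows; the names (all_blocks_covered, the bool array standing in for conf->mirrors[].rdev) are hypothetical and only the loop structure comes from the patch.

/* Sketch of the "every block has at least one working mirror" check:
 * each group of `copies` consecutive slots (wrapping around raid_disks)
 * must hold at least one present device.
 */
#include <stdbool.h>
#include <stdio.h>

static bool all_blocks_covered(const bool *present, int raid_disks, int copies)
{
	int first = 0;

	do {
		int n = copies;
		int cnt = 0;

		while (n--) {
			if (present[first])
				cnt++;
			first = (first + 1) % raid_disks;
		}
		if (cnt == 0)
			return false;
	} while (first != 0);

	return true;
}

int main(void)
{
	/* 4 disks, 2 copies: groups are (0,1) and (2,3) */
	bool ok[4]  = { true, true, true, true };
	bool bad[4] = { false, false, true, true };	/* group (0,1) lost */

	printf("%d %d\n", all_blocks_covered(ok, 4, 2),
	       all_blocks_covered(bad, 4, 2));
	return 0;
}

With four disks and two copies, losing both members of a group leaves some blocks with no surviving mirror; the old working_disks test would still have started the array in that state, which is exactly what the new per-group check rejects.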
