diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6a062ee..8085f59 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1073,9 +1073,9 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 * expire an async queue immediately if it has used up its slice. idle
 	 * queue always expire after 1 dispatch round.
 	 */
-	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+	if ((!cfq_cfqq_sync(cfqq) &&
 	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq))) {
+	    cfq_class_idle(cfqq)) {
 		cfqq->slice_end = jiffies + 1;
 		cfq_slice_expired(cfqd, 0);
 	}
@@ -1119,7 +1119,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq, *prev_cfqq;
 	int dispatched;
 
 	if (!cfqd->busy_queues)
@@ -1129,28 +1129,45 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		return cfq_forced_dispatch(cfqd);
 
 	dispatched = 0;
+	prev_cfqq = NULL;
 	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
 		int max_dispatch;
 
-		max_dispatch = cfqd->cfq_quantum;
-		if (cfq_class_idle(cfqq))
-			max_dispatch = 1;
-
-		if (cfqq->dispatched >= max_dispatch) {
-			if (cfqd->busy_queues > 1)
-				break;
-			if (cfqq->dispatched >= 4 * max_dispatch)
-				break;
-		}
+		/*
+		 * Don't repeat dispatch from the previous queue.
+		 */
+		if (prev_cfqq == cfqq)
+			break;
 
-		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+		/*
+		 * So we have dispatched before in this round, if the
+		 * next queue has idling enabled (must be sync), don't
+		 * allow it service until the previous have continued.
+		 */
+		if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
 			break;
+
+//		if (cfqq->dispatched >= max_dispatch) {
+//			if (cfqd->busy_queues > 1)
+//				break;
+//			if (cfqq->dispatched >= 4 * max_dispatch)
+//				break;
+//		}
+
+//		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+//			break;
+
 
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
 
+		max_dispatch = cfqd->cfq_quantum;
+		if (cfq_class_idle(cfqq))
+			max_dispatch = 1;
+
 		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+		prev_cfqq = cfqq;
 	}
 
 	cfq_log(cfqd, "dispatched=%d", dispatched);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2970e35..c47530d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -66,7 +66,7 @@ static inline long sync_writeback_pages(void)
 /*
  * Start background writeback (via pdflush) at this percentage
  */
-int dirty_background_ratio = 5;
+int dirty_background_ratio = 10;
 
 /*
  * free highmem will not be subtracted from the total free memory
@@ -77,7 +77,7 @@ int vm_highmem_is_dirtyable;
 /*
  * The generator of dirty data starts writeback at this percentage
  */
-int vm_dirty_ratio = 10;
+int vm_dirty_ratio = 40;
 
 /*
  * The interval between `kupdate'-style writebacks, in jiffies
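
For illustration only, a minimal user-space sketch of the dispatch-loop flow the cfq-iosched.c hunk introduces. All names below (struct sched, struct queue, select_queue, dispatch_one) are hypothetical stand-ins for the kernel's cfq_data/cfq_queue machinery; only the control flow is meant to mirror the patch, not the real elevator code.

/* Hypothetical stand-ins for struct cfq_queue / struct cfq_data. */
#include <stdbool.h>
#include <stdio.h>

struct queue {
	int pending;       /* requests still queued                 */
	bool idle_class;   /* IOPRIO_CLASS_IDLE queue?              */
	bool idle_window;  /* idling enabled (sync queue)?          */
};

struct sched {
	struct queue *queues;
	int nr_queues;
	int rq_in_driver;  /* requests already handed to the device */
	int quantum;       /* counterpart of cfqd->cfq_quantum      */
};

/* Pick the next queue with work; NULL when everything is drained. */
static struct queue *select_queue(struct sched *sd)
{
	for (int i = 0; i < sd->nr_queues; i++)
		if (sd->queues[i].pending)
			return &sd->queues[i];
	return NULL;
}

/* Move up to max_dispatch requests from one queue into the "driver". */
static int dispatch_one(struct sched *sd, struct queue *q, int max_dispatch)
{
	int done = 0;

	while (q->pending && done < max_dispatch) {
		q->pending--;
		sd->rq_in_driver++;
		done++;
	}
	return done;
}

/* Control flow mirroring the reworked cfq_dispatch_requests(). */
static int dispatch_round(struct sched *sd)
{
	struct queue *q, *prev = NULL;
	int dispatched = 0;

	while ((q = select_queue(sd)) != NULL) {
		int max_dispatch;

		/* Don't dispatch from the same queue twice in one round. */
		if (q == prev)
			break;

		/*
		 * With requests already in flight, hold back a queue that
		 * wants idling (sync) until the earlier work completes.
		 */
		if (sd->rq_in_driver && q->idle_window)
			break;

		max_dispatch = q->idle_class ? 1 : sd->quantum;
		dispatched += dispatch_one(sd, q, max_dispatch);
		prev = q;
	}
	return dispatched;
}

int main(void)
{
	struct queue qs[] = {
		{ .pending = 8 },                       /* async queue */
		{ .pending = 3, .idle_window = true },  /* sync queue  */
	};
	struct sched sd = { .queues = qs, .nr_queues = 2, .quantum = 4 };

	printf("dispatched=%d\n", dispatch_round(&sd));
	return 0;
}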