1 diff -uprN linux-2.6.28.orig/block/bfq-iosched.c linux-2.6.28.new/block/bfq-iosched.c
2 --- linux-2.6.28.orig/block/bfq-iosched.c 2011-06-10 22:28:46.801129048 +0200
3 +++ linux-2.6.28.new/block/bfq-iosched.c 2011-06-10 22:29:04.551353847 +0200
4 @@ -1091,8 +1091,29 @@ static struct bfq_queue *bfq_select_queu
5 bfq_bfqq_budget_left(bfqq)) {
6 reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
11 + * The idle timer may be pending because we may not
12 + * disable disk idling even when a new request arrives
14 + if (timer_pending(&bfqd->idle_slice_timer)) {
16 + * If we get here: 1) at least a new request
17 + * has arrived but we have not disabled the
18 + * timer because the request was too small,
19 + * 2) then the block layer has unplugged the
20 + * device, causing the dispatch to be invoked.
22 + * Since the device is unplugged, now the
23 + * requests are probably large enough to
24 + * provide a reasonable throughput.
25 + * So we disable idling.
27 + bfq_clear_bfqq_wait_request(bfqq);
28 + del_timer(&bfqd->idle_slice_timer);
35 @@ -1659,6 +1680,25 @@ static void bfq_rq_enqueued(struct bfq_d
36 bfqq->last_request_pos = rq->sector + rq->nr_sectors;
38 if (bfqq == bfqd->active_queue) {
40 + * If there is just this request queued and the request
41 + * is small, just make sure the queue is plugged and exit.
42 + * In this way, if the disk is being idled to wait for a new
43 + * request from the active queue, we avoid unplugging the
44 + * device for this request.
46 + * By doing so, we spare the disk from being committed
47 + * to serve just a small request. Instead, we wait for
48 + * the block layer to decide when to unplug the device:
49 + * hopefully, new requests will be merged to this
50 + * one quickly, then the device will be unplugged
51 + * and larger requests will be dispatched.
53 + if (bfqq->queued[rq_is_sync(rq)] == 1 &&
54 + rq->nr_sectors < 32) {
55 + blk_plug_device(bfqd->queue);
58 if (bfq_bfqq_wait_request(bfqq)) {
60 * If we are waiting for a request for this queue, let
61 @@ -1939,7 +1979,6 @@ static void bfq_idle_slice_timer(unsigne
64 bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
65 - reason = BFQ_BFQQ_TOO_IDLE;
66 if (bfq_bfqq_budget_timeout(bfqq))
68 * Also here the queue can be safely expired
69 @@ -1947,10 +1986,21 @@ static void bfq_idle_slice_timer(unsigne
72 reason = BFQ_BFQQ_BUDGET_TIMEOUT;
73 + else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
75 + * The queue may not be empty upon timer expiration,
76 + * because we may not disable the timer when the first
77 + * request of the active queue arrives during
80 + reason = BFQ_BFQQ_TOO_IDLE;
82 + goto schedule_dispatch;
84 bfq_bfqq_expire(bfqd, bfqq, 1, reason);
88 bfq_schedule_dispatch(bfqd);
90 spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);