Clean up patch dir; synchronize patches with kernel-power v48
[kernel-bfs] / kernel-bfs-2.6.28 / debian / patches / bfq / block-switch-from-BFQ-v2-to-BFQ-v2-r1-small-requests.patch
1 diff -uprN linux-2.6.28.orig/block/bfq-iosched.c linux-2.6.28.new/block/bfq-iosched.c
2 --- linux-2.6.28.orig/block/bfq-iosched.c       2011-06-10 22:28:46.801129048 +0200
3 +++ linux-2.6.28.new/block/bfq-iosched.c        2011-06-10 22:29:04.551353847 +0200
4 @@ -1091,8 +1091,29 @@ static struct bfq_queue *bfq_select_queu
5                         bfq_bfqq_budget_left(bfqq)) {
6                         reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
7                         goto expire;
8 -               } else
9 +               } else {
10 +                       /*
11 +                        * The idle timer may be pending because we may not
12 +                        * disable disk idling even when a new request arrives
13 +                        */
14 +                       if (timer_pending(&bfqd->idle_slice_timer)) {
15 +                               /*
16 +                                * If we get here: 1) at least a new request
17 +                                * has arrived but we have not disabled the
18 +                                * timer because the request was too small,
19 +                                * 2) then the block layer has unplugged the
20 +                                * device, causing the dispatch to be invoked.
21 +                                *
22 +                                * Since the device is unplugged, now the
23 +                                * requests are probably large enough to
24 +                                * provide a reasonable throughput.
25 +                                * So we disable idling.
26 +                                */
27 +                               bfq_clear_bfqq_wait_request(bfqq);
28 +                               del_timer(&bfqd->idle_slice_timer);
29 +                       }
30                         goto keep_queue;
31 +               }
32         }
33  
34         /*
35 @@ -1659,6 +1680,25 @@ static void bfq_rq_enqueued(struct bfq_d
36         bfqq->last_request_pos = rq->sector + rq->nr_sectors;
37  
38         if (bfqq == bfqd->active_queue) {
39 +               /*
40 +                * If there is just this request queued and the request
41 +                * is small, just make sure the queue is plugged and exit.
42 +                * In this way, if the disk is being idled to wait for a new
43 +                * request from the active queue, we avoid unplugging the
44 +                * device for this request.
45 +                *
46 +                * By doing so, we spare the disk from being committed
47 +                * to serving just a small request. Instead, we wait for
48 +                * the block layer to decide when to unplug the device:
49 +                * hopefully, new requests will be merged to this
50 +                * one quickly, then the device will be unplugged
51 +                * and larger requests will be dispatched.
52 +                */
53 +               if (bfqq->queued[rq_is_sync(rq)] == 1 &&
54 +                   rq->nr_sectors < 32) {
55 +                       blk_plug_device(bfqd->queue);
56 +                       return;
57 +               }
58                 if (bfq_bfqq_wait_request(bfqq)) {
59                         /*
60                          * If we are waiting for a request for this queue, let
61 @@ -1939,7 +1979,6 @@ static void bfq_idle_slice_timer(unsigne
62          */
63         if (bfqq != NULL) {
64                 bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
65 -               reason = BFQ_BFQQ_TOO_IDLE;
66                 if (bfq_bfqq_budget_timeout(bfqq))
67                         /*
68                          * Also here the queue can be safely expired
69 @@ -1947,10 +1986,21 @@ static void bfq_idle_slice_timer(unsigne
70                          * guarantees
71                          */
72                         reason = BFQ_BFQQ_BUDGET_TIMEOUT;
73 +               else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
74 +                       /*
75 +                        * The queue may not be empty upon timer expiration,
76 +                        * because we may not disable the timer when the first
77 +                        * request of the active queue arrives during
78 +                        * disk idling
79 +                        */
80 +                       reason = BFQ_BFQQ_TOO_IDLE;
81 +               else
82 +                       goto schedule_dispatch;
83  
84                 bfq_bfqq_expire(bfqd, bfqq, 1, reason);
85         }
86  
87 +schedule_dispatch:
88         bfq_schedule_dispatch(bfqd);
89  
90         spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);