From: Jens Axboe

Arjan (sensibly) put a might_sleep() in mempool_alloc() and it caught a bad
cfq usage.

Signed-off-by: Jens Axboe
Signed-off-by: Andrew Morton
---

 25-akpm/drivers/block/cfq-iosched.c |   52 ++++++++++++++++++++++++------------
 1 files changed, 35 insertions(+), 17 deletions(-)

diff -puN drivers/block/cfq-iosched.c~cfq-bad-allocation drivers/block/cfq-iosched.c
--- 25/drivers/block/cfq-iosched.c~cfq-bad-allocation	2004-07-07 10:45:16.329181792 -0700
+++ 25-akpm/drivers/block/cfq-iosched.c	2004-07-07 10:45:16.343179664 -0700
@@ -456,21 +456,23 @@ static void cfq_put_queue(struct cfq_dat
 	mempool_free(cfqq, cfq_mpool);
 }
 
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid)
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid, int mask)
 {
 	const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
 	struct cfq_queue *cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);
 
 	if (!cfqq) {
-		cfqq = mempool_alloc(cfq_mpool, GFP_NOIO);
+		cfqq = mempool_alloc(cfq_mpool, mask);
 
-		INIT_LIST_HEAD(&cfqq->cfq_hash);
-		INIT_LIST_HEAD(&cfqq->cfq_list);
-		RB_CLEAR_ROOT(&cfqq->sort_list);
-
-		cfqq->pid = pid;
-		cfqq->queued[0] = cfqq->queued[1] = 0;
-		list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+		if (cfqq) {
+			INIT_LIST_HEAD(&cfqq->cfq_hash);
+			INIT_LIST_HEAD(&cfqq->cfq_list);
+			RB_CLEAR_ROOT(&cfqq->sort_list);
+
+			cfqq->pid = pid;
+			cfqq->queued[0] = cfqq->queued[1] = 0;
+			list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+		}
 	}
 
 	return cfqq;
@@ -478,15 +480,22 @@ static struct cfq_queue *cfq_get_queue(s
 
 static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
 {
-	struct cfq_queue *cfqq;
-
-	cfqq = cfq_get_queue(cfqd, current->tgid);
+	struct cfq_queue *cfqq = cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
 
-	cfq_add_crq_rb(cfqd, cfqq, crq);
+	if (cfqq) {
+		cfq_add_crq_rb(cfqd, cfqq, crq);
 
-	if (list_empty(&cfqq->cfq_list)) {
-		list_add(&cfqq->cfq_list, &cfqd->rr_list);
-		cfqd->busy_queues++;
+		if (list_empty(&cfqq->cfq_list)) {
+			list_add(&cfqq->cfq_list, &cfqd->rr_list);
+			cfqd->busy_queues++;
+		}
+	} else {
+		/*
+		 * this should only happen if the request wasn't allocated
+		 * through blk_alloc_request(), eg stack requests from ide-cd
+		 * (those should be removed) _and_ we are in OOM.
+		 */
+		list_add_tail(&crq->request->queuelist, cfqd->dispatch);
 	}
 }
 
@@ -617,8 +626,17 @@ static void cfq_put_request(request_queu
 static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
 {
 	struct cfq_data *cfqd = q->elevator.elevator_data;
-	struct cfq_rq *crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
+	struct cfq_queue *cfqq;
+	struct cfq_rq *crq;
+
+	/*
+	 * prepare a queue up front, so cfq_enqueue() doesn't have to
+	 */
+	cfqq = cfq_get_queue(cfqd, current->tgid, gfp_mask);
+	if (!cfqq)
+		return 1;
 
+	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
 	if (crq) {
 		RB_CLEAR(&crq->rb_node);
 		crq->request = rq;
_
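
For readers outside the tree, the allocation pattern the patch moves to can be
sketched in isolation.  The code below is not from cfq-iosched.c: the example_*
names and structures are invented for illustration, and it only assumes the
2.6-era mempool_alloc(pool, gfp_mask) interface.  The point it shows is the one
might_sleep() caught: the gfp mask is passed down from the caller rather than
hard-coded to GFP_NOIO, the allocation that may sleep is done up front in the
set_request path, and the enqueue path (which must not sleep) uses GFP_ATOMIC
and copes with a NULL return.

#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/list.h>

/* hypothetical stand-ins for struct cfq_queue / struct cfq_data */
struct example_queue {
	struct list_head	queue_list;
};

struct example_data {
	mempool_t		*pool;
	struct list_head	*dispatch;
};

/*
 * Take the gfp mask from the caller instead of hard-coding GFP_NOIO, and
 * only initialise the object if the allocation succeeded: without
 * __GFP_WAIT (e.g. GFP_ATOMIC) the pool is allowed to return NULL.
 */
static struct example_queue *example_get_queue(struct example_data *ed,
					       int gfp_mask)
{
	struct example_queue *eq = mempool_alloc(ed->pool, gfp_mask);

	if (eq)
		INIT_LIST_HEAD(&eq->queue_list);

	return eq;
}

/*
 * Runs in a context that must not sleep, so it may only use GFP_ATOMIC
 * and has to fall back (here, straight to the dispatch list) when the
 * allocation fails.  The allocation that may block was already attempted
 * in the set_request hook with whatever mask the block layer passed in.
 */
static void example_enqueue(struct example_data *ed, struct request *rq)
{
	struct example_queue *eq = example_get_queue(ed, GFP_ATOMIC);

	if (eq)
		list_add_tail(&rq->queuelist, &eq->queue_list);
	else
		list_add_tail(&rq->queuelist, ed->dispatch);
}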