/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
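
/*
 * The queue worker thread: pulls requests off the block request queue
 * and hands them to mq->issue_fn().  PF_MEMALLOC lets the thread keep
 * making progress under memory pressure (it may be needed to write out
 * dirty pages), and thread_sem serialises it against
 * mmc_queue_suspend()/mmc_queue_resume().
 */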
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		/*
		 * The queue has already been torn down (queuedata is
		 * cleared in mmc_cleanup_queue()), so fail any request
		 * that still arrives.
		 */
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
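
	/*
	 * Hosts that can only handle a single segment get a contiguous
	 * bounce buffer, so that a multi-segment request can still be
	 * issued as one transfer.
	 */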
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;

 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
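
/*
 * Tear down a request queue: resume it if it was suspended, stop the
 * worker thread, fail any requests that are still pending, and free
 * the sg and bounce buffers.
 */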
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}