/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
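
/*
 * Worker thread for a single MMC queue. It pulls requests off the block
 * layer queue and feeds them to the issue function, sleeping whenever the
 * queue is empty. The thread_sem semaphore is dropped around the sleep so
 * that mmc_queue_suspend() can park the thread between requests.
 */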
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}
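
	/* Wake our worker thread if it is idle. */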
	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	unsigned int bouncesz;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
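	/*
	 * Hosts that can only do a single hardware segment get a
	 * contiguous bounce buffer, so larger requests can still be
	 * issued as one transfer and copied through it by copy_sg().
	 */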
	if (host->max_hw_segs == 1) {
		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto free_bounce_buf;
			}

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto free_sg;
			}
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
 free_bounce_buf:
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
 cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
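
/**
 * mmc_cleanup_queue - shut down and free an MMC request queue.
 * @mq: MMC queue to clean up
 *
 * Kill off any requests still arriving on the dead queue, stop the
 * worker thread, free the scatterlists and any bounce buffer, and
 * release the block layer queue.
 */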
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
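
/*
 * Copy data between two scatterlists. Walks both lists a chunk at a
 * time, copying min(dst chunk, src chunk) bytes on each pass until the
 * source list is exhausted. Used to move data between the request's
 * scatterlist and the single-entry bounce buffer list.
 */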
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = page_address(dst->page) + dst->offset;
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = page_address(src->page) + src->offset;
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
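
/*
 * Map the current request into mq->sg. When a bounce buffer is in use
 * the request is mapped into mq->bounce_sg instead, and mq->sg is
 * collapsed into a single entry covering the bounce buffer, so the host
 * driver always sees one contiguous segment.
 */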
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}
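
/*
 * For writes, fill the bounce buffer from the request's scatterlist
 * before the transfer starts. The single-entry case needs no copy as
 * mq->sg then maps the request's pages directly.
 */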
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}
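
/*
 * For reads, copy the data back out of the bounce buffer into the
 * request's scatterlist once the transfer has completed.
 */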
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}