2011-07-03 09:56:22 +04:00
/*
* This file is provided under a dual BSD / GPLv2 license . When using or
* redistributing this file , you may do so under either license .
*
* GPL LICENSE SUMMARY
*
* Copyright ( c ) 2008 - 2011 Intel Corporation . All rights reserved .
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation .
*
* This program is distributed in the hope that it will be useful , but
* WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
* General Public License for more details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc . , 51 Franklin St - Fifth Floor , Boston , MA 02110 - 1301 USA .
* The full GNU General Public License is included in this distribution
* in the file called LICENSE . GPL .
*
* BSD LICENSE
*
* Copyright ( c ) 2008 - 2011 Intel Corporation . All rights reserved .
* All rights reserved .
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions
* are met :
*
* * Redistributions of source code must retain the above copyright
* notice , this list of conditions and the following disclaimer .
* * Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in
* the documentation and / or other materials provided with the
* distribution .
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
* LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL ,
* SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT
* LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE ,
* DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
* ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*/
2011-06-08 05:50:55 +04:00
# include <linux/circ_buf.h>
2011-05-09 04:34:44 +04:00
# include <linux/device.h>
# include <scsi/sas.h>
# include "host.h"
2011-07-03 09:56:22 +04:00
# include "isci.h"
# include "port.h"
2011-03-08 20:52:49 +03:00
# include "probe_roms.h"
2011-05-09 04:34:44 +04:00
# include "remote_device.h"
# include "request.h"
# include "scu_completion_codes.h"
# include "scu_event_codes.h"
2011-05-09 08:36:46 +04:00
# include "registers.h"
2011-05-09 04:34:44 +04:00
# include "scu_remote_node_context.h"
# include "scu_task_context.h"
2011-07-03 09:56:22 +04:00
2011-05-09 04:34:44 +04:00
# define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
2011-06-02 03:00:01 +04:00
# define smu_max_ports(dcc_value) \
2011-05-09 04:34:44 +04:00
( \
( ( ( dcc_value ) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK ) \
> > SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT ) + 1 \
)
2011-06-02 03:00:01 +04:00
# define smu_max_task_contexts(dcc_value) \
2011-05-09 04:34:44 +04:00
( \
( ( ( dcc_value ) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK ) \
> > SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT ) + 1 \
)
2011-06-02 03:00:01 +04:00
# define smu_max_rncs(dcc_value) \
2011-05-09 04:34:44 +04:00
( \
( ( ( dcc_value ) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK ) \
> > SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT ) + 1 \
)
# define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
/**
*
*
* The number of milliseconds to wait while a given phy is consuming power
* before allowing another set of phys to consume power . Ultimately , this will
* be specified by OEM parameter .
*/
# define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
/**
* NORMALIZE_PUT_POINTER ( ) -
*
* This macro will normalize the completion queue put pointer so its value can
* be used as an array index.
*/
# define NORMALIZE_PUT_POINTER(x) \
( ( x ) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK )
/**
* NORMALIZE_EVENT_POINTER ( ) -
*
* This macro will normalize the completion queue event entry so its value can
* be used as an index .
*/
# define NORMALIZE_EVENT_POINTER(x) \
( \
( ( x ) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK ) \
> > SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
)
/**
* NORMALIZE_GET_POINTER ( ) -
*
* This macro will normalize the completion queue get pointer so its value can
* be used as an index into an array
*/
# define NORMALIZE_GET_POINTER(x) \
( ( x ) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK )
/**
* NORMALIZE_GET_POINTER_CYCLE_BIT ( ) -
*
* This macro will normalize the completion queue cycle pointer so it matches
* the completion queue cycle bit
*/
# define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
( ( SMU_CQGR_CYCLE_BIT & ( x ) ) < < ( 31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT ) )
/**
* COMPLETION_QUEUE_CYCLE_BIT ( ) -
*
* This macro will return the cycle bit of the completion queue entry
*/
# define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
2011-06-02 04:10:50 +04:00
/* Init the state machine and call the state entry function (if any) */
void sci_init_sm(struct sci_base_state_machine *sm,
		 const struct sci_base_state *state_table, u32 initial_state)
{
	sci_state_transition_t enter;

	/* All three state ids start out equal to the initial state. */
	sm->initial_state_id = initial_state;
	sm->previous_state_id = initial_state;
	sm->current_state_id = initial_state;
	sm->state_table = state_table;

	/* Run the initial state's entry action when one is defined. */
	enter = sm->state_table[initial_state].enter_state;
	if (enter)
		enter(sm);
}
/* Call the state exit fn, update the current state, call the state entry fn */
void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
	sci_state_transition_t exit_fn;
	sci_state_transition_t enter_fn;

	/* Leave the current state via its exit action, if any. */
	exit_fn = sm->state_table[sm->current_state_id].exit_state;
	if (exit_fn)
		exit_fn(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	/* Enter the new state via its entry action, if any. */
	enter_fn = sm->state_table[sm->current_state_id].enter_state;
	if (enter_fn)
		enter_fn(sm);
}
2011-07-01 06:14:33 +04:00
static bool sci_controller_completion_queue_has_entries ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
2011-07-01 04:38:32 +04:00
u32 get_value = ihost - > completion_queue_get ;
2011-05-09 04:34:44 +04:00
u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK ;
if ( NORMALIZE_GET_POINTER_CYCLE_BIT ( get_value ) = =
2011-07-01 04:38:32 +04:00
COMPLETION_QUEUE_CYCLE_BIT ( ihost - > completion_queue [ get_index ] ) )
2011-05-09 04:34:44 +04:00
return true ;
return false ;
}
2011-07-01 06:14:33 +04:00
/* Completion interrupt service check: returns true when the completion
 * queue has work (caller then schedules the completion tasklet).  On a
 * spurious interrupt it acks the completion source and pulses the
 * interrupt mask to re-arm delivery — the ordering of these register
 * writes is a deliberate hardware-race workaround; do not reorder.
 */
static bool sci_controller_isr(struct isci_host *ihost)
{
	if (sci_controller_completion_queue_has_entries(ihost))
		return true;

	/* we have a spurious interrupt it could be that we have already
	 * emptied the completion queue from a previous interrupt
	 * FIXME: really!?
	 */
	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);

	/* There is a race in the hardware that could cause us not to be
	 * notified of an interrupt completion if we do not take this
	 * step.  We will mask then unmask the interrupts so if there is
	 * another interrupt pending the clearing of the interrupt
	 * source we get the next interrupt message.
	 */
	spin_lock(&ihost->scic_lock);
	/* Only pulse the mask while interrupts are administratively enabled,
	 * otherwise we would undo sci_controller_disable_interrupts().
	 */
	if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
		writel(0, &ihost->smu_registers->interrupt_mask);
	}
	spin_unlock(&ihost->scic_lock);

	return false;
}
2011-02-18 20:25:05 +03:00
irqreturn_t isci_msix_isr ( int vec , void * data )
2011-07-03 09:56:22 +04:00
{
2011-02-18 20:25:05 +03:00
struct isci_host * ihost = data ;
2011-07-01 06:14:33 +04:00
if ( sci_controller_isr ( ihost ) )
2011-02-18 20:25:07 +03:00
tasklet_schedule ( & ihost - > completion_tasklet ) ;
2011-07-03 09:56:22 +04:00
2011-02-18 20:25:05 +03:00
return IRQ_HANDLED ;
2011-07-03 09:56:22 +04:00
}
2011-07-01 06:14:33 +04:00
/* Error interrupt service check: returns true when a queue error or queue
 * suspend condition is latched in the SMU interrupt status register.  When
 * no error is pending the error-interrupt mask is pulsed (mask/unmask) to
 * work around a hardware notification race — do not reorder these writes.
 */
static bool sci_controller_error_isr(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending so let it through and handle
		 * in the callback */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be notified
	 * of an interrupt completion if we do not take this step.  We will mask
	 * then unmask the error interrupts so if there was another interrupt
	 * pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
	writel(0xff, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);

	return false;
}
2011-07-01 06:14:33 +04:00
/* Dispatch a task-context completion entry to its io request, after
 * validating that the request slot still refers to a live request.
 */
static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
{
	u32 idx = SCU_GET_COMPLETION_INDEX(ent);
	struct isci_request *ireq = ihost->reqs[idx];

	/* Make sure that we really want to process this IO request:
	 * skip requests that are no longer active ...
	 */
	if (!test_bit(IREQ_ACTIVE, &ireq->flags))
		return;
	/* ... or that carry the invalid tag ... */
	if (ireq->io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return;
	/* ... or whose tag sequence is stale (tag has been recycled). */
	if (ISCI_TAG_SEQ(ireq->io_tag) != ihost->io_request_sequence[idx])
		return;

	/* Valid, live request: hand the TC completion to the request. */
	sci_io_request_tc_completion(ireq, ent);
}
2011-07-01 06:14:33 +04:00
/* Log SDMA completion entries; currently diagnostic only (see @todo notes). */
static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
{
	u32 index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_command_request_type(ent)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: {
		struct isci_request *ireq = ihost->reqs[index];

		dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
			 __func__, ent, ireq);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;
	}
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: {
		struct isci_remote_device *idev = ihost->device_table[index];

		dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
			 __func__, ent, idev);
		/* @todo For a port RNC operation we need to fail the
		 * device
		 */
		break;
	}
	default:
		dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
			 __func__, ent);
		break;
	}
}
2011-07-01 06:14:33 +04:00
/* Route an unsolicited frame completion to the owning phy or remote device.
 * The frame buffer is marked in-use before dispatch; errored or unroutable
 * frames are released back to the controller.
 */
static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
{
	struct scu_unsolicited_frame_header *hdr;
	enum sci_status status = SCI_FAILURE;
	u32 frame_index = SCU_GET_FRAME_INDEX(ent);
	u32 index;

	hdr = ihost->uf_control.buffers.array[frame_index].header;
	ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(ent)) {
		/*
		 * /@todo If the IAF frame or SIGNATURE FIS frame has an error will
		 * /this cause a problem? We expect the phy initialization will
		 * /fail if there is an error in the frame. */
		sci_controller_release_frame(ihost, frame_index);
		return;
	}

	if (hdr->is_address_frame) {
		/* Address frames always go to the phy on the originating PE. */
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		status = sci_phy_frame_handler(&ihost->phys[index], frame_index);
	} else {
		index = SCU_GET_COMPLETION_INDEX(ent);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct attached SATA
			 * device that has not yet been created. In either case forwared
			 * the frame to the PE and let it take care of the frame data. */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
			status = sci_phy_frame_handler(&ihost->phys[index], frame_index);
		} else {
			/* Out-of-range node indices map to no device. */
			struct isci_remote_device *idev =
				index < ihost->remote_node_entries ?
					ihost->device_table[index] : NULL;

			if (idev != NULL)
				status = sci_remote_device_frame_handler(idev, frame_index);
			else
				sci_controller_release_frame(ihost, frame_index);
		}
	}

	if (status != SCI_SUCCESS) {
		/*
		 * /@todo Is there any reason to report some additional error message
		 * /when we get this failure notifiction? */
	}
}
2011-07-01 06:14:33 +04:00
/* Decode an event completion entry and dispatch it to the affected object:
 * the controller itself (SMU/fatal errors), an io request, a remote device,
 * or a phy — selected by the event type encoded in @ent.
 */
static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
{
	struct isci_remote_device *idev;
	struct isci_request *ireq;
	struct isci_phy *iphy;
	u32 index;

	/* Index selects a request/device slot; reinterpreted per event type. */
	index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_event_type(ent)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* /@todo The driver did something wrong and we need to fix the condition. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * /@todo This is a hardware failure and its likely that we want to
		 * /reset the controller. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		ireq = ihost->reqs[index];
		sci_io_request_event_handler(ireq, ent);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		/* The event specifier distinguishes request vs device targets. */
		switch (scu_get_event_specifier(ent)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			ireq = ihost->reqs[index];
			if (ireq != NULL)
				sci_io_request_event_handler(ireq, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesnt exist.\n",
					 __func__,
					 ihost,
					 ent);
			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			idev = ihost->device_table[index];
			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesnt exist.\n",
					 __func__,
					 ihost,
					 ent);
			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/*
		 * direct the broadcast change event to the phy first and then let
		 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/*
		 * direct error counter event to the phy object since that is where
		 * we get the event notification. This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		/* Phy-directed events: index is the protocol engine, not a slot. */
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		iphy = &ihost->phys[index];
		sci_phy_event_handler(iphy, ent);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		/* Remote-node events: bounds-check the node index first. */
		if (index < ihost->remote_node_entries) {
			idev = ihost->device_table[index];

			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
		} else
			dev_err(&ihost->pdev->dev,
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesnt "
				"exist.\n",
				__func__,
				ihost,
				ent,
				index);
		break;

	default:
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 ent);
		break;
	}
}
2011-07-01 06:14:33 +04:00
/* Drain the shared completion queue: walk entries whose cycle bit matches
 * our cached get-pointer cycle, dispatch each by completion type, then
 * publish the new get pointer (with cycle bits) back to hardware.
 * The XOR/AND cycle-bit arithmetic below toggles the cycle flag exactly
 * when an index wraps past the end of the ring — do not "simplify" it.
 */
static void sci_controller_process_completions(struct isci_host *ihost)
{
	u32 completion_count = 0;
	u32 ent;
	u32 get_index;
	u32 get_cycle;
	u32 event_get;
	u32 event_cycle;

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;

	event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;

	/* Keep consuming while the entry at get_index is valid for our cycle. */
	while (
		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
		== COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
		) {
		completion_count++;

		ent = ihost->completion_queue[get_index];

		/* increment the get pointer and check for rollover to toggle the cycle bit */
		get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
			     (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
		get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);

		dev_dbg(&ihost->pdev->dev,
			"%s: completion queue entry:0x%08x\n",
			__func__,
			ent);

		switch (SCU_GET_COMPLETION_TYPE(ent)) {
		case SCU_COMPLETION_TYPE_TASK:
			sci_controller_task_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			sci_controller_sdma_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			sci_controller_unsolicited_frame(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
			sci_controller_event_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_NOTIFY: {
			/* NOTIFY entries additionally consume an event slot;
			 * advance the event get pointer with the same
			 * wrap-toggles-cycle-bit scheme as above. */
			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
			event_get = (event_get+1) & (SCU_MAX_EVENTS-1);

			sci_controller_event_completion(ihost, ent);
			break;
		}
		default:
			dev_warn(&ihost->pdev->dev,
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 ent);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		ihost->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(ihost->completion_queue_get,
		       &ihost->smu_registers->completion_queue_get);
	}

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);
}
2011-07-01 06:14:33 +04:00
/* Handle a latched error condition: a queue-suspend with pending completions
 * is drained and acked; anything else is treated as fatal and moves the
 * controller state machine to SCIC_FAILED (interrupts left masked).
 */
static void sci_controller_error_handler(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    sci_controller_completion_queue_has_entries(ihost)) {

		sci_controller_process_completions(ihost);
		/* Ack the suspend only after the queue has been drained. */
		writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
	} else {
		dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_change_state(&ihost->sm, SCIC_FAILED);

		return;
	}

	/* If we dont process any completions I am not sure that we want to do this.
	 * We are in the middle of a hardware fault and should probably be reset.
	 */
	writel(0, &ihost->smu_registers->interrupt_mask);
}
2011-02-18 20:25:05 +03:00
irqreturn_t isci_intx_isr ( int vec , void * data )
2011-07-03 09:56:22 +04:00
{
irqreturn_t ret = IRQ_NONE ;
2011-04-19 23:32:51 +04:00
struct isci_host * ihost = data ;
2011-02-18 20:25:05 +03:00
2011-07-01 06:14:33 +04:00
if ( sci_controller_isr ( ihost ) ) {
2011-07-01 04:38:32 +04:00
writel ( SMU_ISR_COMPLETION , & ihost - > smu_registers - > interrupt_status ) ;
2011-04-19 23:32:51 +04:00
tasklet_schedule ( & ihost - > completion_tasklet ) ;
ret = IRQ_HANDLED ;
2011-07-01 06:14:33 +04:00
} else if ( sci_controller_error_isr ( ihost ) ) {
2011-04-19 23:32:51 +04:00
spin_lock ( & ihost - > scic_lock ) ;
2011-07-01 06:14:33 +04:00
sci_controller_error_handler ( ihost ) ;
2011-04-19 23:32:51 +04:00
spin_unlock ( & ihost - > scic_lock ) ;
ret = IRQ_HANDLED ;
2011-07-03 09:56:22 +04:00
}
2011-02-18 20:25:11 +03:00
2011-07-03 09:56:22 +04:00
return ret ;
}
2011-02-18 20:25:11 +03:00
irqreturn_t isci_error_isr ( int vec , void * data )
{
struct isci_host * ihost = data ;
2011-07-01 06:14:33 +04:00
if ( sci_controller_error_isr ( ihost ) )
sci_controller_error_handler ( ihost ) ;
2011-02-18 20:25:11 +03:00
return IRQ_HANDLED ;
}
2011-07-03 09:56:22 +04:00
/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @ihost: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
2011-05-09 04:34:44 +04:00
static void isci_host_start_complete ( struct isci_host * ihost , enum sci_status completion_status )
2011-07-03 09:56:22 +04:00
{
2011-02-18 20:25:07 +03:00
if ( completion_status ! = SCI_SUCCESS )
dev_info ( & ihost - > pdev - > dev ,
" controller start timed out, continuing... \n " ) ;
clear_bit ( IHOST_START_PENDING , & ihost - > flags ) ;
wake_up ( & ihost - > eventq ) ;
2011-07-03 09:56:22 +04:00
}
2011-02-18 20:25:05 +03:00
int isci_host_scan_finished ( struct Scsi_Host * shost , unsigned long time )
2011-07-03 09:56:22 +04:00
{
2011-12-20 04:42:34 +04:00
struct sas_ha_struct * ha = SHOST_TO_SAS_HA ( shost ) ;
struct isci_host * ihost = ha - > lldd_ha ;
2011-07-03 09:56:22 +04:00
2011-02-18 20:25:09 +03:00
if ( test_bit ( IHOST_START_PENDING , & ihost - > flags ) )
2011-07-03 09:56:22 +04:00
return 0 ;
2011-12-20 04:42:34 +04:00
sas_drain_work ( ha ) ;
2011-07-03 09:56:22 +04:00
return 1 ;
}
2011-05-09 04:34:44 +04:00
/**
2011-07-01 06:14:33 +04:00
* sci_controller_get_suggested_start_timeout ( ) - This method returns the
* suggested sci_controller_start ( ) timeout amount . The user is free to
2011-05-09 04:34:44 +04:00
* use any timeout value , but this method provides the suggested minimum
* start timeout value . The returned value is based upon empirical
* information determined as a result of interoperability testing .
* @ controller : the handle to the controller object for which to return the
* suggested start timeout .
*
* This method returns the number of milliseconds for the suggested start
* operation timeout .
*/
2011-07-01 06:14:33 +04:00
static u32 sci_controller_get_suggested_start_timeout ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
/* Validate the user supplied parameters. */
2011-07-01 04:38:32 +04:00
if ( ! ihost )
2011-05-09 04:34:44 +04:00
return 0 ;
/*
* The suggested minimum timeout value for a controller start operation :
*
* Signature FIS Timeout
* + Phy Start Timeout
* + Number of Phy Spin Up Intervals
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* Number of milliseconds for the controller start operation .
*
* NOTE : The number of phy spin up intervals will be equivalent
* to the number of phys divided by the number phys allowed
* per interval - 1 ( once OEM parameters are supported ) .
* Currently we assume only 1 phy per interval . */
return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
+ ( ( SCI_MAX_PHYS - 1 ) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL ) ;
}
2011-07-01 06:14:33 +04:00
/* Enable controller interrupts: set the software flag first so the ISR's
 * IHOST_IRQ_ENABLED check is consistent, then clear the SMU interrupt mask.
 */
static void sci_controller_enable_interrupts(struct isci_host *ihost)
{
	set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
	/* Writing 0 to the mask register unmasks all interrupt sources. */
	writel(0, &ihost->smu_registers->interrupt_mask);
}
2011-07-01 06:14:33 +04:00
/* Disable controller interrupts: clear the software flag, mask every SMU
 * interrupt source, then read the mask register back to flush the posted
 * write so the mask is guaranteed to have taken effect on return.
 */
void sci_controller_disable_interrupts(struct isci_host *ihost)
{
	clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
	readl(&ihost->smu_registers->interrupt_mask); /* flush */
}
2011-07-01 06:14:33 +04:00
static void sci_controller_enable_port_task_scheduler ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
u32 port_task_scheduler_value ;
port_task_scheduler_value =
2011-07-01 04:38:32 +04:00
readl ( & ihost - > scu_registers - > peg0 . ptsg . control ) ;
2011-05-09 04:34:44 +04:00
port_task_scheduler_value | =
( SCU_PTSGCR_GEN_BIT ( ETM_ENABLE ) |
SCU_PTSGCR_GEN_BIT ( PTSG_ENABLE ) ) ;
writel ( port_task_scheduler_value ,
2011-07-01 04:38:32 +04:00
& ihost - > scu_registers - > peg0 . ptsg . control ) ;
2011-05-09 04:34:44 +04:00
}
2011-07-01 06:14:33 +04:00
static void sci_controller_assign_task_entries ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
u32 task_assignment ;
/*
* Assign all the TCs to function 0
* TODO : Do we actually need to read this register to write it back ?
*/
task_assignment =
2011-07-01 04:38:32 +04:00
readl ( & ihost - > smu_registers - > task_context_assignment [ 0 ] ) ;
2011-05-09 04:34:44 +04:00
task_assignment | = ( SMU_TCA_GEN_VAL ( STARTING , 0 ) ) |
2011-07-01 04:38:32 +04:00
( SMU_TCA_GEN_VAL ( ENDING , ihost - > task_context_entries - 1 ) ) |
2011-05-09 04:34:44 +04:00
( SMU_TCA_GEN_BIT ( RANGE_CHECK_ENABLE ) ) ;
writel ( task_assignment ,
2011-07-01 04:38:32 +04:00
& ihost - > smu_registers - > task_context_assignment [ 0 ] ) ;
2011-05-09 04:34:44 +04:00
}
2011-07-01 06:14:33 +04:00
static void sci_controller_initialize_completion_queue ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
u32 index ;
u32 completion_queue_control_value ;
u32 completion_queue_get_value ;
u32 completion_queue_put_value ;
2011-07-01 04:38:32 +04:00
ihost - > completion_queue_get = 0 ;
2011-05-09 04:34:44 +04:00
2011-06-02 03:00:01 +04:00
completion_queue_control_value =
( SMU_CQC_QUEUE_LIMIT_SET ( SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1 ) |
SMU_CQC_EVENT_LIMIT_SET ( SCU_MAX_EVENTS - 1 ) ) ;
2011-05-09 04:34:44 +04:00
writel ( completion_queue_control_value ,
2011-07-01 04:38:32 +04:00
& ihost - > smu_registers - > completion_queue_control ) ;
2011-05-09 04:34:44 +04:00
/* Set the completion queue get pointer and enable the queue */
completion_queue_get_value = (
( SMU_CQGR_GEN_VAL ( POINTER , 0 ) )
| ( SMU_CQGR_GEN_VAL ( EVENT_POINTER , 0 ) )
| ( SMU_CQGR_GEN_BIT ( ENABLE ) )
| ( SMU_CQGR_GEN_BIT ( EVENT_ENABLE ) )
) ;
writel ( completion_queue_get_value ,
2011-07-01 04:38:32 +04:00
& ihost - > smu_registers - > completion_queue_get ) ;
2011-05-09 04:34:44 +04:00
/* Set the completion queue put pointer */
completion_queue_put_value = (
( SMU_CQPR_GEN_VAL ( POINTER , 0 ) )
| ( SMU_CQPR_GEN_VAL ( EVENT_POINTER , 0 ) )
) ;
writel ( completion_queue_put_value ,
2011-07-01 04:38:32 +04:00
& ihost - > smu_registers - > completion_queue_put ) ;
2011-05-09 04:34:44 +04:00
/* Initialize the cycle bit of the completion queue entries */
2011-06-02 03:00:01 +04:00
for ( index = 0 ; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES ; index + + ) {
2011-05-09 04:34:44 +04:00
/*
* If get . cycle_bit ! = completion_queue . cycle_bit
* its not a valid completion queue entry
* so at system start all entries are invalid */
2011-07-01 04:38:32 +04:00
ihost - > completion_queue [ index ] = 0x80000000 ;
2011-05-09 04:34:44 +04:00
}
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_initialize_unsolicited_frame_queue() - program the SCU SDMA
 * unsolicited frame queue registers.
 * @ihost: controller whose unsolicited frame queue is being initialized.
 *
 * Writes the queue size, then programs the get pointer (with the queue
 * enable bit) and the put pointer, both starting at entry 0.
 */
static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);

	writel(frame_queue_control_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value = (
		SCU_UFQGP_GEN_VAL(POINTER, 0)
		| SCU_UFQGP_GEN_BIT(ENABLE_BIT)
		);

	writel(frame_queue_get_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);

	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);

	writel(frame_queue_put_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}
2012-02-29 13:07:56 +04:00
void sci_controller_transition_to_ready ( struct isci_host * ihost , enum sci_status status )
2011-05-09 04:34:44 +04:00
{
2011-07-01 04:38:32 +04:00
if ( ihost - > sm . current_state_id = = SCIC_STARTING ) {
2011-05-09 04:34:44 +04:00
/*
* We move into the ready state , because some of the phys / ports
* may be up and operational .
*/
2011-07-01 04:38:32 +04:00
sci_change_state ( & ihost - > sm , SCIC_READY ) ;
2011-05-09 04:34:44 +04:00
isci_host_start_complete ( ihost , status ) ;
}
}
2011-06-29 02:05:53 +04:00
static bool is_phy_starting ( struct isci_phy * iphy )
2011-05-11 03:54:23 +04:00
{
2011-07-01 06:14:33 +04:00
enum sci_phy_states state ;
2011-05-11 03:54:23 +04:00
2011-06-29 02:05:53 +04:00
state = iphy - > sm . current_state_id ;
2011-05-11 03:54:23 +04:00
switch ( state ) {
2011-06-02 04:10:43 +04:00
case SCI_PHY_STARTING :
case SCI_PHY_SUB_INITIAL :
case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN :
case SCI_PHY_SUB_AWAIT_IAF_UF :
case SCI_PHY_SUB_AWAIT_SAS_POWER :
case SCI_PHY_SUB_AWAIT_SATA_POWER :
case SCI_PHY_SUB_AWAIT_SATA_PHY_EN :
case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN :
2012-02-29 13:07:56 +04:00
case SCI_PHY_SUB_AWAIT_OSSP_EN :
2011-06-02 04:10:43 +04:00
case SCI_PHY_SUB_AWAIT_SIG_FIS_UF :
case SCI_PHY_SUB_FINAL :
2011-05-11 03:54:23 +04:00
return true ;
default :
return false ;
}
}
2012-02-29 13:07:56 +04:00
bool is_controller_start_complete ( struct isci_host * ihost )
{
int i ;
for ( i = 0 ; i < SCI_MAX_PHYS ; i + + ) {
struct isci_phy * iphy = & ihost - > phys [ i ] ;
u32 state = iphy - > sm . current_state_id ;
/* in apc mode we need to check every phy, in
* mpc mode we only need to check phys that have
* been configured into a port
*/
if ( is_port_config_apc ( ihost ) )
/* pass */ ;
else if ( ! phy_get_non_dummy_port ( iphy ) )
continue ;
/* The controller start operation is complete iff:
* - all links have been given an opportunity to start
* - have no indication of a connected device
* - have an indication of a connected device and it has
* finished the link training process .
*/
if ( ( iphy - > is_in_link_training = = false & & state = = SCI_PHY_INITIAL ) | |
( iphy - > is_in_link_training = = false & & state = = SCI_PHY_STOPPED ) | |
( iphy - > is_in_link_training = = true & & is_phy_starting ( iphy ) ) | |
( ihost - > port_agent . phy_ready_mask ! = ihost - > port_agent . phy_configured_mask ) )
return false ;
}
return true ;
}
2011-05-09 04:34:44 +04:00
/**
2011-07-01 06:14:33 +04:00
* sci_controller_start_next_phy - start phy
2011-05-09 04:34:44 +04:00
* @ scic : controller
*
* If all the phys have been started , then attempt to transition the
* controller to the READY state and inform the user
2011-07-01 06:14:33 +04:00
* ( sci_cb_controller_start_complete ( ) ) .
2011-05-09 04:34:44 +04:00
*/
2011-07-01 06:14:33 +04:00
static enum sci_status sci_controller_start_next_phy ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
2011-07-01 06:14:33 +04:00
struct sci_oem_params * oem = & ihost - > oem_parameters ;
2011-06-29 02:05:53 +04:00
struct isci_phy * iphy ;
2011-05-09 04:34:44 +04:00
enum sci_status status ;
status = SCI_SUCCESS ;
2011-07-01 04:38:32 +04:00
if ( ihost - > phy_startup_timer_pending )
2011-05-09 04:34:44 +04:00
return status ;
2011-07-01 04:38:32 +04:00
if ( ihost - > next_phy_to_start > = SCI_MAX_PHYS ) {
2012-02-29 13:07:56 +04:00
if ( is_controller_start_complete ( ihost ) ) {
2011-07-01 06:14:33 +04:00
sci_controller_transition_to_ready ( ihost , SCI_SUCCESS ) ;
2011-07-01 04:38:32 +04:00
sci_del_timer ( & ihost - > phy_timer ) ;
ihost - > phy_startup_timer_pending = false ;
2011-05-09 04:34:44 +04:00
}
} else {
2011-07-01 04:38:32 +04:00
iphy = & ihost - > phys [ ihost - > next_phy_to_start ] ;
2011-05-09 04:34:44 +04:00
if ( oem - > controller . mode_type = = SCIC_PORT_MANUAL_CONFIGURATION_MODE ) {
2011-06-29 02:05:53 +04:00
if ( phy_get_non_dummy_port ( iphy ) = = NULL ) {
2011-07-01 04:38:32 +04:00
ihost - > next_phy_to_start + + ;
2011-05-09 04:34:44 +04:00
/* Caution recursion ahead be forwarned
*
* The PHY was never added to a PORT in MPC mode
* so start the next phy in sequence This phy
* will never go link up and will not draw power
* the OEM parameters either configured the phy
* incorrectly for the PORT or it was never
* assigned to a PORT
*/
2011-07-01 06:14:33 +04:00
return sci_controller_start_next_phy ( ihost ) ;
2011-05-09 04:34:44 +04:00
}
}
2011-07-01 06:14:33 +04:00
status = sci_phy_start ( iphy ) ;
2011-05-09 04:34:44 +04:00
if ( status = = SCI_SUCCESS ) {
2011-07-01 04:38:32 +04:00
sci_mod_timer ( & ihost - > phy_timer ,
2011-05-20 07:26:02 +04:00
SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT ) ;
2011-07-01 04:38:32 +04:00
ihost - > phy_startup_timer_pending = true ;
2011-05-09 04:34:44 +04:00
} else {
2011-07-01 04:38:32 +04:00
dev_warn ( & ihost - > pdev - > dev ,
2011-05-09 04:34:44 +04:00
" %s: Controller stop operation failed "
" to stop phy %d because of status "
" %d. \n " ,
__func__ ,
2011-07-01 04:38:32 +04:00
ihost - > phys [ ihost - > next_phy_to_start ] . phy_index ,
2011-05-09 04:34:44 +04:00
status ) ;
}
2011-07-01 04:38:32 +04:00
ihost - > next_phy_to_start + + ;
2011-05-09 04:34:44 +04:00
}
return status ;
}
2011-05-20 07:26:02 +04:00
/*
 * phy_startup_timeout() - sci_timer callback fired when a phy start does not
 * complete within SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT.
 * @data: the struct sci_timer embedded in the owning isci_host (phy_timer).
 *
 * Under scic_lock, clears the pending flag and keeps kicking the phy start
 * sequence until sci_controller_start_next_phy() reports SCI_SUCCESS.
 */
static void phy_startup_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* The timer may have been cancelled while we waited for the lock. */
	if (tmr->cancel)
		goto done;

	ihost->phy_startup_timer_pending = false;

	do {
		status = sci_controller_start_next_phy(ihost);
	} while (status != SCI_SUCCESS);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
2011-06-08 05:50:55 +04:00
static u16 isci_tci_active ( struct isci_host * ihost )
{
return CIRC_CNT ( ihost - > tci_head , ihost - > tci_tail , SCI_MAX_IO_REQUESTS ) ;
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_start() - bring an INITIALIZED controller into STARTING.
 * @ihost: controller to start; must currently be in SCIC_INITIALIZED.
 * @timeout: start timeout handed to sci_mod_timer().
 *
 * Rebuilds the TCi and RNi free pools, masks interrupts, enables the port
 * task scheduler, programs the completion and unsolicited frame queues,
 * starts every logical port, kicks off the phy start sequence, arms the
 * start-timeout timer, and transitions the state machine to SCIC_STARTING.
 *
 * Return: SCI_SUCCESS; SCI_FAILURE_INVALID_STATE when not INITIALIZED; or
 * the first non-zero status from sci_port_start().
 */
static enum sci_status sci_controller_start(struct isci_host *ihost,
					    u32 timeout)
{
	enum sci_status result;
	u16 index;

	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
	ihost->tci_head = 0;
	ihost->tci_tail = 0;
	for (index = 0; index < ihost->task_context_entries; index++)
		isci_tci_free(ihost, index);

	/* Build the RNi free pool */
	sci_remote_node_table_initialize(&ihost->available_remote_nodes,
					 ihost->remote_node_entries);

	/*
	 * Before anything else lets make sure we will not be
	 * interrupted by the hardware.
	 */
	sci_controller_disable_interrupts(ihost);

	/* Enable the port task scheduler */
	sci_controller_enable_port_task_scheduler(ihost);

	/* Assign all the task entries to ihost physical function */
	sci_controller_assign_task_entries(ihost);

	/* Now initialize the completion queue */
	sci_controller_initialize_completion_queue(ihost);

	/* Initialize the unsolicited frame queue for use */
	sci_controller_initialize_unsolicited_frame_queue(ihost);

	/* Start all of the ports on this controller */
	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		result = sci_port_start(iport);
		if (result)
			return result;
	}

	sci_controller_start_next_phy(ihost);

	sci_mod_timer(&ihost->timer, timeout);

	sci_change_state(&ihost->sm, SCIC_STARTING);

	return SCI_SUCCESS;
}
2012-06-22 10:41:56 +04:00
void isci_host_start ( struct Scsi_Host * shost )
2011-07-03 09:56:22 +04:00
{
2011-04-01 00:10:44 +04:00
struct isci_host * ihost = SHOST_TO_SAS_HA ( shost ) - > lldd_ha ;
2011-07-01 06:14:33 +04:00
unsigned long tmo = sci_controller_get_suggested_start_timeout ( ihost ) ;
2011-07-03 09:56:22 +04:00
2011-02-18 20:25:07 +03:00
set_bit ( IHOST_START_PENDING , & ihost - > flags ) ;
2011-02-18 20:25:09 +03:00
spin_lock_irq ( & ihost - > scic_lock ) ;
2011-07-01 06:14:33 +04:00
sci_controller_start ( ihost , tmo ) ;
sci_controller_enable_interrupts ( ihost ) ;
2011-02-18 20:25:09 +03:00
spin_unlock_irq ( & ihost - > scic_lock ) ;
2011-07-03 09:56:22 +04:00
}
2012-02-23 13:12:10 +04:00
/*
 * isci_host_stop_complete() - finish a controller stop: mask interrupts,
 * clear the stop-pending flag, and wake any waiter blocked on eventq
 * (e.g. wait_for_stop() in isci_host_deinit()).
 */
static void isci_host_stop_complete(struct isci_host *ihost)
{
	sci_controller_disable_interrupts(ihost);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_completion_handler() - drain the completion queue, then
 * acknowledge and re-enable the completion interrupt.
 * @ihost: the controller whose completions are processed.
 *
 * Caller holds scic_lock (see isci_host_completion_routine()).
 */
static void sci_controller_completion_handler(struct isci_host *ihost)
{
	/* Empty out the completion queue */
	if (sci_controller_completion_queue_has_entries(ihost))
		sci_controller_process_completions(ihost);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);
}
2012-03-04 16:44:53 +04:00
/*
 * ireq_done() - complete an I/O request back to libsas and release its tag.
 * @ihost: owning controller.
 * @ireq: the completed request.
 * @task: the libsas task bound to @ireq.
 *
 * On the normal path (no abort in flight on either side) the task is either
 * handed back via task_done() or pushed into sas_task_abort() for error
 * recovery; on the abort path only the lldd linkage is severed.  Waiters on
 * an active abort path are woken, and the tag is freed unless the request
 * asked to keep it.
 */
void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
{
	if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
	    !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
			/* Normal notification (task_done) */
			dev_dbg(&ihost->pdev->dev,
				"%s: Normal - ireq/task = %p/%p\n",
				__func__, ireq, task);
			task->lldd_task = NULL;
			task->task_done(task);
		} else {
			dev_dbg(&ihost->pdev->dev,
				"%s: Error - ireq/task = %p/%p\n",
				__func__, ireq, task);
			/* NOTE(review): lldd_task is cleared here only for ATA
			 * protocols before sas_task_abort() -- presumably the
			 * non-ATA abort path still needs the linkage; confirm.
			 */
			if (sas_protocol_ata(task->task_proto))
				task->lldd_task = NULL;
			sas_task_abort(task);
		}
	} else
		task->lldd_task = NULL;

	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
		wake_up_all(&ihost->eventq);

	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
		isci_free_tag(ihost, ireq->io_tag);
}
2011-07-03 09:56:22 +04:00
/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when interrupts
 *    in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *ihost = (struct isci_host *)data;
	u16 active;

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_completion_handler(ihost);
	spin_unlock_irq(&ihost->scic_lock);

	/*
	 * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
	 * issued for hardware issue workaround
	 */
	/* NOTE(review): assumes at least SCI_MAX_PORTS dummy TCs are always
	 * outstanding so this u16 subtraction cannot wrap -- confirm. */
	active = isci_tci_active(ihost) - SCI_MAX_PORTS;

	/*
	 * the coalesence timeout doubles at each encoding step, so
	 * update it based on the ilog2 value of the outstanding requests
	 */
	writel(SMU_ICC_GEN_VAL(NUMBER, active) |
	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
	       &ihost->smu_registers->interrupt_coalesce_control);
}
2011-05-09 04:34:44 +04:00
/**
 * sci_controller_stop() - This method will stop an individual controller
 *    object.This method will invoke the associated user callback upon
 *    completion.  The completion callback is called when the following
 *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
 *    controller has been quiesced. This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation by
 *    the hardware is halted.
 * @controller: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state. Indicate if the
 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
 * controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{
	/* Only a READY controller may be asked to stop. */
	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Arm the stop timeout, then let the STOPPING state do the work. */
	sci_mod_timer(&ihost->timer, timeout);
	sci_change_state(&ihost->sm, SCIC_STOPPING);
	return SCI_SUCCESS;
}
/**
 * sci_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller.  This operation is
 *    considered destructive.  In other words, all current operations are wiped
 *    out.  No IO completions for outstanding devices occur.  Outstanding IO
 *    requests are not aborted or completed at the actual remote device.
 * @controller: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
 * the controller reset operation is unable to complete.
 */
static enum sci_status sci_controller_reset(struct isci_host *ihost)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_RESET:
	case SCIC_READY:
	case SCIC_STOPPING:
	case SCIC_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_change_state(&ihost->sm, SCIC_RESETTING);
		return SCI_SUCCESS;
	default:
		/* Reset is not permitted from any other state. */
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}
}
2012-02-23 13:12:10 +04:00
static enum sci_status sci_controller_stop_phys ( struct isci_host * ihost )
{
u32 index ;
enum sci_status status ;
enum sci_status phy_status ;
status = SCI_SUCCESS ;
for ( index = 0 ; index < SCI_MAX_PHYS ; index + + ) {
phy_status = sci_phy_stop ( & ihost - > phys [ index ] ) ;
if ( phy_status ! = SCI_SUCCESS & &
phy_status ! = SCI_FAILURE_INVALID_STATE ) {
status = SCI_FAILURE ;
dev_warn ( & ihost - > pdev - > dev ,
" %s: Controller stop operation failed to stop "
" phy %d because of status %d. \n " ,
__func__ ,
ihost - > phys [ index ] . phy_index , phy_status ) ;
}
}
return status ;
}
/**
 * isci_host_deinit - shutdown frame reception and dma
 * @ihost: host to take down
 *
 * This is called in either the driver shutdown or the suspend path.  In
 * the shutdown case libsas went through port teardown and normal device
 * removal (i.e. physical links stayed up to service scsi_device removal
 * commands).  In the suspend case we disable the hardware without
 * notifying libsas of the link down events since we want libsas to
 * remember the domain across the suspend/resume cycle
 */
void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	/* disable output data selects */
	for (i = 0; i < isci_gpio_count(ihost); i++)
		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);

	/* Request the stop and block until it completes. */
	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);

	/* phy stop is after controller stop to allow port and device to
	 * go idle before shutting down the phys, but the expectation is
	 * that i/o has been shut off well before we reach this
	 * function.
	 */
	sci_controller_stop_phys(ihost);

	/* disable sgpio: where the above wait should give time for the
	 * enclosure to sample the gpios going inactive
	 */
	writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_reset(ihost);
	spin_unlock_irq(&ihost->scic_lock);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];
		del_timer_sync(&iport->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = &ihost->phys[i];
		del_timer_sync(&iphy->sata_timer.timer);
	}

	/* Quiesce the remaining host-level timers. */
	del_timer_sync(&ihost->port_agent.timer.timer);

	del_timer_sync(&ihost->power_control.timer.timer);

	del_timer_sync(&ihost->timer.timer);

	del_timer_sync(&ihost->phy_timer.timer);
}
/*
 * scu_base() - MMIO base of this controller's SCU register set.
 *
 * Each controller occupies a fixed-size slice of the shared SCU BAR,
 * indexed by the controller id.
 */
static void __iomem *scu_base(struct isci_host *isci_host)
{
	return pcim_iomap_table(isci_host->pdev)[SCI_SCU_BAR * 2] +
	       SCI_SCU_BAR_SIZE * isci_host->id;
}
/*
 * smu_base() - MMIO base of this controller's SMU register set.
 *
 * Each controller occupies a fixed-size slice of the shared SMU BAR,
 * indexed by the controller id.
 */
static void __iomem *smu_base(struct isci_host *isci_host)
{
	return pcim_iomap_table(isci_host->pdev)[SCI_SMU_BAR * 2] +
	       SCI_SMU_BAR_SIZE * isci_host->id;
}
2011-07-01 06:14:33 +04:00
static void sci_controller_initial_state_enter ( struct sci_base_state_machine * sm )
2011-05-09 04:34:44 +04:00
{
2011-07-01 04:38:32 +04:00
struct isci_host * ihost = container_of ( sm , typeof ( * ihost ) , sm ) ;
2011-05-09 04:34:44 +04:00
2011-07-01 04:38:32 +04:00
sci_change_state ( & ihost - > sm , SCIC_RESET ) ;
2011-05-09 04:34:44 +04:00
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_starting_state_exit() - leaving STARTING: cancel the
 * start-timeout timer armed by sci_controller_start().
 */
static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}
# define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
# define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
# define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
# define INTERRUPT_COALESCE_NUMBER_MAX 256
# define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
# define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
/**
 * sci_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @controller: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceed
 *    this number, an interrupt will be generated. The valid range of the input
 *    is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000] . A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrutp coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status
sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
					 u32 coalesce_number,
					 u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 *  Defined encoding for interrupt coalescing timeout:
	 *              Value   Min      Max     Units
	 *              -----   ---      ---     -----
	 *              0       -        -       Disabled
	 *              1       13.3     20.0    ns
	 *              2       26.7     40.0
	 *              3       53.3     80.0
	 *              4       106.7    160.0
	 *              5       213.3    320.0
	 *              6       426.7    640.0
	 *              7       853.3    1280.0
	 *              8       1.7      2.6     us
	 *              9       3.4      5.1
	 *              10      6.8      10.2
	 *              11      13.7     20.5
	 *              12      27.3     41.0
	 *              13      54.6     81.9
	 *              14      109.2    163.8
	 *              15      218.5    327.7
	 *              16      436.9    655.4
	 *              17      873.8    1310.7
	 *              18      1.7      2.6     ms
	 *              19      3.5      5.2
	 *              20      7.0      10.5
	 *              21      14.0     21.0
	 *              22      28.0     41.9
	 *              23      55.9     83.9
	 *              24      111.8    167.8
	 *              25      223.7    335.5
	 *              26      447.4    671.1
	 *              27      894.8    1342.2
	 *              28      1.8      2.7     s
	 *              Others Undefined */

	/*
	 * Use the table above to decide the encode of interrupt coalescing timeout
	 * value for register writing. */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		     timeout_encode++) {
			if (min <= coalesce_timeout && max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				/* Between two encodings: pick the nearer bound. */
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				/* Each successive encoding doubles the range. */
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &ihost->smu_registers->interrupt_coalesce_control);

	/* Remember the settings; timeout is stored back in microseconds. */
	ihost->interrupt_coalesce_number = (u16)coalesce_number;
	ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_ready_state_enter() - entering READY: enable SCU clock
 * gating and reset interrupt coalescence to its defaults.
 */
static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
	u32 val;

	/* enable clock gating for power control of the scu unit */
	val = readl(&ihost->smu_registers->clock_gating_control);
	val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
		 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
		 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
	val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
	writel(val, &ihost->smu_registers->clock_gating_control);

	/* set the default interrupt coalescence number and timeout value. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_ready_state_exit() - leaving READY: turn interrupt
 * coalescence back off.
 */
static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* disable interrupt coalescence. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}
2011-07-01 06:14:33 +04:00
static enum sci_status sci_controller_stop_ports ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
u32 index ;
enum sci_status port_status ;
enum sci_status status = SCI_SUCCESS ;
2011-07-01 04:38:32 +04:00
for ( index = 0 ; index < ihost - > logical_port_entries ; index + + ) {
2011-06-30 00:09:25 +04:00
struct isci_port * iport = & ihost - > ports [ index ] ;
2011-05-09 04:34:44 +04:00
2011-07-01 06:14:33 +04:00
port_status = sci_port_stop ( iport ) ;
2011-05-09 04:34:44 +04:00
if ( ( port_status ! = SCI_SUCCESS ) & &
( port_status ! = SCI_FAILURE_INVALID_STATE ) ) {
status = SCI_FAILURE ;
2011-07-01 04:38:32 +04:00
dev_warn ( & ihost - > pdev - > dev ,
2011-05-09 04:34:44 +04:00
" %s: Controller stop operation failed to "
" stop port %d because of status %d. \n " ,
__func__ ,
2011-06-30 00:09:25 +04:00
iport - > logical_port_index ,
2011-05-09 04:34:44 +04:00
port_status ) ;
}
}
return status ;
}
2011-07-01 06:14:33 +04:00
static enum sci_status sci_controller_stop_devices ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
u32 index ;
enum sci_status status ;
enum sci_status device_status ;
status = SCI_SUCCESS ;
2011-07-01 04:38:32 +04:00
for ( index = 0 ; index < ihost - > remote_node_entries ; index + + ) {
if ( ihost - > device_table [ index ] ! = NULL ) {
2011-05-09 04:34:44 +04:00
/* / @todo What timeout value do we want to provide to this request? */
2011-07-01 06:14:33 +04:00
device_status = sci_remote_device_stop ( ihost - > device_table [ index ] , 0 ) ;
2011-05-09 04:34:44 +04:00
if ( ( device_status ! = SCI_SUCCESS ) & &
( device_status ! = SCI_FAILURE_INVALID_STATE ) ) {
2011-07-01 04:38:32 +04:00
dev_warn ( & ihost - > pdev - > dev ,
2011-05-09 04:34:44 +04:00
" %s: Controller stop operation failed "
" to stop device 0x%p because of "
" status %d. \n " ,
__func__ ,
2011-07-01 04:38:32 +04:00
ihost - > device_table [ index ] , device_status ) ;
2011-05-09 04:34:44 +04:00
}
}
}
return status ;
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_stopping_state_enter() - entering STOPPING: quiesce remote
 * devices, then ports.
 *
 * If no remote device is still mid-stop afterwards, the stop is already
 * complete and the host is notified immediately.
 */
static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_controller_stop_devices(ihost);
	sci_controller_stop_ports(ihost);

	if (!sci_controller_has_remote_devices_stopping(ihost))
		isci_host_stop_complete(ihost);
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_stopping_state_exit() - leaving STOPPING: cancel the
 * stop-timeout timer armed by sci_controller_stop().
 */
static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_reset_hardware() - destructive hardware reset of the SCU.
 * @ihost: the controller to reset.
 *
 * Masks interrupts, soft-resets the SCU, then (after a settling delay)
 * clears the completion and unsolicited frame queue pointers and any
 * latched interrupt status.
 */
static void sci_controller_reset_hardware(struct isci_host *ihost)
{
	/* Disable interrupts so we dont take any spurious interrupts */
	sci_controller_disable_interrupts(ihost);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &ihost->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);

	/* clear all interrupts */
	writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_resetting_state_enter() - entering RESETTING: hard-reset
 * the hardware, then drop straight back into the RESET state.
 */
static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_controller_reset_hardware(ihost);
	sci_change_state(&ihost->sm, SCIC_RESET);
}
2011-07-01 06:14:33 +04:00
/*
 * Controller state table: per-state enter/exit hooks for the SCIC base
 * state machine.  States with an empty initializer need no hooks.
 */
static const struct sci_base_state sci_controller_state_table[] = {
	[SCIC_INITIAL] = {
		.enter_state = sci_controller_initial_state_enter,
	},
	[SCIC_RESET] = {},
	[SCIC_INITIALIZING] = {},
	[SCIC_INITIALIZED] = {},
	[SCIC_STARTING] = {
		.exit_state  = sci_controller_starting_state_exit,
	},
	[SCIC_READY] = {
		.enter_state = sci_controller_ready_state_enter,
		.exit_state  = sci_controller_ready_state_exit,
	},
	[SCIC_RESETTING] = {
		.enter_state = sci_controller_resetting_state_enter,
	},
	[SCIC_STOPPING] = {
		.enter_state = sci_controller_stopping_state_enter,
		.exit_state = sci_controller_stopping_state_exit,
	},
	[SCIC_FAILED] = {}
};
2011-05-19 15:59:56 +04:00
/*
 * controller_timeout - controller start/stop watchdog expiry
 * @data: opaque sci_timer pointer cast by the timer core
 *
 * A timeout while SCIC_STARTING completes the ready transition with
 * SCI_FAILURE_TIMEOUT; a timeout while SCIC_STOPPING drives the state
 * machine to SCIC_FAILED and releases the stop waiter.  Any other state
 * is unexpected and only logged.
 */
static void controller_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
	struct sci_base_state_machine *sm = &ihost->sm;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* The timer may have been cancelled after this handler was queued. */
	if (tmr->cancel)
		goto done;

	if (sm->current_state_id == SCIC_STARTING)
		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCIC_STOPPING) {
		sci_change_state(sm, SCIC_FAILED);
		isci_host_stop_complete(ihost);
	} else	/* / @todo Now what do we want to do in this case? */
		dev_err(&ihost->pdev->dev,
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
2011-05-09 04:34:44 +04:00
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_construct - one-time construction of the core controller
 * @ihost:    controller to construct
 * @scu_base: ioremapped SCU register space
 * @smu_base: ioremapped SMU register space
 *
 * Initializes the controller state machine, records the register bases,
 * constructs all ports plus a dummy port that initially owns every phy,
 * and finishes by soft-resetting the controller.
 *
 * Return: the status of sci_controller_reset().
 */
static enum sci_status sci_controller_construct(struct isci_host *ihost,
					void __iomem *scu_base,
					void __iomem *smu_base)
{
	u8 i;

	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);

	ihost->scu_registers = scu_base;
	ihost->smu_registers = smu_base;

	sci_port_configuration_agent_construct(&ihost->port_agent);

	/* Construct the ports for this controller */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		sci_port_construct(&ihost->ports[i], i, ihost);
	/* i == SCI_MAX_PORTS after the loop: this constructs the dummy port */
	sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		sci_phy_construct(&ihost->phys[i],
				  &ihost->ports[SCI_MAX_PORTS], i);
	}

	ihost->invalid_phy_mask = 0;

	sci_init_timer(&ihost->timer, controller_timeout);

	return sci_controller_reset(ihost);
}
2012-01-04 13:32:44 +04:00
/*
 * sci_oem_parameters_validate - sanity-check OEM parameters from the ROM
 * @oem:     parameter block to validate
 * @version: OEM parameter block version (ISCI_ROM_VER_*)
 *
 * Checks port phy masks, SAS addresses, the port configuration mode,
 * the concurrent spin-up limit and (for v1.1+) the SSC spread levels.
 *
 * Return: 0 when the parameters are acceptable, -EINVAL otherwise.
 */
int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
{
	int i;

	/* No port may claim phys outside the legal mask. */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
			return -EINVAL;

	/* Every phy must carry a non-zero SAS address. */
	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (oem->phys[i].sas_address.high == 0 &&
		    oem->phys[i].sas_address.low == 0)
			return -EINVAL;

	switch (oem->controller.mode_type) {
	case SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE:
		/* Automatic mode: no port may pre-assign phys. */
		for (i = 0; i < SCI_MAX_PHYS; i++)
			if (oem->ports[i].phy_mask != 0)
				return -EINVAL;
		break;
	case SCIC_PORT_MANUAL_CONFIGURATION_MODE: {
		/* Manual mode: at least one phy must be assigned somewhere. */
		u8 combined_mask = 0;

		for (i = 0; i < SCI_MAX_PHYS; i++)
			combined_mask |= oem->ports[i].phy_mask;

		if (combined_mask == 0)
			return -EINVAL;
		break;
	}
	default:
		return -EINVAL;
	}

	if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
	    oem->controller.max_concurr_spin_up < 1)
		return -EINVAL;

	if (oem->controller.do_enable_ssc) {
		if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
			return -EINVAL;

		if (version >= ISCI_ROM_VER_1_1) {
			u8 level = oem->controller.ssc_sata_tx_spread_level;

			/* Valid SATA spread levels: 0, 2, 3, 6, 7 -> 0xcd */
			if (level > 7 || !((1 << level) & 0xcd))
				return -EINVAL;

			level = oem->controller.ssc_sas_tx_spread_level;
			if (oem->controller.ssc_sas_tx_type == 0) {
				/* Type 0 allows levels 0, 2, 3 -> 0x0d */
				if (level > 7 || !((1 << level) & 0x0d))
					return -EINVAL;
			} else if (oem->controller.ssc_sas_tx_type == 1) {
				/* Type 1 allows levels 0, 3, 6 -> 0x49 */
				if (level > 7 || !((1 << level) & 0x49))
					return -EINVAL;
			}
		}
	}

	return 0;
}
2011-10-28 02:05:42 +04:00
static u8 max_spin_up ( struct isci_host * ihost )
{
if ( ihost - > user_parameters . max_concurr_spinup )
return min_t ( u8 , ihost - > user_parameters . max_concurr_spinup ,
MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ) ;
else
return min_t ( u8 , ihost - > oem_parameters . controller . max_concurr_spin_up ,
MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ) ;
}
2011-05-20 07:17:47 +04:00
/*
 * power_control_timeout - periodic spin-up power arbitration
 * @data: opaque sci_timer pointer cast by the timer core
 *
 * Runs every SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL under scic_lock.
 * Resets the per-interval granted count, then grants power to queued
 * phys until the max_spin_up() budget is consumed.  When an SSP phy is
 * granted power, queued phys with the same attached SAS address are
 * granted too, since the shared remote device only spins up once.
 */
static void power_control_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
	struct isci_phy *iphy;
	unsigned long flags;
	u8 i;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* The timer may have been cancelled after this handler was queued. */
	if (tmr->cancel)
		goto done;

	/* A new interval begins: the whole budget is available again. */
	ihost->power_control.phys_granted_power = 0;

	if (ihost->power_control.phys_waiting == 0) {
		/* Nothing queued; let the timer idle until a new request. */
		ihost->power_control.timer_started = false;
		goto done;
	}

	for (i = 0; i < SCI_MAX_PHYS; i++) {

		if (ihost->power_control.phys_waiting == 0)
			break;

		iphy = ihost->power_control.requesters[i];
		if (iphy == NULL)
			continue;

		if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
			break;

		/* Grant this phy power and take it off the wait list. */
		ihost->power_control.requesters[i] = NULL;
		ihost->power_control.phys_waiting--;
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);

		if (iphy->protocol == SAS_PROTOCOL_SSP) {
			u8 j;

			for (j = 0; j < SCI_MAX_PHYS; j++) {
				struct isci_phy *requester = ihost->power_control.requesters[j];

				/*
				 * Search the power_control queue to see if there are other phys
				 * attached to the same remote device. If found, take all of
				 * them out of await_sas_power state.
				 */
				if (requester != NULL && requester != iphy) {
					u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
							  iphy->frame_rcvd.iaf.sas_addr,
							  sizeof(requester->frame_rcvd.iaf.sas_addr));

					if (other == 0) {
						ihost->power_control.requesters[j] = NULL;
						ihost->power_control.phys_waiting--;
						sci_phy_consume_power_handler(requester);
					}
				}
			}
		}
	}

	/*
	 * It doesn't matter if the power list is empty, we need to start the
	 * timer in case another phy becomes ready.
	 */
	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
	ihost->power_control.timer_started = true;

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_power_control_queue_insert - request spin-up power for a phy
 * @ihost: owning controller
 * @iphy:  phy requesting power
 *
 * Grants power immediately while the concurrent spin-up budget
 * (max_spin_up()) has headroom, (re)arming the power-control timer that
 * later resets the granted count.  When the budget is exhausted, a phy
 * whose SAS address matches an already-READY SSP phy is still granted
 * power at once (that remote device has already spun up); otherwise the
 * phy is parked on the requesters list until power_control_timeout()
 * hands out more grants.
 */
void sci_controller_power_control_queue_insert(struct isci_host *ihost,
					       struct isci_phy *iphy)
{
	BUG_ON(iphy == NULL);

	if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);

		/*
		 * stop and start the power_control timer. When the timer fires, the
		 * no_of_phys_granted_power will be set to 0
		 */
		if (ihost->power_control.timer_started)
			sci_del_timer(&ihost->power_control.timer);

		sci_mod_timer(&ihost->power_control.timer,
			      SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
		ihost->power_control.timer_started = true;

	} else {
		/*
		 * There are phys, attached to the same sas address as this phy, are
		 * already in READY state, this phy don't need wait.
		 */
		u8 i;
		struct isci_phy *current_phy;

		for (i = 0; i < SCI_MAX_PHYS; i++) {
			u8 other;

			current_phy = &ihost->phys[i];

			other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
				       iphy->frame_rcvd.iaf.sas_addr,
				       sizeof(current_phy->frame_rcvd.iaf.sas_addr));

			if (current_phy->sm.current_state_id == SCI_PHY_READY &&
			    current_phy->protocol == SAS_PROTOCOL_SSP &&
			    other == 0) {
				sci_phy_consume_power_handler(iphy);
				break;
			}
		}

		if (i == SCI_MAX_PHYS) {
			/* Add the phy in the waiting list */
			ihost->power_control.requesters[iphy->phy_index] = iphy;
			ihost->power_control.phys_waiting++;
		}
	}
}
2011-07-01 06:14:33 +04:00
void sci_controller_power_control_queue_remove ( struct isci_host * ihost ,
struct isci_phy * iphy )
2011-05-09 04:34:44 +04:00
{
2011-06-29 02:05:53 +04:00
BUG_ON ( iphy = = NULL ) ;
2011-05-09 04:34:44 +04:00
2011-07-01 06:14:33 +04:00
if ( ihost - > power_control . requesters [ iphy - > phy_index ] )
2011-07-01 04:38:32 +04:00
ihost - > power_control . phys_waiting - - ;
2011-05-09 04:34:44 +04:00
2011-07-01 04:38:32 +04:00
ihost - > power_control . requesters [ iphy - > phy_index ] = NULL ;
2011-05-09 04:34:44 +04:00
}
2012-01-04 13:32:39 +04:00
/* Bit @phy of the selection byte marks a long cable on that phy. */
static int is_long_cable(int phy, unsigned char selection_byte)
{
	return (selection_byte >> phy) & 1;
}
/* Bit (@phy + 4) of the selection byte marks a medium cable on that phy. */
static int is_medium_cable(int phy, unsigned char selection_byte)
{
	return (selection_byte >> (phy + 4)) & 1;
}
/*
 * Decode the two per-phy cable bits: bit @phy contributes 1 (long),
 * bit (@phy + 4) contributes 2 (medium), yielding a cable_selections
 * value in 0..3.
 */
static enum cable_selections decode_selection_byte(
	int phy,
	unsigned char selection_byte)
{
	int long_bit = (selection_byte >> phy) & 1;
	int medium_bit = (selection_byte >> (phy + 4)) & 1;

	return long_bit + 2 * medium_bit;
}
/*
 * Return a pointer to the cable-selection byte for this controller:
 * the module override when one is set, otherwise the OEM parameter.
 * NOTE(review): the override path indexes cable_selection_override by
 * controller id, i.e. assumes one byte per controller -- confirm
 * against the cable_selection_override definition.
 */
static unsigned char *to_cable_select(struct isci_host *ihost)
{
	if (is_cable_select_overridden())
		return ((unsigned char *)&cable_selection_override)
			+ ihost->id;
	else
		return &ihost->oem_parameters.controller.cable_selection_mask;
}
/* Cable selection for @phy, honoring any module-level override. */
enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
{
	unsigned char selection_byte = *to_cable_select(ihost);

	return decode_selection_byte(phy, selection_byte);
}
char * lookup_cable_names ( enum cable_selections selection )
{
static char * cable_names [ ] = {
[ short_cable ] = " short " ,
[ long_cable ] = " long " ,
[ medium_cable ] = " medium " ,
[ undefined_cable ] = " <undefined, assumed long> " /* bit 0==1 */
} ;
return ( selection < = undefined_cable ) ? cable_names [ selection ]
: cable_names [ undefined_cable ] ;
2012-01-04 13:32:39 +04:00
}
2011-05-09 04:34:44 +04:00
# define AFE_REGISTER_WRITE_DELAY 10
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_afe_initialization - program the analog front end
 * @ihost: controller whose AFE is being initialized
 *
 * Writes the silicon-revision-specific (A2/B0/C0/C1) AFE register
 * sequence: bias currents, PLL enable and lock-wait, then per-phy
 * transceiver setup (SSC, channel power, equalization, and the OEM
 * per-phy TX amplitude values).  C1 parts additionally select register
 * values by cable length.  Every write is followed by the mandatory
 * AFE_REGISTER_WRITE_DELAY; the ordering of writes is significant.
 */
static void sci_controller_afe_initialization(struct isci_host *ihost)
{
	struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
	const struct sci_oem_params *oem = &ihost->oem_parameters;
	struct pci_dev *pdev = ihost->pdev;
	u32 afe_status;
	u32 phy_id;
	unsigned char cable_selection_mask = *to_cable_select(ihost);

	/* Clear DFX Status registers */
	writel(0x0081000f, &afe->afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer
		 */
		writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a2(pdev))
		writel(0x00005A00, &afe->afe_bias_control);
	else if (is_b0(pdev) || is_c0(pdev))
		writel(0x00005F00, &afe->afe_bias_control);
	else if (is_c1(pdev))
		writel(0x00005500, &afe->afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_a2(pdev))
		writel(0x80040908, &afe->afe_pll_control0);
	else if (is_b0(pdev) || is_c0(pdev))
		writel(0x80040A08, &afe->afe_pll_control0);
	else if (is_c1(pdev)) {
		/* C1 requires an enable/disable/enable pulse on the PLL. */
		writel(0x80000B08, &afe->afe_pll_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
		writel(0x00000B08, &afe->afe_pll_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
		writel(0x80000B08, &afe->afe_pll_control0);
	}

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock */
	do {
		afe_status = readl(&afe->afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a2(pdev)) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76
		 * us to 50 us)
		 */
		writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Per-phy transceiver programming. */
	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
		int cable_length_long =
			is_long_cable(phy_id, cable_selection_mask);
		int cable_length_medium =
			is_medium_cable(phy_id, cable_selection_mask);

		if (is_a2(pdev)) {
			/* All defaults, except the Receive Word
			 * Alignament/Comma Detect Enable....(0xe800)
			 */
			writel(0x00004512, &xcvr->afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x0050100F, &xcvr->afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else if (is_b0(pdev)) {
			/* Configure transmitter SSC parameters */
			writel(0x00030000, &xcvr->afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else if (is_c0(pdev)) {
			/* Configure transmitter SSC parameters */
			writel(0x00010202, &xcvr->afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* All defaults, except the Receive Word
			 * Alignament/Comma Detect Enable....(0xe800)
			 */
			writel(0x00014500, &xcvr->afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else if (is_c1(pdev)) {
			/* Configure transmitter SSC parameters */
			writel(0x00010202, &xcvr->afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* All defaults, except the Receive Word
			 * Alignament/Comma Detect Enable....(0xe800)
			 */
			writel(0x0001C500, &xcvr->afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/* Power up TX and RX out from power down (PWRDNTX and
		 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
		 */
		if (is_a2(pdev))
			writel(0x000003F0, &xcvr->afe_channel_control);
		else if (is_b0(pdev)) {
			writel(0x000003D7, &xcvr->afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x000003D4, &xcvr->afe_channel_control);
		} else if (is_c0(pdev)) {
			writel(0x000001E7, &xcvr->afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x000001E4, &xcvr->afe_channel_control);
		} else if (is_c1(pdev)) {
			/* C1: channel control value depends on cable length. */
			writel(cable_length_long ? 0x000002F7 : 0x000001F7,
			       &xcvr->afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(cable_length_long ? 0x000002F4 : 0x000001F4,
			       &xcvr->afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		if (is_a2(pdev)) {
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &xcvr->afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		if (is_a2(pdev) || is_b0(pdev))
			/* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
			 * TPD=0x0(TX Power On), RDD=0x0(RX Detect
			 * Enabled) ....(0xe800)
			 */
			writel(0x00004100, &xcvr->afe_xcvr_control0);
		else if (is_c0(pdev))
			writel(0x00014100, &xcvr->afe_xcvr_control0);
		else if (is_c1(pdev))
			writel(0x0001C100, &xcvr->afe_xcvr_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a2(pdev))
			writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
		else if (is_b0(pdev)) {
			writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &xcvr->afe_tx_control);
		} else if (is_c0(pdev)) {
			writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &xcvr->afe_tx_control);
		} else if (is_c1(pdev)) {
			/* C1: RX tuning values selected by cable length. */
			writel(cable_length_long ? 0x01500C0C :
			       cable_length_medium ? 0x01400C0D : 0x02400C0D,
			       &xcvr->afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(cable_length_long ? 0x33091C1F :
			       cable_length_medium ? 0x3315181F : 0x2B17161F,
			       &xcvr->afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &xcvr->afe_tx_control);
		}

		udelay(AFE_REGISTER_WRITE_DELAY);

		/* OEM-supplied per-phy TX amplitude values. */
		writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &afe->afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}
2011-07-01 06:14:33 +04:00
/*
 * Reset the spin-up power-control bookkeeping (no requesters, no
 * grants) and bind its timer to power_control_timeout().
 */
static void sci_controller_initialize_power_control(struct isci_host *ihost)
{
	sci_init_timer(&ihost->power_control.timer, power_control_timeout);

	memset(ihost->power_control.requesters, 0,
	       sizeof(ihost->power_control.requesters));

	ihost->power_control.phys_waiting = 0;
	ihost->power_control.phys_granted_power = 0;
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_initialize - bring the silicon from reset to initialized
 * @ihost: controller to initialize
 *
 * Valid only from SCIC_RESET.  Programs the AFE, releases soft reset,
 * polls for context-RAM init completion, sizes the port/task/remote-node
 * resources from the hardware capacity register, maps protocol engines
 * to logical ports, enables PCI relaxed ordering in the DMA engines, and
 * initializes every phy and port object.  The state machine ends in
 * SCIC_INITIALIZED on success or SCIC_FAILED on any error.
 */
static enum sci_status sci_controller_initialize(struct isci_host *ihost)
{
	struct sci_base_state_machine *sm = &ihost->sm;
	enum sci_status result = SCI_FAILURE;
	unsigned long i, state, val;

	if (ihost->sm.current_state_id != SCIC_RESET) {
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCIC_INITIALIZING);

	sci_init_timer(&ihost->phy_timer, phy_startup_timeout);

	ihost->next_phy_to_start = 0;
	ihost->phy_startup_timer_pending = false;

	sci_controller_initialize_power_control(ihost);

	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * / @todo The AFE settings are supposed to be correct for the B0 but
	 * /       presently they seem to be wrong. */
	sci_controller_afe_initialization(ihost);

	/* Take the hardware out of reset */
	writel(0, &ihost->smu_registers->soft_reset_control);

	/*
	 * / @todo Provide meaningfull error code for hardware failure
	 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
	for (i = 100; i >= 1; i--) {
		u32 status;

		/* Loop until the hardware reports success */
		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
		status = readl(&ihost->smu_registers->control_status);

		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
			break;
	}
	/* i == 0 means the poll loop exhausted without RAM init completing. */
	if (i == 0)
		goto out;

	/*
	 * Determine what are the actaul device capacities that the
	 * hardware will support */
	val = readl(&ihost->smu_registers->device_context_capacity);

	/* Record the smaller of the two capacity values */
	ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
	ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
	ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);

	/*
	 * Make all PEs that are unassigned match up with the
	 * logical ports
	 */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct scu_port_task_scheduler_group_registers __iomem
			*ptsg = &ihost->scu_registers->peg0.ptsg;

		writel(i, &ptsg->protocol_engine[i]);
	}

	/* Initialize hardware PCI Relaxed ordering in DMA engines */
	val = readl(&ihost->scu_registers->sdma.pdma_configuration);
	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &ihost->scu_registers->sdma.pdma_configuration);

	val = readl(&ihost->scu_registers->sdma.cdma_configuration);
	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &ihost->scu_registers->sdma.cdma_configuration);

	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		result = sci_phy_initialize(&ihost->phys[i],
					    &ihost->scu_registers->peg0.pe[i].tl,
					    &ihost->scu_registers->peg0.pe[i].ll);
		if (result != SCI_SUCCESS)
			goto out;
	}

	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];

		iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
		iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
		iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
	}

	result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);

out:
	/* Advance the controller state machine */
	if (result == SCI_SUCCESS)
		state = SCIC_INITIALIZED;
	else
		state = SCIC_FAILED;
	sci_change_state(sm, state);

	return result;
}
2012-02-16 01:58:42 +04:00
static int sci_controller_dma_alloc ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
2011-07-01 04:38:32 +04:00
struct device * dev = & ihost - > pdev - > dev ;
2011-06-02 03:00:01 +04:00
size_t size ;
2012-02-16 01:58:42 +04:00
int i ;
/* detect re-initialization */
if ( ihost - > completion_queue )
return 0 ;
2011-05-09 04:34:44 +04:00
2011-06-02 03:00:01 +04:00
size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof ( u32 ) ;
2012-02-16 01:58:42 +04:00
ihost - > completion_queue = dmam_alloc_coherent ( dev , size , & ihost - > cq_dma ,
GFP_KERNEL ) ;
2011-07-01 04:38:32 +04:00
if ( ! ihost - > completion_queue )
2011-05-09 04:34:44 +04:00
return - ENOMEM ;
2011-07-01 04:38:32 +04:00
size = ihost - > remote_node_entries * sizeof ( union scu_remote_node_context ) ;
2012-02-16 01:58:42 +04:00
ihost - > remote_node_context_table = dmam_alloc_coherent ( dev , size , & ihost - > rnc_dma ,
2011-07-01 06:14:33 +04:00
GFP_KERNEL ) ;
2012-02-16 01:58:42 +04:00
2011-07-01 04:38:32 +04:00
if ( ! ihost - > remote_node_context_table )
2011-05-09 04:34:44 +04:00
return - ENOMEM ;
2011-07-01 04:38:32 +04:00
size = ihost - > task_context_entries * sizeof ( struct scu_task_context ) ,
2012-02-16 01:58:42 +04:00
ihost - > task_context_table = dmam_alloc_coherent ( dev , size , & ihost - > tc_dma ,
GFP_KERNEL ) ;
2011-07-01 04:38:32 +04:00
if ( ! ihost - > task_context_table )
2011-05-09 04:34:44 +04:00
return - ENOMEM ;
2012-02-16 01:58:42 +04:00
size = SCI_UFI_TOTAL_SIZE ;
ihost - > ufi_buf = dmam_alloc_coherent ( dev , size , & ihost - > ufi_dma , GFP_KERNEL ) ;
if ( ! ihost - > ufi_buf )
return - ENOMEM ;
for ( i = 0 ; i < SCI_MAX_IO_REQUESTS ; i + + ) {
struct isci_request * ireq ;
dma_addr_t dma ;
ireq = dmam_alloc_coherent ( dev , sizeof ( * ireq ) , & dma , GFP_KERNEL ) ;
if ( ! ireq )
return - ENOMEM ;
ireq - > tc = & ihost - > task_context_table [ i ] ;
ireq - > owning_controller = ihost ;
ireq - > request_daddr = dma ;
ireq - > isci_host = ihost ;
ihost - > reqs [ i ] = ireq ;
}
return 0 ;
}
/*
 * sci_controller_mem_init - allocate DMA memory and program its addresses
 * @ihost: controller being initialized
 *
 * Allocates (or reuses) the DMA-coherent structures, then writes their
 * bus addresses into the SMU/SDMA registers so the silicon knows where
 * the completion queue, remote node context table, task context table
 * and unsolicited frame area live.
 *
 * Return: 0 on success, -ENOMEM if allocation fails.
 */
static int sci_controller_mem_init(struct isci_host *ihost)
{
	int err = sci_controller_dma_alloc(ihost);

	if (err)
		return err;

	writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
	writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);

	writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
	writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);

	writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
	writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);

	sci_unsolicited_frame_control_construct(ihost);

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
	writel(lower_32_bits(ihost->uf_control.headers.physical_address),
	       &ihost->scu_registers->sdma.uf_header_base_address_lower);
	writel(upper_32_bits(ihost->uf_control.headers.physical_address),
	       &ihost->scu_registers->sdma.uf_header_base_address_upper);

	writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
	       &ihost->scu_registers->sdma.uf_address_table_lower);
	writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
	       &ihost->scu_registers->sdma.uf_address_table_upper);

	return 0;
}
2012-02-16 01:58:42 +04:00
/**
* isci_host_init - ( re - ) initialize hardware and internal ( private ) state
* @ ihost : host to init
*
* Any public facing objects ( like asd_sas_port , and asd_sas_phys ) , or
* one - time initialization objects like locks and waitqueues , are
* not touched ( they are initialized in isci_host_alloc )
*/
2011-07-01 04:38:32 +04:00
/*
 * isci_host_init - (re-)initialize hardware and internal (private) state
 * @ihost: host to init
 *
 * Constructs and initializes the core controller state machine, sets up
 * the controller's DMA memory regions, and enables SGPIO hardware control.
 * Returns 0 on success or a negative errno on failure.
 */
int isci_host_init(struct isci_host *ihost)
{
	int i, err;
	enum sci_status status;

	/* Build the core controller object from the mapped SCU/SMU BARs;
	 * the scic_lock serializes against other core state machine users.
	 */
	spin_lock_irq(&ihost->scic_lock);
	status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
	spin_unlock_irq(&ihost->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_err(&ihost->pdev->dev,
			"%s: sci_controller_construct failed - status = %x\n",
			__func__,
			status);
		return -ENODEV;
	}

	/* Run the core initialization state transition under the same lock. */
	spin_lock_irq(&ihost->scic_lock);
	status = sci_controller_initialize(ihost);
	spin_unlock_irq(&ihost->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}

	/* Allocate and program the controller's DMA-coherent memory regions. */
	err = sci_controller_mem_init(ihost);
	if (err)
		return err;

	/* enable sgpio */
	writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
	for (i = 0; i < isci_gpio_count(ihost); i++)
		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
	writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);

	return 0;
}
2011-05-09 04:34:44 +04:00
2011-07-01 06:14:33 +04:00
void sci_controller_link_up ( struct isci_host * ihost , struct isci_port * iport ,
struct isci_phy * iphy )
2011-05-09 04:34:44 +04:00
{
2011-07-01 04:38:32 +04:00
switch ( ihost - > sm . current_state_id ) {
2011-06-02 04:10:43 +04:00
case SCIC_STARTING :
2011-07-01 04:38:32 +04:00
sci_del_timer ( & ihost - > phy_timer ) ;
ihost - > phy_startup_timer_pending = false ;
ihost - > port_agent . link_up_handler ( ihost , & ihost - > port_agent ,
2011-07-01 06:14:33 +04:00
iport , iphy ) ;
sci_controller_start_next_phy ( ihost ) ;
2011-05-09 04:34:44 +04:00
break ;
2011-06-02 04:10:43 +04:00
case SCIC_READY :
2011-07-01 04:38:32 +04:00
ihost - > port_agent . link_up_handler ( ihost , & ihost - > port_agent ,
2011-07-01 06:14:33 +04:00
iport , iphy ) ;
2011-05-09 04:34:44 +04:00
break ;
default :
2011-07-01 04:38:32 +04:00
dev_dbg ( & ihost - > pdev - > dev ,
2011-05-09 04:34:44 +04:00
" %s: SCIC Controller linkup event from phy %d in "
2011-06-29 02:05:53 +04:00
" unexpected state %d \n " , __func__ , iphy - > phy_index ,
2011-07-01 04:38:32 +04:00
ihost - > sm . current_state_id ) ;
2011-05-09 04:34:44 +04:00
}
}
2011-07-01 06:14:33 +04:00
void sci_controller_link_down ( struct isci_host * ihost , struct isci_port * iport ,
struct isci_phy * iphy )
2011-05-09 04:34:44 +04:00
{
2011-07-01 04:38:32 +04:00
switch ( ihost - > sm . current_state_id ) {
2011-06-02 04:10:43 +04:00
case SCIC_STARTING :
case SCIC_READY :
2011-07-01 04:38:32 +04:00
ihost - > port_agent . link_down_handler ( ihost , & ihost - > port_agent ,
2011-06-30 00:09:25 +04:00
iport , iphy ) ;
2011-05-09 04:34:44 +04:00
break ;
default :
2011-07-01 04:38:32 +04:00
dev_dbg ( & ihost - > pdev - > dev ,
2011-05-09 04:34:44 +04:00
" %s: SCIC Controller linkdown event from phy %d in "
" unexpected state %d \n " ,
__func__ ,
2011-06-29 02:05:53 +04:00
iphy - > phy_index ,
2011-07-01 04:38:32 +04:00
ihost - > sm . current_state_id ) ;
2011-05-09 04:34:44 +04:00
}
}
2012-02-23 13:12:10 +04:00
bool sci_controller_has_remote_devices_stopping ( struct isci_host * ihost )
2011-05-09 04:34:44 +04:00
{
u32 index ;
2011-07-01 04:38:32 +04:00
for ( index = 0 ; index < ihost - > remote_node_entries ; index + + ) {
if ( ( ihost - > device_table [ index ] ! = NULL ) & &
( ihost - > device_table [ index ] - > sm . current_state_id = = SCI_DEV_STOPPING ) )
2011-05-09 04:34:44 +04:00
return true ;
}
return false ;
}
2011-07-01 06:14:33 +04:00
void sci_controller_remote_device_stopped ( struct isci_host * ihost ,
struct isci_remote_device * idev )
2011-05-09 04:34:44 +04:00
{
2011-07-01 04:38:32 +04:00
if ( ihost - > sm . current_state_id ! = SCIC_STOPPING ) {
dev_dbg ( & ihost - > pdev - > dev ,
2011-05-09 04:34:44 +04:00
" SCIC Controller 0x%p remote device stopped event "
" from device 0x%p in unexpected state %d \n " ,
2011-07-01 04:38:32 +04:00
ihost , idev ,
ihost - > sm . current_state_id ) ;
2011-05-09 04:34:44 +04:00
return ;
}
2011-07-01 06:14:33 +04:00
if ( ! sci_controller_has_remote_devices_stopping ( ihost ) )
2012-02-23 13:12:10 +04:00
isci_host_stop_complete ( ihost ) ;
2011-05-09 04:34:44 +04:00
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_post_request - hand a request to the hardware.
 * @ihost: controller to post to
 * @request: encoded post context command
 *
 * Writes the command to the SMU post context port register after logging it.
 */
void sci_controller_post_request(struct isci_host *ihost, u32 request)
{
	dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
		__func__, ihost->id, request);

	writel(request, &ihost->smu_registers->post_context_port);
}
2011-07-01 06:14:33 +04:00
/*
 * sci_request_by_tag - look up the active request for an io tag.
 * @ihost: controller owning the request pool
 * @io_tag: tag combining a task context index and a sequence number
 *
 * Returns the request only if the index is in range, the request is
 * marked active, and the tag's sequence matches the current sequence for
 * that slot; otherwise NULL (stale or invalid tag).
 */
struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 tci = ISCI_TAG_TCI(io_tag);
	struct isci_request *ireq;

	if (tci >= ihost->task_context_entries)
		return NULL;

	ireq = ihost->reqs[tci];
	if (!test_bit(IREQ_ACTIVE, &ireq->flags))
		return NULL;

	if (ISCI_TAG_SEQ(io_tag) != ihost->io_request_sequence[tci])
		return NULL;

	return ireq;
}
/**
 * sci_controller_allocate_remote_node_context() - This method allocates a
 *    remote node index and reserves the remote node context space for use.
 *    This method can fail if there are no more remote node indices
 *    available.
 * @ihost: This is the controller object which contains the set of
 *    free remote node ids
 * @idev: This is the device object which is requesting a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there is no
 * available remote node index.
 */
2011-07-01 06:14:33 +04:00
enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
							    struct isci_remote_device *idev,
							    u16 *node_id)
{
	/* Reserve a contiguous run of remote node contexts for the device. */
	u32 remote_node_count = sci_remote_device_node_count(idev);
	u16 node_index;

	node_index = sci_remote_node_table_allocate_remote_node(
			&ihost->available_remote_nodes, remote_node_count);

	if (node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return SCI_FAILURE_INSUFFICIENT_RESOURCES;

	/* Record ownership and report the assigned index to the caller. */
	ihost->device_table[node_index] = idev;
	*node_id = node_index;

	return SCI_SUCCESS;
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_free_remote_node_context - release a device's remote node id.
 * @ihost: controller owning the remote node table
 * @idev: device expected to own @node_id
 * @node_id: index previously returned by the allocate routine
 *
 * A no-op unless @idev actually owns the slot, guarding against stale
 * or double frees.
 */
void sci_controller_free_remote_node_context(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     u16 node_id)
{
	u32 remote_node_count = sci_remote_device_node_count(idev);

	if (ihost->device_table[node_id] != idev)
		return;

	ihost->device_table[node_id] = NULL;
	sci_remote_node_table_release_remote_node_index(
		&ihost->available_remote_nodes, remote_node_count, node_id);
}
2011-07-01 06:14:33 +04:00
void sci_controller_copy_sata_response ( void * response_buffer ,
void * frame_header ,
void * frame_buffer )
2011-05-09 04:34:44 +04:00
{
2011-07-01 06:14:33 +04:00
/* XXX type safety? */
2011-05-09 04:34:44 +04:00
memcpy ( response_buffer , frame_header , sizeof ( u32 ) ) ;
memcpy ( response_buffer + sizeof ( u32 ) ,
frame_buffer ,
sizeof ( struct dev_to_host_fis ) - sizeof ( u32 ) ) ;
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_release_frame - return an unsolicited frame to the hardware.
 * @ihost: controller owning the unsolicited frame pool
 * @frame_index: frame being released
 *
 * If releasing this frame advanced the software get pointer, publish the
 * new value to the SCU SDMA unsolicited frame get pointer register.
 */
void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
{
	if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
		writel(ihost->uf_control.get,
		       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}
2011-06-29 00:47:09 +04:00
/* Return a task context index to the circular free pool. */
void isci_tci_free(struct isci_host *ihost, u16 tci)
{
	u16 slot = ihost->tci_tail & (SCI_MAX_IO_REQUESTS - 1);

	ihost->tci_pool[slot] = tci;
	ihost->tci_tail = slot + 1;
}
static u16 isci_tci_alloc ( struct isci_host * ihost )
{
u16 head = ihost - > tci_head & ( SCI_MAX_IO_REQUESTS - 1 ) ;
u16 tci = ihost - > tci_pool [ head ] ;
ihost - > tci_head = head + 1 ;
return tci ;
}
/* Number of free slots in the tci pool; head/tail form a circular buffer. */
static u16 isci_tci_space(struct isci_host *ihost)
{
	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}
u16 isci_alloc_tag ( struct isci_host * ihost )
{
if ( isci_tci_space ( ihost ) ) {
u16 tci = isci_tci_alloc ( ihost ) ;
2011-07-01 04:38:32 +04:00
u8 seq = ihost - > io_request_sequence [ tci ] ;
2011-06-29 00:47:09 +04:00
return ISCI_TAG ( seq , tci ) ;
}
return SCI_CONTROLLER_INVALID_IO_TAG ;
}
/*
 * isci_free_tag - release an io tag back to the pool.
 * @ihost: controller owning the tag pool
 * @io_tag: tag previously returned by isci_alloc_tag()
 *
 * Rejects tags whose sequence number no longer matches (already freed or
 * recycled).  On success the slot's sequence is bumped so the old tag can
 * never match again.
 */
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 tci = ISCI_TAG_TCI(io_tag);
	u16 seq = ISCI_TAG_SEQ(io_tag);

	/* prevent tail from passing head */
	if (isci_tci_active(ihost) == 0)
		return SCI_FAILURE_INVALID_IO_TAG;

	if (seq != ihost->io_request_sequence[tci])
		return SCI_FAILURE_INVALID_IO_TAG;

	ihost->io_request_sequence[tci] = (seq + 1) & (SCI_MAX_SEQ - 1);
	isci_tci_free(ihost, tci);
	return SCI_SUCCESS;
}
2011-07-01 06:14:33 +04:00
enum sci_status sci_controller_start_io ( struct isci_host * ihost ,
struct isci_remote_device * idev ,
struct isci_request * ireq )
2011-05-09 04:34:44 +04:00
{
enum sci_status status ;
2011-07-01 04:38:32 +04:00
if ( ihost - > sm . current_state_id ! = SCIC_READY ) {
2012-02-10 13:05:43 +04:00
dev_warn ( & ihost - > pdev - > dev , " %s invalid state: %d \n " ,
__func__ , ihost - > sm . current_state_id ) ;
2011-05-09 04:34:44 +04:00
return SCI_FAILURE_INVALID_STATE ;
}
2011-07-01 06:14:33 +04:00
status = sci_remote_device_start_io ( ihost , idev , ireq ) ;
2011-05-09 04:34:44 +04:00
if ( status ! = SCI_SUCCESS )
return status ;
2011-06-28 01:57:03 +04:00
set_bit ( IREQ_ACTIVE , & ireq - > flags ) ;
2011-07-01 13:25:15 +04:00
sci_controller_post_request ( ihost , ireq - > post_context ) ;
2011-05-09 04:34:44 +04:00
return SCI_SUCCESS ;
}
2011-07-01 06:14:33 +04:00
/*
 * sci_controller_terminate_request - remove a started IO from the controller.
 * @ihost: controller the request is running on (must be READY)
 * @idev: remote device associated with the request
 * @ireq: request to terminate
 */
enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
						 struct isci_remote_device *idev,
						 struct isci_request *ireq)
{
	/* terminate an ongoing (i.e. started) core IO request.  This does not
	 * abort the IO request at the target, but rather removes the IO
	 * request from the host controller.
	 */
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}
	status = sci_io_request_terminate(ireq);

	dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
		__func__, status, ireq, ireq->flags);

	/* Post the TC abort only once: test_and_set_bit makes posting
	 * idempotent, and a pending higher-level abort suppresses it.
	 */
	if ((status == SCI_SUCCESS) &&
	    !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
	    !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
		/* Utilize the original post context command and or in the
		 * POST_TC_ABORT request sub-type.
		 */
		sci_controller_post_request(
			ihost, ireq->post_context |
				SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	}
	return status;
}
/**
 * sci_controller_complete_io() - This method will perform core specific
 *    completion operations for an IO request.  After this method is invoked,
 *    the user should consider the IO request as invalid until it is properly
 *    reused (i.e. re-constructed).
 * @ihost: The handle to the controller object for which to complete the
 *    IO request.
 * @idev: The handle to the remote device object for which to complete
 *    the IO request.
 * @ireq: the handle to the io request object to complete.
 */
2011-07-01 06:14:33 +04:00
enum sci_status sci_controller_complete_io ( struct isci_host * ihost ,
struct isci_remote_device * idev ,
struct isci_request * ireq )
2011-05-09 04:34:44 +04:00
{
enum sci_status status ;
u16 index ;
2011-07-01 04:38:32 +04:00
switch ( ihost - > sm . current_state_id ) {
2011-06-02 04:10:43 +04:00
case SCIC_STOPPING :
2011-05-09 04:34:44 +04:00
/* XXX: Implement this function */
return SCI_FAILURE ;
2011-06-02 04:10:43 +04:00
case SCIC_READY :
2011-07-01 06:14:33 +04:00
status = sci_remote_device_complete_io ( ihost , idev , ireq ) ;
2011-05-09 04:34:44 +04:00
if ( status ! = SCI_SUCCESS )
return status ;
2011-06-28 01:57:03 +04:00
index = ISCI_TAG_TCI ( ireq - > io_tag ) ;
clear_bit ( IREQ_ACTIVE , & ireq - > flags ) ;
2011-05-09 04:34:44 +04:00
return SCI_SUCCESS ;
default :
2012-02-10 13:05:43 +04:00
dev_warn ( & ihost - > pdev - > dev , " %s invalid state: %d \n " ,
__func__ , ihost - > sm . current_state_id ) ;
2011-05-09 04:34:44 +04:00
return SCI_FAILURE_INVALID_STATE ;
}
}
2011-07-01 06:14:33 +04:00
enum sci_status sci_controller_continue_io ( struct isci_request * ireq )
2011-05-09 04:34:44 +04:00
{
2011-07-01 04:38:32 +04:00
struct isci_host * ihost = ireq - > owning_controller ;
2011-05-09 04:34:44 +04:00
2011-07-01 04:38:32 +04:00
if ( ihost - > sm . current_state_id ! = SCIC_READY ) {
2012-02-10 13:05:43 +04:00
dev_warn ( & ihost - > pdev - > dev , " %s invalid state: %d \n " ,
__func__ , ihost - > sm . current_state_id ) ;
2011-05-09 04:34:44 +04:00
return SCI_FAILURE_INVALID_STATE ;
}
2011-06-28 01:57:03 +04:00
set_bit ( IREQ_ACTIVE , & ireq - > flags ) ;
2011-07-01 13:25:15 +04:00
sci_controller_post_request ( ihost , ireq - > post_context ) ;
2011-05-09 04:34:44 +04:00
return SCI_SUCCESS ;
}
/**
 * sci_controller_start_task() - This method is called by the SCIC user to
 *    send/start a framework task management request.
 * @ihost: the handle to the controller object for which to start the task
 *    management request.
 * @idev: the handle to the remote device object for which to start
 *    the task management request.
 * @ireq: the handle to the task request object to start.
 */
2011-07-01 06:14:33 +04:00
enum sci_task_status sci_controller_start_task ( struct isci_host * ihost ,
struct isci_remote_device * idev ,
struct isci_request * ireq )
2011-05-09 04:34:44 +04:00
{
enum sci_status status ;
2011-07-01 04:38:32 +04:00
if ( ihost - > sm . current_state_id ! = SCIC_READY ) {
dev_warn ( & ihost - > pdev - > dev ,
2011-05-09 04:34:44 +04:00
" %s: SCIC Controller starting task from invalid "
" state \n " ,
__func__ ) ;
return SCI_TASK_FAILURE_INVALID_STATE ;
}
2011-07-01 06:14:33 +04:00
status = sci_remote_device_start_task ( ihost , idev , ireq ) ;
2011-05-09 04:34:44 +04:00
switch ( status ) {
case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS :
2011-06-18 01:18:39 +04:00
set_bit ( IREQ_ACTIVE , & ireq - > flags ) ;
2011-05-09 04:34:44 +04:00
/*
* We will let framework know this task request started successfully ,
* although core is still woring on starting the request ( to post tc when
* RNC is resumed . )
*/
return SCI_SUCCESS ;
case SCI_SUCCESS :
2011-06-18 01:18:39 +04:00
set_bit ( IREQ_ACTIVE , & ireq - > flags ) ;
2011-07-01 13:25:15 +04:00
sci_controller_post_request ( ihost , ireq - > post_context ) ;
2011-05-09 04:34:44 +04:00
break ;
default :
break ;
}
return status ;
}
2011-09-02 08:18:31 +04:00
/*
 * Apply TX_GP register writes to the SGPIO output-data-select registers.
 * Returns -EINVAL for the unsupported TX_GP_CFG register (index 0),
 * otherwise non-zero when at least one drive's register was written.
 */
static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
{
	int d;

	/* no support for TX_GP_CFG */
	if (reg_index == 0)
		return -EINVAL;

	for (d = 0; d < isci_gpio_count(ihost); d++) {
		u32 val = 0x444; /* all ODx.n clear */
		int i;

		for (i = 0; i < 3; i++) {
			/* Was initialized to (i << 2) + 2 and immediately
			 * overwritten - dead store removed.
			 */
			int bit;

			bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
						       write_data, reg_index,
						       reg_count);
			if (bit < 0)
				break;

			/* if od is set, clear the 'invert' bit */
			val &= ~(bit << ((i << 2) + 2));
		}

		if (i < 3)
			break;

		writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
	}

	/* unless reg_index is > 1, we should always be able to write at
	 * least one register
	 */
	return d > 0;
}
/*
 * isci_gpio_write - libsas sgpio write entry point.
 * @sas_ha: libsas host whose lldd_ha is our isci_host
 * @reg_type: sgpio register class; only SAS_GPIO_REG_TX_GP is supported
 * @reg_index: first register to write
 * @reg_count: number of registers to write
 * @write_data: register payload
 *
 * Returns the result of the TX_GP write, or -EINVAL for unsupported
 * register types.
 */
int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
		    u8 reg_count, u8 *write_data)
{
	struct isci_host *ihost = sas_ha->lldd_ha;

	if (reg_type == SAS_GPIO_REG_TX_GP)
		return sci_write_gpio_tx_gp(ihost, reg_index, reg_count,
					    write_data);

	return -EINVAL;
}