/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
# ifndef _CXLFLASH_SUPERPIPE_H
# define _CXLFLASH_SUPERPIPE_H
extern struct cxlflash_global global ;
/*
 * Terminology: use afu (and not adapter) to refer to the HW.
 * Adapter is the entire slot and includes PSL out of which
 * only the AFU is visible to user space.
 */
/* Chunk size parms: note sislite minimum chunk size is
 * 0x10000 LBAs, corresponding to an NMASK of 16.
 */
#define MC_CHUNK_SIZE	(1 << MC_RHT_NMASK)	/* in LBAs */

#define CMD_TIMEOUT	30	/* 30 secs */
#define CMD_RETRIES	5	/* 5 retries for scsi_execute */

#define MAX_SECTOR_UNIT	512	/* max_sector is in 512 byte multiples */

/* Convert between a 0-based FC channel index and the 1-based port number */
#define CHAN2PORT(_x)	((_x) + 1)
#define PORT2CHAN(_x)	((_x) - 1)
/*
 * Provisioning mode of a LUN; a LUN is in at most one mode at a time
 * (tracked per-LUN in glun_info.mode, refcounted via glun_info.users).
 */
enum lun_mode {
	MODE_NONE = 0,	/* Not provisioned */
	MODE_VIRTUAL,	/* Used as a virtual LUN */
	MODE_PHYSICAL	/* Used as a physical LUN */
};
/* Global (entire driver, spans adapters) lun_info structure */
struct glun_info {
u64 max_lba ; /* from read cap(16) */
u32 blk_len ; /* from read cap(16) */
2015-08-14 05:47:53 +03:00
enum lun_mode mode ; /* NONE, VIRTUAL, PHYSICAL */
2015-08-14 05:47:43 +03:00
int users ; /* Number of users w/ references to LUN */
u8 wwid [ 16 ] ;
struct mutex mutex ;
2015-08-14 05:47:53 +03:00
struct blka blka ;
2015-08-14 05:47:43 +03:00
struct list_head list ;
} ;
/* Local (per-adapter) lun_info structure */
struct llun_info {
u64 lun_id [ CXLFLASH_NUM_FC_PORTS ] ; /* from REPORT_LUNS */
u32 lun_index ; /* Index in the LUN table */
u32 host_no ; /* host_no from Scsi_host */
u32 port_sel ; /* What port to use for this LUN */
2015-08-14 05:47:53 +03:00
bool in_table ; /* Whether a LUN table entry was created */
2015-08-14 05:47:43 +03:00
u8 wwid [ 16 ] ; /* Keep a duplicate copy here? */
struct glun_info * parent ; /* Pointer to entry in global LUN structure */
struct scsi_device * sdev ;
struct list_head list ;
} ;
/* Ties a LUN to the context that accesses it (linked on ctx_info.luns) */
struct lun_access {
struct llun_info * lli ; /* Per-adapter LUN being accessed */
struct scsi_device * sdev ; /* SCSI device associated with the access */
struct list_head list ; /* Link in the owning context's luns list */
} ;
/*
 * Qualifiers for context lookup (see get_context()); values are single
 * bits so they may be OR'ed together.
 * Note: the original text was garbled ("1 < < 1"); restored to left shifts.
 */
enum ctx_ctrl {
	CTX_CTRL_CLONE		= (1 << 1),
	CTX_CTRL_ERR		= (1 << 2),
	CTX_CTRL_ERR_FALLBACK	= (1 << 3),
	CTX_CTRL_NOPID		= (1 << 4),
	CTX_CTRL_FILE		= (1 << 5)
};
/*
 * Build a 64-bit context ID: the (masked) context handle occupies the
 * upper bits, the raw id occupies the low 32 bits.
 */
#define ENCODE_CTXID(_ctx, _id)	(((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id)
/* Recover the low 32-bit id from an encoded context ID */
#define DECODE_CTXID(_val)	(_val & 0xFFFFFFFF)
struct ctx_info {
2015-10-21 23:14:48 +03:00
struct sisl_ctrl_map __iomem * ctrl_map ; /* initialized at startup */
2015-08-14 05:47:43 +03:00
struct sisl_rht_entry * rht_start ; /* 1 page (req'd for alignment),
alloc / free on attach / detach */
u32 rht_out ; /* Number of checked out RHT entries */
u32 rht_perms ; /* User-defined permissions for RHT entries */
struct llun_info * * rht_lun ; /* Mapping of RHT entries to LUNs */
2015-10-21 23:11:34 +03:00
u8 * rht_needs_ws ; /* User-desired write-same function per RHTE */
2015-08-14 05:47:43 +03:00
struct cxl_ioctl_start_work work ;
u64 ctxid ;
int lfd ;
pid_t pid ;
cxlflash: Split out context initialization
Presently, context information structures are allocated and
initialized in the same routine, create_context(). This imposes
an ordering restriction such that all pieces of information needed
to initialize a context must be known before the context is even
allocated.
This design point is not flexible when the order of context
creation needs to be modified. Specifically, this can lead to
problems when members of the context information structure are
a part of an ordering dependency (i.e. - the 'work' structure
embedded within the context).
To remedy, the allocation is left as-is, inside of the existing
create_context() routine and the initialization is transitioned
to a new void routine, init_context(). At the same time, in
anticipation of these routines not being called in sequence, a
state boolean is added to the context information structure to
track when the context has been initilized. The context teardown
routine, destroy_context(), is modified to support being called
with a non-initialized context.
Signed-off-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Reviewed-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2016-03-05 00:55:16 +03:00
bool initialized ;
2015-08-14 05:47:43 +03:00
bool unavail ;
bool err_recovery_active ;
struct mutex mutex ; /* Context protection */
struct cxl_context * ctx ;
struct list_head luns ; /* LUNs attached to this context */
const struct vm_operations_struct * cxl_mmap_vmops ;
struct file * file ;
struct list_head list ; /* Link contexts in error recovery */
} ;
/* Driver-wide singleton state (declared above as 'global') */
struct cxlflash_global {
struct mutex mutex ; /* Serializes access to this global state */
struct list_head gluns ; /* list of glun_info structs */
struct page * err_page ; /* One page of all 0xF for error notification */
} ;
2015-08-14 05:47:53 +03:00
int cxlflash_vlun_resize ( struct scsi_device * , struct dk_cxlflash_resize * ) ;
int _cxlflash_vlun_resize ( struct scsi_device * , struct ctx_info * ,
struct dk_cxlflash_resize * ) ;
2015-08-14 05:47:43 +03:00
int cxlflash_disk_release ( struct scsi_device * , struct dk_cxlflash_release * ) ;
int _cxlflash_disk_release ( struct scsi_device * , struct ctx_info * ,
struct dk_cxlflash_release * ) ;
2015-08-14 05:47:53 +03:00
int cxlflash_disk_clone ( struct scsi_device * , struct dk_cxlflash_clone * ) ;
int cxlflash_disk_virtual_open ( struct scsi_device * , void * ) ;
2015-08-14 05:47:43 +03:00
int cxlflash_lun_attach ( struct glun_info * , enum lun_mode , bool ) ;
void cxlflash_lun_detach ( struct glun_info * ) ;
struct ctx_info * get_context ( struct cxlflash_cfg * , u64 , void * , enum ctx_ctrl ) ;
void put_context ( struct ctx_info * ) ;
struct sisl_rht_entry * get_rhte ( struct ctx_info * , res_hndl_t ,
struct llun_info * ) ;
struct sisl_rht_entry * rhte_checkout ( struct ctx_info * , struct llun_info * ) ;
void rhte_checkin ( struct ctx_info * , struct sisl_rht_entry * ) ;
2015-08-14 05:47:53 +03:00
void cxlflash_ba_terminate ( struct ba_lun * ) ;
int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *);

/*
 * Recheck adapter state: waits if the adapter is still being recovered and
 * returns a failure if the recovery failed.  Used by ioctl threads around
 * long-running operations (e.g. after reacquiring the ioctl read semaphore
 * following scsi_execute()) so that EEH recovery is not deadlocked waiting
 * on an in-flight ioctl.
 */
int check_state(struct cxlflash_cfg *);
# endif /* ifndef _CXLFLASH_SUPERPIPE_H */