Merge branch '6.10/scsi-fixes' into 6.11/scsi-staging
Pull in my fixes branch to resolve an mpi3mr merge conflict reported by sfr.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 5e9a522b07
@@ -217,7 +217,7 @@ current *struct* is::
 	int (*media_changed)(struct cdrom_device_info *, int);
 	int (*tray_move)(struct cdrom_device_info *, int);
 	int (*lock_door)(struct cdrom_device_info *, int);
-	int (*select_speed)(struct cdrom_device_info *, int);
+	int (*select_speed)(struct cdrom_device_info *, unsigned long);
 	int (*get_last_session) (struct cdrom_device_info *,
 			struct cdrom_multisession *);
 	int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *);
@@ -396,7 +396,7 @@ action need be taken, and the return value should be 0.

 ::

-	int select_speed(struct cdrom_device_info *cdi, int speed)
+	int select_speed(struct cdrom_device_info *cdi, unsigned long speed)

 Some CD-ROM drives are capable of changing their head-speed. There
 are several reasons for changing the speed of a CD-ROM drive. Badly
@@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state)
 	}
 }

-static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
-					      struct scsi_sense_hdr *sense_hdr)
+static void alua_handle_state_transition(struct scsi_device *sdev)
 {
 	struct alua_dh_data *h = sdev->handler_data;
 	struct alua_port_group *pg;

+	rcu_read_lock();
+	pg = rcu_dereference(h->pg);
+	if (pg)
+		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+	rcu_read_unlock();
+	alua_check(sdev, false);
+}
+
+static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
+					      struct scsi_sense_hdr *sense_hdr)
+{
 	switch (sense_hdr->sense_key) {
 	case NOT_READY:
 		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
 			/*
 			 * LUN Not Accessible - ALUA state transition
 			 */
-			rcu_read_lock();
-			pg = rcu_dereference(h->pg);
-			if (pg)
-				pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
-			rcu_read_unlock();
-			alua_check(sdev, false);
+			alua_handle_state_transition(sdev);
 			return NEEDS_RETRY;
 		}
 		break;
 	case UNIT_ATTENTION:
+		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
+			/*
+			 * LUN Not Accessible - ALUA state transition
+			 */
+			alua_handle_state_transition(sdev);
+			return NEEDS_RETRY;
+		}
 		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
 			/*
 			 * Power On, Reset, or Bus Device Reset.
@@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev)

 	retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
 				      ALUA_FAILOVER_RETRIES, &sense_hdr);
-	if (sense_hdr.sense_key == NOT_READY &&
+	if ((sense_hdr.sense_key == NOT_READY ||
+	     sense_hdr.sense_key == UNIT_ATTENTION) &&
 	    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
 		return SCSI_DH_RETRY;
 	else if (retval)
@@ -145,6 +145,20 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i
 		func, dev->parent ? "exp-attached" :
 		"direct-attached",
 		SAS_ADDR(dev->sas_addr), err);
+
+	/*
+	 * If the device probe failed, the expander phy attached address
+	 * needs to be reset so that the phy will not be treated as flutter
+	 * in the next revalidation
+	 */
+	if (dev->parent && !dev_is_expander(dev->dev_type)) {
+		struct sas_phy *phy = dev->phy;
+		struct domain_device *parent = dev->parent;
+		struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];
+
+		memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+	}
+
 	sas_unregister_dev(dev->port, dev);
 }

@@ -3247,10 +3247,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(persistent_id);

+/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
+/**
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_enable attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_enable_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+
+	if (!sdev_priv_data)
+		return 0;
+
+	return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+sas_ncq_prio_enable_store(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+	bool ncq_prio_enable = 0;
+
+	if (kstrtobool(buf, &ncq_prio_enable))
+		return -EINVAL;
+
+	if (!sas_ata_ncq_prio_supported(sdev))
+		return -EINVAL;
+
+	sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
+
+	return strlen(buf);
+}
+static DEVICE_ATTR_RW(sas_ncq_prio_enable);
+
 static struct attribute *mpi3mr_dev_attrs[] = {
 	&dev_attr_sas_address.attr,
 	&dev_attr_device_handle.attr,
 	&dev_attr_persistent_id.attr,
+	&dev_attr_sas_ncq_prio_supported.attr,
+	&dev_attr_sas_ncq_prio_enable.attr,
 	NULL,
 };

@@ -1378,7 +1378,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
 			continue;

 		if (i >= sizeof(mr_sas_port->phy_mask) * 8) {
-			ioc_warn(mrioc, "skipping port %u, max allowed value is %lu\n",
+			ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
 			    i, sizeof(mr_sas_port->phy_mask) * 8);
 			goto out_fail;
 		}
@@ -8512,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
 	if (ioc->facts.MaxDevHandle % 8)
 		ioc->pd_handles_sz++;
+	/*
+	 * pd_handles_sz should have, at least, the minimal room for
+	 * set_bit()/test_bit(), otherwise out-of-memory touch may occur.
+	 */
+	ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
+
 	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
 	    GFP_KERNEL);
 	if (!ioc->pd_handles) {
@@ -8529,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
 	if (ioc->facts.MaxDevHandle % 8)
 		ioc->pend_os_device_add_sz++;
+
+	/*
+	 * pend_os_device_add_sz should have, at least, the minimal room for
+	 * set_bit()/test_bit(), otherwise out-of-memory may occur.
+	 */
+	ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
+					   sizeof(unsigned long));
 	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
 	    GFP_KERNEL);
 	if (!ioc->pend_os_device_add) {
@@ -8820,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
 		if (ioc->facts.MaxDevHandle % 8)
 			pd_handles_sz++;

+		/*
+		 * pd_handles should have, at least, the minimal room for
+		 * set_bit()/test_bit(), otherwise out-of-memory touch may
+		 * occur.
+		 */
+		pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
 		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
 		    GFP_KERNEL);
 		if (!pd_handles) {
@@ -2048,9 +2048,6 @@ void
 mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 	struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);

-/* NCQ Prio Handling Check */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev);
-
 void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_init_debugfs(void);
@@ -4088,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev,
 {
 	struct scsi_device *sdev = to_scsi_device(dev);

-	return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
 }
 static DEVICE_ATTR_RO(sas_ncq_prio_supported);

@@ -4123,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev,
 	if (kstrtobool(buf, &ncq_prio_enable))
 		return -EINVAL;

-	if (!scsih_ncq_prio_supp(sdev))
+	if (!sas_ata_ncq_prio_supported(sdev))
 		return -EINVAL;

 	sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
@@ -302,8 +302,8 @@ struct _scsi_io_transfer {

 /**
  * _scsih_set_debug_level - global setting of ioc->logging_level.
- * @val: ?
- * @kp: ?
+ * @val: value of the parameter to be set
+ * @kp: pointer to kernel_param structure
  *
  * Note: The logging levels are defined in mpt3sas_debug.h.
  */
@@ -12571,29 +12571,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
 	return PCI_ERS_RESULT_RECOVERED;
 }

-/**
- * scsih_ncq_prio_supp - Check for NCQ command priority support
- * @sdev: scsi device struct
- *
- * This is called when a user indicates they would like to enable
- * ncq command priorities. This works only on SATA devices.
- */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev)
-{
-	struct scsi_vpd *vpd;
-	bool ncq_prio_supp = false;
-
-	rcu_read_lock();
-	vpd = rcu_dereference(sdev->vpd_pg89);
-	if (!vpd || vpd->len < 214)
-		goto out;
-
-	ncq_prio_supp = (vpd->data[213] >> 4) & 1;
-out:
-	rcu_read_unlock();
-
-	return ncq_prio_supp;
-}
 /*
  * The pci device ids are defined in mpi/mpi2_cnfg.h.
  */
@@ -363,6 +363,7 @@ struct qedf_ctx {
 #define QEDF_IN_RECOVERY		5
 #define QEDF_DBG_STOP_IO		6
 #define QEDF_PROBING			8
+#define QEDF_STAG_IN_PROGRESS		9
 	unsigned long flags; /* Miscellaneous state flags */
 	int fipvlan_retries;
 	u8 num_queues;
@@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
 	 */
 	if (resp == fc_lport_flogi_resp) {
 		qedf->flogi_cnt++;
+		qedf->flogi_pending++;
+
+		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+			qedf->flogi_pending = 0;
+		}
+
 		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
 			schedule_delayed_work(&qedf->stag_work, 2);
 			return NULL;
 		}
-		qedf->flogi_pending++;
+
 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
 				arg, timeout);
 	}
@@ -912,13 +919,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 	struct qedf_ctx *qedf;
 	struct qed_link_output if_link;

+	qedf = lport_priv(lport);
+
 	if (lport->vport) {
+		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
 		return;
 	}

-	qedf = lport_priv(lport);
-
 	qedf->flogi_pending = 0;
 	/* For host reset, essentially do a soft link up/down */
 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
@@ -938,6 +946,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 	if (!if_link.link_up) {
 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
 			  "Physical link is not up.\n");
+		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 		return;
 	}
 	/* Flush and wait to make sure link down is processed */
@@ -950,6 +959,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 		  "Queue link up work.\n");
 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
 	    0);
+	clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
 }

 /* Reset the host by gracefully logging out and then logging back in */
@@ -3463,6 +3473,7 @@ retry_probe:
 	}

 	/* Start the Slowpath-process */
+	memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
 	slowpath_params.int_mode = QED_INT_MODE_MSIX;
 	slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
 	slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
@@ -3721,6 +3732,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
 {
 	struct qedf_ctx *qedf;
 	int rc;
+	int cnt = 0;

 	if (!pdev) {
 		QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3738,6 +3750,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
 		return;
 	}

+stag_in_prog:
+	if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
+		cnt++;
+
+		if (cnt < 5) {
+			msleep(500);
+			goto stag_in_prog;
+		}
+	}
+
 	if (mode != QEDF_MODE_RECOVERY)
 		set_bit(QEDF_UNLOADING, &qedf->flags);

@@ -3997,6 +4020,24 @@ void qedf_stag_change_work(struct work_struct *work)
 	struct qedf_ctx *qedf =
 	    container_of(work, struct qedf_ctx, stag_work.work);

+	if (!qedf) {
+		QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+		return;
+	}
+
+	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Already is in recovery, hence not calling software context reset.\n");
+		return;
+	}
+
+	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+		return;
+	}
+
+	set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+
 	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
 	    dev_name(&qedf->pdev->dev), __func__, __LINE__,
 	    qedf->dbg_ctx.host_no);

@@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
 	if (result < SCSI_VPD_HEADER_SIZE)
 		return 0;

+	if (result > sizeof(vpd)) {
+		dev_warn_once(&sdev->sdev_gendev,
+			      "%s: long VPD page 0 length: %d bytes\n",
+			      __func__, result);
+		result = sizeof(vpd);
+	}
+
 	result -= SCSI_VPD_HEADER_SIZE;
 	if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
 		return 0;
@@ -666,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev)
 			sdev->use_10_for_rw = 0;

 		sdev->cdl_supported = 1;
+
+		/*
+		 * If the device supports CDL, make sure that the current drive
+		 * feature status is consistent with the user controlled
+		 * cdl_enable state.
+		 */
+		scsi_cdl_enable(sdev, sdev->cdl_enable);
 	} else {
 		sdev->cdl_supported = 0;
 	}
@@ -926,6 +926,7 @@ static const int device_qfull_result =
 static const int condition_met_result = SAM_STAT_CONDITION_MET;

 static struct dentry *sdebug_debugfs_root;
+static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);

 static void sdebug_err_free(struct rcu_head *head)
 {
@@ -1148,6 +1149,8 @@ static int sdebug_target_alloc(struct scsi_target *starget)
 	if (!targetip)
 		return -ENOMEM;

+	async_synchronize_full_domain(&sdebug_async_domain);
+
 	targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
 				sdebug_debugfs_root);

@@ -1174,7 +1177,8 @@ static void sdebug_target_destroy(struct scsi_target *starget)
 	targetip = (struct sdebug_target_info *)starget->hostdata;
 	if (targetip) {
 		starget->hostdata = NULL;
-		async_schedule(sdebug_tartget_cleanup_async, targetip);
+		async_schedule_domain(sdebug_tartget_cleanup_async, targetip,
+				      &sdebug_async_domain);
 	}
 }

@@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
 }
 EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);

+/**
+ * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support
+ * @sdev: SCSI device
+ *
+ * Check if an ATA device supports NCQ priority using VPD page 89h (ATA
+ * Information). Since this VPD page is implemented only for ATA devices,
+ * this function always returns false for SCSI devices.
+ */
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev)
+{
+	struct scsi_vpd *vpd;
+	bool ncq_prio_supported = false;
+
+	rcu_read_lock();
+	vpd = rcu_dereference(sdev->vpd_pg89);
+	if (vpd && vpd->len >= 214)
+		ncq_prio_supported = (vpd->data[213] >> 4) & 1;
+	rcu_read_unlock();
+
+	return ncq_prio_supported;
+}
+EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported);
+
 /*
  * SAS Phy attributes
  */
@@ -63,6 +63,7 @@
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
 #include <scsi/scsi_driver.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_host.h>
@@ -3118,6 +3119,9 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
 	struct scsi_mode_data data;
 	int res;

+	if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
+		return;
+
 	res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
 			      /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
 			      sdkp->max_retries, &data, &sshdr);
@@ -3565,16 +3569,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,

 static void sd_read_block_zero(struct scsi_disk *sdkp)
 {
-	unsigned int buf_len = sdkp->device->sector_size;
-	char *buffer, cmd[10] = { };
+	struct scsi_device *sdev = sdkp->device;
+	unsigned int buf_len = sdev->sector_size;
+	u8 *buffer, cmd[16] = { };

 	buffer = kmalloc(buf_len, GFP_KERNEL);
 	if (!buffer)
 		return;

-	cmd[0] = READ_10;
-	put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
-	put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+	if (sdev->use_16_for_rw) {
+		cmd[0] = READ_16;
+		put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
+		put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
+	} else {
+		cmd[0] = READ_10;
+		put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+		put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+	}

 	scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
 			 SD_TIMEOUT, sdkp->max_retries, NULL);
@@ -4106,8 +4117,6 @@ static int sd_resume(struct device *dev)
 {
 	struct scsi_disk *sdkp = dev_get_drvdata(dev);

-	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
-
 	if (opal_unlock_from_suspend(sdkp->opal_dev)) {
 		sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
 		return -EIO;
@@ -4124,12 +4133,13 @@ static int sd_resume_common(struct device *dev, bool runtime)
 	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
 		return 0;

+	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+
 	if (!sd_do_start_stop(sdkp->device, runtime)) {
 		sdkp->suspended = false;
 		return 0;
 	}

-	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
 	ret = sd_start_stop_device(sdkp, 1);
 	if (!ret) {
 		sd_resume(dev);
@@ -65,7 +65,7 @@ int sr_disk_status(struct cdrom_device_info *);
 int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *);
 int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
 int sr_reset(struct cdrom_device_info *);
-int sr_select_speed(struct cdrom_device_info *cdi, int speed);
+int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed);
 int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);

 int sr_is_xa(Scsi_CD *);
@@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi)
 	return 0;
 }

-int sr_select_speed(struct cdrom_device_info *cdi, int speed)
+int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed)
 {
 	Scsi_CD *cd = cdi->handle;
 	struct packet_command cgc;

+	/* avoid exceeding the max speed or overflowing integer bounds */
+	speed = clamp(0, speed, 0xffff / 177);
+
 	if (speed == 0)
 		speed = 0xffff; /* set to max */
 	else
@@ -105,16 +105,15 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
 * @hba: per adapter instance
 * @req: pointer to the request to be issued
 *
- * Return: the hardware queue instance on which the request would
- * be queued.
+ * Return: the hardware queue instance on which the request will be or has
+ * been queued. %NULL if the request has already been freed.
 */
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 					 struct request *req)
 {
-	u32 utag = blk_mq_unique_tag(req);
-	u32 hwq = blk_mq_unique_tag_to_hwq(utag);
+	struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);

-	return &hba->uhq[hwq];
+	return hctx ? &hba->uhq[hctx->queue_num] : NULL;
 }

 /**
@@ -531,6 +530,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
 		if (!cmd)
 			return -EINVAL;
 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+		if (!hwq)
+			return 0;
 	} else {
 		hwq = hba->dev_cmd_queue;
 	}
@@ -650,20 +651,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
 	struct ufs_hw_queue *hwq;
 	unsigned long flags;
-	int err = FAILED;
+	int err;

 	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
 		dev_err(hba->dev,
 			"%s: skip abort. cmd at tag %d already completed.\n",
 			__func__, tag);
-		goto out;
+		return FAILED;
 	}

 	/* Skip task abort in case previous aborts failed and report failure */
 	if (lrbp->req_abort_skip) {
 		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
 			__func__, tag);
-		goto out;
+		return FAILED;
 	}

 	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
@@ -675,7 +676,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 		 */
 		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
 			__func__, hwq->id, tag);
-		goto out;
+		return FAILED;
 	}

 	/*
@@ -683,18 +684,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	 * in the completion queue either. Query the device to see if
 	 * the command is being processed in the device.
 	 */
-	if (ufshcd_try_to_abort_task(hba, tag)) {
+	err = ufshcd_try_to_abort_task(hba, tag);
+	if (err) {
 		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
 		lrbp->req_abort_skip = true;
-		goto out;
+		return FAILED;
 	}

-	err = SUCCESS;
 	spin_lock_irqsave(&hwq->cq_lock, flags);
 	if (ufshcd_cmd_inflight(lrbp->cmd))
 		ufshcd_release_scsi_cmd(hba, lrbp);
 	spin_unlock_irqrestore(&hwq->cq_lock, flags);

-out:
-	return err;
+	return SUCCESS;
 }
@@ -1369,7 +1369,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
 	 * make sure that there are no outstanding requests when
 	 * clock scaling is in progress
 	 */
-	ufshcd_scsi_block_requests(hba);
+	blk_mq_quiesce_tagset(&hba->host->tag_set);
 	mutex_lock(&hba->wb_mutex);
 	down_write(&hba->clk_scaling_lock);

@@ -1378,7 +1378,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
 		ret = -EBUSY;
 		up_write(&hba->clk_scaling_lock);
 		mutex_unlock(&hba->wb_mutex);
-		ufshcd_scsi_unblock_requests(hba);
+		blk_mq_unquiesce_tagset(&hba->host->tag_set);
 		goto out;
 	}

@@ -1399,7 +1399,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc

 	mutex_unlock(&hba->wb_mutex);

-	ufshcd_scsi_unblock_requests(hba);
+	blk_mq_unquiesce_tagset(&hba->host->tag_set);
 	ufshcd_release(hba);
 }

@@ -6462,6 +6462,8 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
 	/* Release cmd in MCQ mode if abort succeeds */
 	if (is_mcq_enabled(hba) && (*ret == 0)) {
 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+		if (!hwq)
+			return 0;
 		spin_lock_irqsave(&hwq->cq_lock, flags);
 		if (ufshcd_cmd_inflight(lrbp->cmd))
 			ufshcd_release_scsi_cmd(hba, lrbp);
@@ -8829,6 +8831,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
 	    (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
 		/* Reset the device and controller before doing reinit */
 		ufshcd_device_reset(hba);
+		ufs_put_device_desc(hba);
 		ufshcd_hba_stop(hba);
 		ufshcd_vops_reinit_notify(hba);
 		ret = ufshcd_hba_enable(hba);
@@ -79,6 +79,12 @@ static int slave_alloc (struct scsi_device *sdev)
 	if (us->protocol == USB_PR_BULK && us->max_lun > 0)
 		sdev->sdev_bflags |= BLIST_FORCELUN;

+	/*
+	 * Some USB storage devices reset if the IO advice hints grouping mode
+	 * page is queried. Hence skip that mode page.
+	 */
+	sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
 	return 0;
 }

@@ -21,6 +21,7 @@
 #include <scsi/scsi.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_dbg.h>
+#include <scsi/scsi_devinfo.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
@@ -820,6 +821,12 @@ static int uas_slave_alloc(struct scsi_device *sdev)
 	struct uas_dev_info *devinfo =
 		(struct uas_dev_info *)sdev->host->hostdata;

+	/*
+	 * Some USB storage devices reset if the IO advice hints grouping mode
+	 * page is queried. Hence skip that mode page.
+	 */
+	sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
 	sdev->hostdata = devinfo;
 	return 0;
 }
@@ -77,7 +77,7 @@ struct cdrom_device_ops {
 			    unsigned int clearing, int slot);
 	int (*tray_move) (struct cdrom_device_info *, int);
 	int (*lock_door) (struct cdrom_device_info *, int);
-	int (*select_speed) (struct cdrom_device_info *, int);
+	int (*select_speed) (struct cdrom_device_info *, unsigned long);
 	int (*get_last_session) (struct cdrom_device_info *,
 				 struct cdrom_multisession *);
 	int (*get_mcn) (struct cdrom_device_info *,
@@ -69,8 +69,10 @@
 #define BLIST_RETRY_ITF		((__force blist_flags_t)(1ULL << 32))
 /* Always retry ABORTED_COMMAND with ASC 0xc1 */
 #define BLIST_RETRY_ASC_C1	((__force blist_flags_t)(1ULL << 33))
+/* Do not query the IO Advice Hints Grouping mode page */
+#define BLIST_SKIP_IO_HINTS	((__force blist_flags_t)(1ULL << 34))

-#define __BLIST_LAST_USED BLIST_RETRY_ASC_C1
+#define __BLIST_LAST_USED BLIST_SKIP_IO_HINTS

 #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
 			       (__force blist_flags_t) \
@@ -200,6 +200,8 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *);
 void sas_disable_tlr(struct scsi_device *);
 void sas_enable_tlr(struct scsi_device *);

+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev);
+
 extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
 extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
 void sas_rphy_free(struct sas_rphy *);