
scsi: smartpqi: Add support for RAID NCQ priority

Enable NCQ priority feature for the RAID path when AIO path is disabled.

Move function pqi_is_io_high_priority() up to avoid adding a prototype.
Remove unused argument ctrl_info.

Reviewed-by: Scott Benesh <scott.benesh@microchip.com>
Reviewed-by: Scott Teel <scott.teel@microchip.com>
Reviewed-by: Mike McGowen <mike.mcgowen@microchip.com>
Reviewed-by: Kevin Barnett <kevin.barnett@microchip.com>
Signed-off-by: Gilbert Wu <Gilbert.Wu@microchip.com>
Signed-off-by: Don Brace <don.brace@microchip.com>
Link: https://lore.kernel.org/r/20230428153712.297638-8-don.brace@microchip.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
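
Not part of the patch, but useful context: with this change a RAID-path read or write is marked high priority only when the target device has ncq_prio_enable set and the request carries the IOPRIO_CLASS_RT priority class (see pqi_is_io_high_priority() in the first hunk). The sketch below shows one way userspace can exercise that path: it switches the process to the realtime I/O priority class via the ioprio_set(2) syscall and issues a direct read. The device node, and the assumption that NCQ priority has already been enabled through the driver's sas_ncq_prio_enable sysfs attribute (handled by pqi_sas_ncq_prio_enable_store() in the last hunk), are illustrative only and not spelled out by this commit.

/*
 * Illustration only -- not part of this patch. Assumes /dev/sdb is a
 * smartpqi RAID volume and that NCQ priority has already been enabled
 * for it through the driver's sysfs attribute.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

/* ioprio encoding from the kernel ABI; glibc has no ioprio_set() wrapper. */
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_RT		1
#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* Mark this process's I/O as realtime class, priority level 0. */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0)) < 0) {
		perror("ioprio_set");
		return EXIT_FAILURE;
	}

	/* O_DIRECT read so the request reaches the driver with RT ioprio. */
	int fd = open("/dev/sdb", O_RDONLY | O_DIRECT);	/* hypothetical device */
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	void *buf;
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return EXIT_FAILURE;
	}

	/* The READ(10)/READ(16) built for this request should now be flagged
	 * high priority by pqi_is_io_high_priority(). */
	ssize_t n = pread(fd, buf, 4096, 0);
	if (n < 0)
		perror("pread");

	free(buf);
	close(fd);
	return n < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}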
@@ -519,6 +519,36 @@ static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
 	writeb(status, ctrl_info->soft_reset_status);
 }
 
+static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
+{
+	bool io_high_prio;
+	int priority_class;
+
+	io_high_prio = false;
+
+	if (device->ncq_prio_enable) {
+		priority_class =
+			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
+		if (priority_class == IOPRIO_CLASS_RT) {
+			/* Set NCQ priority for read/write commands. */
+			switch (scmd->cmnd[0]) {
+			case WRITE_16:
+			case READ_16:
+			case WRITE_12:
+			case READ_12:
+			case WRITE_10:
+			case READ_10:
+			case WRITE_6:
+			case READ_6:
+				io_high_prio = true;
+				break;
+			}
+		}
+	}
+
+	return io_high_prio;
+}
+
 static int pqi_map_single(struct pci_dev *pci_dev,
 	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
 	size_t buffer_length, enum dma_data_direction data_direction)
@@ -5505,15 +5535,19 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
 	pqi_scsi_done(scmd);
 }
 
-static int pqi_raid_submit_scsi_cmd_with_io_request(
-	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
+static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
-	struct pqi_queue_group *queue_group)
+	struct pqi_queue_group *queue_group, bool io_high_prio)
 {
 	int rc;
 	size_t cdb_length;
+	struct pqi_io_request *io_request;
 	struct pqi_raid_path_request *request;
 
+	io_request = pqi_alloc_io_request(ctrl_info, scmd);
+	if (!io_request)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	io_request->io_complete_callback = pqi_raid_io_complete;
 	io_request->scmd = scmd;
 
@@ -5523,6 +5557,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
 	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
 	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
 	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+	request->command_priority = io_high_prio;
 	put_unaligned_le16(io_request->index, &request->request_id);
 	request->error_index = request->request_id;
 	memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
@@ -5588,14 +5623,11 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
 	struct pqi_queue_group *queue_group)
 {
-	struct pqi_io_request *io_request;
+	bool io_high_prio;
 
-	io_request = pqi_alloc_io_request(ctrl_info, scmd);
-	if (!io_request)
-		return SCSI_MLQUEUE_HOST_BUSY;
+	io_high_prio = pqi_is_io_high_priority(device, scmd);
 
-	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
-		device, scmd, queue_group);
+	return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
 }
 
 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
@@ -5640,44 +5672,13 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
 	pqi_scsi_done(scmd);
 }
 
-static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
-	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
-{
-	bool io_high_prio;
-	int priority_class;
-
-	io_high_prio = false;
-
-	if (device->ncq_prio_enable) {
-		priority_class =
-			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
-		if (priority_class == IOPRIO_CLASS_RT) {
-			/* Set NCQ priority for read/write commands. */
-			switch (scmd->cmnd[0]) {
-			case WRITE_16:
-			case READ_16:
-			case WRITE_12:
-			case READ_12:
-			case WRITE_10:
-			case READ_10:
-			case WRITE_6:
-			case READ_6:
-				io_high_prio = true;
-				break;
-			}
-		}
-	}
-
-	return io_high_prio;
-}
-
 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
 	struct pqi_queue_group *queue_group)
 {
 	bool io_high_prio;
 
-	io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
+	io_high_prio = pqi_is_io_high_priority(device, scmd);
 
 	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
 		scmd->cmnd, scmd->cmd_len, queue_group, NULL,
@@ -5695,10 +5696,10 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_aio_path_request *request;
 	struct pqi_scsi_dev *device;
 
-	device = scmd->device->hostdata;
 	io_request = pqi_alloc_io_request(ctrl_info, scmd);
 	if (!io_request)
 		return SCSI_MLQUEUE_HOST_BUSY;
+
 	io_request->io_complete_callback = pqi_aio_io_complete;
 	io_request->scmd = scmd;
 	io_request->raid_bypass = raid_bypass;
@@ -5713,6 +5714,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
 	request->command_priority = io_high_prio;
 	put_unaligned_le16(io_request->index, &request->request_id);
 	request->error_index = request->request_id;
+	device = scmd->device->hostdata;
 	if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
 		put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
 	if (cdb_length > sizeof(request->cdb))
@@ -7367,8 +7369,7 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
 		return -ENODEV;
 	}
 
-	if (!device->ncq_prio_support ||
-		!device->is_physical_device) {
+	if (!device->ncq_prio_support) {
 		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 		return -EINVAL;
 	}
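
The final hunk removes the is_physical_device requirement from pqi_sas_ncq_prio_enable_store(), so the NCQ priority knob can now be turned on for logical RAID volumes as well, matching the new RAID-path support. A minimal sketch of flipping that knob from userspace follows; the sysfs path and attribute name are assumptions for illustration (they are not spelled out in this diff), and the write requires appropriate privileges.

/* Illustration only: enable NCQ priority on an assumed smartpqi RAID volume. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical sysfs path for the sdb volume's SCSI device. */
	const char *attr = "/sys/block/sdb/device/sas_ncq_prio_enable";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror(attr);
		return 1;
	}
	fputs("1\n", f);	/* handled by pqi_sas_ncq_prio_enable_store() */
	return fclose(f) ? 1 : 0;
}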