1

mmc: hsq: Improve random I/O write performance for 4k buffers

By dynamically adjusting the host->hsq_depth, based upon the buffer size
being 4k and that we get at least two I/O write requests in flight, we can
improve the throughput a bit. This is typical for a random I/O write
pattern.

More precisely, by dynamically changing the number of requests in flight
from 2 to 5, we can, on some platforms, observe a ~4-5% increase in throughput.

Signed-off-by: Wenchao Chen <wenchao.chen@unisoc.com>
Link: https://lore.kernel.org/r/20230919074707.25517-3-wenchao.chen@unisoc.com
[Ulf: Re-wrote the commitmsg, minor adjustment to the code - all to clarify.]
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
This commit is contained in:
Wenchao Chen 2023-09-19 15:47:07 +08:00 committed by Ulf Hansson
parent 2e2b547950
commit 68df98c483
2 changed files with 26 additions and 0 deletions

View File

@ -21,6 +21,25 @@ static void mmc_hsq_retry_handler(struct work_struct *work)
mmc->ops->request(mmc, hsq->mrq);
}
/*
 * Tune the hsq queue depth before pumping the next request.
 *
 * Start from the conservative HSQ_NORMAL_DEPTH and raise it to
 * HSQ_PERFORMANCE_DEPTH when at least two 4K write requests are queued,
 * which is the typical signature of a random I/O write workload.
 */
static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	unsigned int slot_idx;
	unsigned int small_write_cnt = 0;

	mmc->hsq_depth = HSQ_NORMAL_DEPTH;

	for (slot_idx = 0; slot_idx < HSQ_NUM_SLOTS; slot_idx++) {
		struct mmc_request *mrq = hsq->slot[slot_idx].mrq;

		/* Only data requests can match the 4K-write pattern. */
		if (!mrq || !mrq->data)
			continue;
		if (!(mrq->data->flags & MMC_DATA_WRITE))
			continue;
		if (mrq->data->blksz * mrq->data->blocks != 4096)
			continue;

		/* Two queued 4K writes are enough to justify a deeper queue. */
		if (++small_write_cnt == 2) {
			mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
			break;
		}
	}
}
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
struct mmc_host *mmc = hsq->mmc;
@ -42,6 +61,8 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
return;
}
mmc_hsq_modify_threshold(hsq);
slot = &hsq->slot[hsq->next_tag];
hsq->mrq = slot->mrq;
hsq->qcnt--;

View File

@ -10,6 +10,11 @@
* flight to avoid a long latency.
*/
#define HSQ_NORMAL_DEPTH 2
/*
* For 4k random writes, we allow hsq_depth to increase to 5
* for better performance.
*/
#define HSQ_PERFORMANCE_DEPTH 5
struct hsq_slot {
struct mmc_request *mrq;