diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 4af8fd4a390b..0b35bd5fb659 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -148,12 +148,17 @@ static struct damon_target *target;
 static struct damos *damon_lru_sort_new_scheme(
 		struct damos_access_pattern *pattern, enum damos_action action)
 {
-	struct damos_quota quota = damon_lru_sort_quota;
+	struct damos *damos;
+	struct damos_quota *quota = kmemdup(&damon_lru_sort_quota,
+			sizeof(damon_lru_sort_quota), GFP_KERNEL);
+
+	if (!quota)
+		return NULL;
 
 	/* Use half of total quota for hot/cold pages sorting */
-	quota.ms = quota.ms / 2;
+	quota->ms = quota->ms / 2;
 
-	return damon_new_scheme(
+	damos = damon_new_scheme(
 			/* find the pattern, and */
 			pattern,
 			/* (de)prioritize on LRU-lists */
@@ -161,10 +166,12 @@ static struct damos *damon_lru_sort_new_scheme(
 			/* for each aggregation interval */
 			0,
 			/* under the quota. */
-			&quota,
+			quota,
 			/* (De)activate this according to the watermarks. */
 			&damon_lru_sort_wmarks,
 			NUMA_NO_NODE);
+	kfree(quota);
+	return damos;
 }
 
 /* Create a DAMON-based operation scheme for hot memory regions */