diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 0b35bd5fb659..4af8fd4a390b 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -148,17 +148,12 @@ static struct damon_target *target;
 static struct damos *damon_lru_sort_new_scheme(
 		struct damos_access_pattern *pattern, enum damos_action action)
 {
-	struct damos *damos;
-	struct damos_quota *quota = kmemdup(&damon_lru_sort_quota,
-			sizeof(damon_lru_sort_quota), GFP_KERNEL);
-
-	if (!quota)
-		return NULL;
+	struct damos_quota quota = damon_lru_sort_quota;
 
 	/* Use half of total quota for hot/cold pages sorting */
-	quota->ms = quota->ms / 2;
+	quota.ms = quota.ms / 2;
 
-	damos = damon_new_scheme(
+	return damon_new_scheme(
 			/* find the pattern, and */
 			pattern,
 			/* (de)prioritize on LRU-lists */
@@ -166,12 +161,10 @@ static struct damos *damon_lru_sort_new_scheme(
 			/* for each aggregation interval */
 			0,
 			/* under the quota. */
-			quota,
+			&quota,
 			/* (De)activate this according to the watermarks. */
 			&damon_lru_sort_wmarks,
 			NUMA_NO_NODE);
-	kfree(quota);
-	return damos;
 }
 
 /* Create a DAMON-based operation scheme for hot memory regions */