irqchip/gic-v4: Substitute vmovp_lock for a per-VM lock
vmovp_lock is abused in a number of cases to serialise updates to
vlpi_count[] and deal with map/unmap of a VM to ITSs.

Instead, provide a per-VM lock and revisit the use of vlpi_count[]
so that it is always wrapped in this per-VM vmapp_lock.

This reduces the potential contention on a concurrent VMOVP command,
and paves the way for subsequent VPE locking that holding vmovp_lock
actively prevents due to the lock ordering.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nianyao Tang <tangnianyao@huawei.com>
Link: https://lore.kernel.org/r/20240705093155.871070-3-maz@kernel.org
parent 7d2c2048a8
commit f0eb154c39
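Note on the locking idiom: the diff below replaces open-coded
raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs with the
scope-based guard() helpers from <linux/spinlock.h>, which drop the lock
(and restore the interrupt state) automatically when the enclosing scope
ends. A minimal sketch of that idiom, assuming a hypothetical demo_vm
structure and demo_update() function that are not part of this patch:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-in for the per-VM state protected by vmapp_lock */
struct demo_vm {
        raw_spinlock_t  vmapp_lock;
        u32             count;
};

static void demo_update(struct demo_vm *vm)
{
        /*
         * Equivalent to raw_spin_lock_irqsave(&vm->vmapp_lock, flags)
         * here and raw_spin_unlock_irqrestore(&vm->vmapp_lock, flags)
         * at every exit from this scope; no 'flags' variable is needed,
         * which is why the patch can delete the local declarations.
         */
        guard(raw_spinlock_irqsave)(&vm->vmapp_lock);

        vm->count++;
}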
drivers/irqchip/irq-gic-v3-its.c
@@ -1317,7 +1317,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
 {
         struct its_cmd_desc desc = {};
         struct its_node *its;
-        unsigned long flags;
         int col_id = vpe->col_idx;

         desc.its_vmovp_cmd.vpe = vpe;
@@ -1329,6 +1328,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
                 return;
         }

+        /*
+         * Protect against concurrent updates of the mapping state on
+         * individual VMs.
+         */
+        guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
+
         /*
          * Yet another marvel of the architecture. If using the
          * its_list "feature", we need to make sure that all ITSs
@@ -1337,8 +1342,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
          *
          * Wall <-- Head.
          */
-        raw_spin_lock_irqsave(&vmovp_lock, flags);
-
+        guard(raw_spinlock)(&vmovp_lock);
         desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
         desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

@@ -1353,8 +1357,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
                 desc.its_vmovp_cmd.col = &its->collections[col_id];
                 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
         }
-
-        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
 }

 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
@@ -1791,12 +1793,10 @@ static bool gic_requires_eager_mapping(void)

 static void its_map_vm(struct its_node *its, struct its_vm *vm)
 {
-        unsigned long flags;
-
         if (gic_requires_eager_mapping())
                 return;

-        raw_spin_lock_irqsave(&vmovp_lock, flags);
+        guard(raw_spinlock_irqsave)(&vm->vmapp_lock);

         /*
          * If the VM wasn't mapped yet, iterate over the vpes and get
@@ -1814,19 +1814,15 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
                         its_send_vinvall(its, vpe);
                 }
         }
-
-        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
 }

 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
 {
-        unsigned long flags;
-
         /* Not using the ITS list? Everything is always mapped. */
         if (gic_requires_eager_mapping())
                 return;

-        raw_spin_lock_irqsave(&vmovp_lock, flags);
+        guard(raw_spinlock_irqsave)(&vm->vmapp_lock);

         if (!--vm->vlpi_count[its->list_nr]) {
                 int i;
@@ -1834,8 +1830,6 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
                 for (i = 0; i < vm->nr_vpes; i++)
                         its_send_vmapp(its, vm->vpes[i], false);
         }
-
-        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
 }

 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
@@ -3942,6 +3936,8 @@ static void its_vpe_invall(struct its_vpe *vpe)
 {
         struct its_node *its;

+        guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
+
         list_for_each_entry(its, &its_nodes, entry) {
                 if (!is_v4(its))
                         continue;
@@ -4547,6 +4543,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
         vm->db_lpi_base = base;
         vm->nr_db_lpis = nr_ids;
         vm->vprop_page = vprop_page;
+        raw_spin_lock_init(&vm->vmapp_lock);

         if (gic_rdists->has_rvpeid)
                 irqchip = &its_vpe_4_1_irq_chip;
include/linux/irqchip/arm-gic-v4.h
@@ -25,6 +25,14 @@ struct its_vm {
         irq_hw_number_t         db_lpi_base;
         unsigned long           *db_bitmap;
         int                     nr_db_lpis;
+        /*
+         * Ensures mutual exclusion between updates to vlpi_count[]
+         * and map/unmap when using the ITSList mechanism.
+         *
+         * The lock order for any sequence involving the ITSList is
+         * vmapp_lock -> vpe_lock ->vmovp_lock.
+         */
+        raw_spinlock_t          vmapp_lock;
         u32                     vlpi_count[GICv4_ITS_LIST_MAX];
 };

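The comment added to struct its_vm above documents the intended lock
order: vmapp_lock (per VM), then vpe_lock (the per-VPE lock referenced
there, added by subsequent patches in this series), then the global
vmovp_lock. As an illustrative sketch only, with hypothetical parameters
rather than the driver's actual data structures, a path needing more than
one of these locks would nest them in that order:

#include <linux/spinlock.h>

static void demo_ordered_path(raw_spinlock_t *vmapp_lock,
                              raw_spinlock_t *vpe_lock,
                              raw_spinlock_t *vmovp_lock)
{
        guard(raw_spinlock_irqsave)(vmapp_lock); /* outermost: per-VM mapping state */
        guard(raw_spinlock)(vpe_lock);           /* then the per-VPE lock */
        guard(raw_spinlock)(vmovp_lock);         /* innermost: VMOVP serialisation */

        /* ... emit ITS commands while all three locks are held ... */
}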