drm/xe: Drop xe_gt_tlb_invalidation_wait
Having two methods to wait on GT TLB invalidations is not ideal. Remove
xe_gt_tlb_invalidation_wait and only use GT TLB invalidation fences.
In addition to two methods being less than ideal, once GT TLB
invalidations are coalesced the seqno cannot be assigned during
xe_gt_tlb_invalidation_ggtt/range. Thus xe_gt_tlb_invalidation_wait
would not have a seqno to wait on. A fence, however, can be armed and
later signaled.
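
For illustration, the resulting caller pattern (condensed from the
xe_gt_tlb_invalidation_ggtt() hunk below; names and flow are taken from the
diff, nothing new is introduced) is:

	struct xe_gt_tlb_invalidation_fence fence;
	int ret;

	/* Arm an on-stack fence before issuing the invalidation */
	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
	ret = xe_gt_tlb_invalidation_guc(gt, &fence);
	if (ret < 0)
		return ret;

	/* Signaled by the G2H done handler, or by the timeout worker */
	xe_gt_tlb_invalidation_fence_wait(&fence);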
v3:
- Add explanation about coalescing to commit message
v4:
- Don't put dma fence if defined on stack (CI)
v5:
- Initialize ret to zero (CI)
v6:
- Use invalidation_fence_signal helper in tlb timeout (Matthew Auld)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240719172905.1527927-3-matthew.brost@intel.com
(cherry picked from commit 61ac035361)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 90be4cc6f7
commit 58bfe66744
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -17,6 +17,8 @@
 #include "xe_trace.h"
 #include "regs/xe_guc_regs.h"
 
+#define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS
+
 /*
  * TLB inval depends on pending commands in the CT queue and then the real
  * invalidation time. Double up the time to process full CT queue
@@ -33,6 +35,23 @@ static long tlb_timeout_jiffies(struct xe_gt *gt)
 	return hw_tlb_timeout + 2 * delay;
 }
 
+static void
+__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
+
+	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
+	dma_fence_signal(&fence->base);
+	if (!stack)
+		dma_fence_put(&fence->base);
+}
+
+static void
+invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+	list_del(&fence->link);
+	__invalidation_fence_signal(xe, fence);
+}
+
 static void xe_gt_tlb_fence_timeout(struct work_struct *work)
 {
@@ -54,10 +73,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
 		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
 			  fence->seqno, gt->tlb_invalidation.seqno_recv);
 
-		list_del(&fence->link);
 		fence->base.error = -ETIME;
-		dma_fence_signal(&fence->base);
-		dma_fence_put(&fence->base);
+		invalidation_fence_signal(xe, fence);
 	}
 	if (!list_empty(&gt->tlb_invalidation.pending_fences))
 		queue_delayed_work(system_wq,
@@ -87,21 +104,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
 	return 0;
 }
 
-static void
-__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
-	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
-	dma_fence_signal(&fence->base);
-	dma_fence_put(&fence->base);
-}
-
-static void
-invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
-	list_del(&fence->link);
-	__invalidation_fence_signal(xe, fence);
-}
-
 /**
  * xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
  * @gt: graphics tile
@@ -111,7 +113,6 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
 void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
 {
 	struct xe_gt_tlb_invalidation_fence *fence, *next;
-	struct xe_guc *guc = &gt->uc.guc;
 	int pending_seqno;
 
 	/*
@@ -134,7 +135,6 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
 	else
 		pending_seqno = gt->tlb_invalidation.seqno - 1;
 	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
-	wake_up_all(&guc->ct.wq);
 
 	list_for_each_entry_safe(fence, next,
 				 &gt->tlb_invalidation.pending_fences, link)
@@ -165,6 +165,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
 	int seqno;
 	int ret;
 
+	xe_gt_assert(gt, fence);
+
 	/*
	 * XXX: The seqno algorithm relies on TLB invalidation being processed
	 * in order which they currently are, if that changes the algorithm will
@@ -173,10 +175,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
 
 	mutex_lock(&guc->ct.lock);
 	seqno = gt->tlb_invalidation.seqno;
-	if (fence) {
-		fence->seqno = seqno;
-		trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
-	}
+	fence->seqno = seqno;
+	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
 	action[1] = seqno;
 	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
 				    G2H_LEN_DW_TLB_INVALIDATE, 1);
@@ -209,7 +209,6 @@ static int send_tlb_invalidation(struct xe_guc *guc,
 			TLB_INVALIDATION_SEQNO_MAX;
 		if (!gt->tlb_invalidation.seqno)
 			gt->tlb_invalidation.seqno = 1;
-		ret = seqno;
 	}
 	mutex_unlock(&guc->ct.lock);
 
@@ -223,14 +222,16 @@ static int send_tlb_invalidation(struct xe_guc *guc,
 /**
  * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
  * @gt: graphics tile
+ * @fence: invalidation fence which will be signal on TLB invalidation
+ * completion
  *
  * Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
- * caller can use seqno + xe_gt_tlb_invalidation_wait to wait for completion.
+ * caller can use the invalidation fence to wait for completion.
  *
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: 0 on success, negative error code on error
  */
-static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
+				      struct xe_gt_tlb_invalidation_fence *fence)
 {
 	u32 action[] = {
 		XE_GUC_ACTION_TLB_INVALIDATION,
@@ -238,7 +239,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
 		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
 	};
 
-	return send_tlb_invalidation(&gt->uc.guc, NULL, action,
+	return send_tlb_invalidation(&gt->uc.guc, fence, action,
 				     ARRAY_SIZE(action));
 }
 
@@ -257,13 +258,15 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
 
 	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
 	    gt->uc.guc.submission_state.enabled) {
-		int seqno;
+		struct xe_gt_tlb_invalidation_fence fence;
+		int ret;
 
-		seqno = xe_gt_tlb_invalidation_guc(gt);
-		if (seqno <= 0)
-			return seqno;
+		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
+		if (ret < 0)
+			return ret;
 
-		xe_gt_tlb_invalidation_wait(gt, seqno);
+		xe_gt_tlb_invalidation_fence_wait(&fence);
 	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
 		if (IS_SRIOV_VF(xe))
 			return 0;
@@ -290,18 +293,16 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
  *
  * @gt: graphics tile
  * @fence: invalidation fence which will be signal on TLB invalidation
- * completion, can be NULL
+ * completion
  * @start: start address
 * @end: end address
 * @asid: address space id
 *
 * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
 *
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
 */
 int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
 				 struct xe_gt_tlb_invalidation_fence *fence,
@@ -312,11 +313,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
 	u32 action[MAX_TLB_INVALIDATION_LEN];
 	int len = 0;
 
+	xe_gt_assert(gt, fence);
+
 	/* Execlists not supported */
 	if (gt_to_xe(gt)->info.force_execlist) {
-		if (fence)
-			__invalidation_fence_signal(xe, fence);
-
+		__invalidation_fence_signal(xe, fence);
 		return 0;
 	}
 
@@ -382,12 +383,10 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
 * @vma: VMA to invalidate
 *
 * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
 *
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
 */
 int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
 			       struct xe_gt_tlb_invalidation_fence *fence,
@@ -400,43 +399,6 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
 					   xe_vma_vm(vma)->usm.asid);
 }
 
-/**
- * xe_gt_tlb_invalidation_wait - Wait for TLB to complete
- * @gt: graphics tile
- * @seqno: seqno to wait which was returned from xe_gt_tlb_invalidation
- *
- * Wait for tlb_timeout_jiffies() for a TLB invalidation to complete.
- *
- * Return: 0 on success, -ETIME on TLB invalidation timeout
- */
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
-{
-	struct xe_guc *guc = &gt->uc.guc;
-	int ret;
-
-	/* Execlists not supported */
-	if (gt_to_xe(gt)->info.force_execlist)
-		return 0;
-
-	/*
-	 * XXX: See above, this algorithm only works if seqno are always in
-	 * order
-	 */
-	ret = wait_event_timeout(guc->ct.wq,
-				 tlb_invalidation_seqno_past(gt, seqno),
-				 tlb_timeout_jiffies(gt));
-	if (!ret) {
-		struct drm_printer p = xe_gt_err_printer(gt);
-
-		xe_gt_err(gt, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
-			  seqno, gt->tlb_invalidation.seqno_recv);
-		xe_guc_ct_print(&guc->ct, &p, true);
-		return -ETIME;
-	}
-
-	return 0;
-}
-
 /**
  * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
  * @guc: guc
@@ -480,12 +442,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
 		return 0;
 	}
 
-	/*
-	 * wake_up_all() and wait_event_timeout() already have the correct
-	 * barriers.
-	 */
 	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
-	wake_up_all(&guc->ct.wq);
 
 	list_for_each_entry_safe(fence, next,
 				 &gt->tlb_invalidation.pending_fences, link) {
@@ -530,11 +487,13 @@ static const struct dma_fence_ops invalidation_fence_ops = {
 * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
 * @gt: GT
 * @fence: TLB invalidation fence to initialize
+ * @stack: fence is stack variable
 *
 * Initialize TLB invalidation fence for use
 */
 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
-				       struct xe_gt_tlb_invalidation_fence *fence)
+				       struct xe_gt_tlb_invalidation_fence *fence,
+				       bool stack)
 {
 	spin_lock_irq(&gt->tlb_invalidation.lock);
 	dma_fence_init(&fence->base, &invalidation_fence_ops,
@@ -542,5 +501,8 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
 		       dma_fence_context_alloc(1), 1);
 	spin_unlock_irq(&gt->tlb_invalidation.lock);
 	INIT_LIST_HEAD(&fence->link);
-	dma_fence_get(&fence->base);
+	if (stack)
+		set_bit(FENCE_STACK_BIT, &fence->base.flags);
+	else
+		dma_fence_get(&fence->base);
 }
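
A note on the design choice above: the @stack flag only changes reference
counting. A rough sketch of the two modes, using the names from the hunks
above:

	/* Heap-embedded fence (see xe_pt.c below): init takes a dma_fence
	 * reference which __invalidation_fence_signal() drops via
	 * dma_fence_put() once the invalidation completes.
	 */
	xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);

	/* On-stack fence: no reference is taken; FENCE_STACK_BIT tells
	 * the signal path to skip dma_fence_put(), since the caller's
	 * stack frame owns the storage and outlives the wait.
	 */
	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
	xe_gt_tlb_invalidation_fence_wait(&fence);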
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -23,10 +23,16 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
 int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
 				 struct xe_gt_tlb_invalidation_fence *fence,
 				 u64 start, u64 end, u32 asid);
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
 int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
 
 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
-				       struct xe_gt_tlb_invalidation_fence *fence);
+				       struct xe_gt_tlb_invalidation_fence *fence,
+				       bool stack);
+
+static inline void
+xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
+{
+	dma_fence_wait(&fence->base, false);
+}
 
 #endif	/* _XE_GT_TLB_INVALIDATION_ */
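
Worth noting: xe_gt_tlb_invalidation_fence_wait() is an uninterruptible
dma_fence_wait() with no return value, whereas the removed
xe_gt_tlb_invalidation_wait() returned -ETIME on timeout. A timeout is now
reported by the xe_gt_tlb_fence_timeout() worker, which sets the fence
error before signaling, so a caller that still needs that information could
check it after waiting (no caller in this patch does; sketch only):

	xe_gt_tlb_invalidation_fence_wait(&fence);
	if (fence.base.error == -ETIME)
		xe_gt_err(gt, "TLB invalidation timed out\n");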
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1153,7 +1153,7 @@ static int invalidation_fence_init(struct xe_gt *gt,
 
 	trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
 
-	xe_gt_tlb_invalidation_fence_init(gt, &ifence->base);
+	xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
 
 	ifence->fence = fence;
 	ifence->gt = gt;
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3341,10 +3341,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 {
 	struct xe_device *xe = xe_vma_vm(vma)->xe;
 	struct xe_tile *tile;
+	struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE];
 	u32 tile_needs_invalidate = 0;
-	int seqno[XE_MAX_TILES_PER_DEVICE];
 	u8 id;
-	int ret;
+	int ret = 0;
 
 	xe_assert(xe, !xe_vma_is_null(vma));
 	trace_xe_vma_invalidate(vma);
@@ -3369,29 +3369,31 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 
 	for_each_tile(tile, xe, id) {
 		if (xe_pt_zap_ptes(tile, vma)) {
-			tile_needs_invalidate |= BIT(id);
 			xe_device_wmb(xe);
+			xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+							  &fence[id], true);
+
 			/*
			 * FIXME: We potentially need to invalidate multiple
			 * GTs within the tile
			 */
-			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
-			if (seqno[id] < 0)
-				return seqno[id];
+			ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
+							 &fence[id], vma);
+			if (ret < 0)
+				goto wait;
+
+			tile_needs_invalidate |= BIT(id);
 		}
 	}
 
-	for_each_tile(tile, xe, id) {
-		if (tile_needs_invalidate & BIT(id)) {
-			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
-			if (ret < 0)
-				return ret;
-		}
-	}
+wait:
+	for_each_tile(tile, xe, id)
+		if (tile_needs_invalidate & BIT(id))
+			xe_gt_tlb_invalidation_fence_wait(&fence[id]);
 
 	vma->tile_invalidated = vma->tile_mask;
 
-	return 0;
+	return ret;
 }
 
 struct xe_vm_snapshot {
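
The wait: label changes the error flow of xe_vm_invalidate_vma(): rather
than returning as soon as one tile's send fails, the function falls through
to the wait loop so fences already armed for earlier tiles are still waited
on, and only then propagates ret. Since tile_needs_invalidate is now set
only after a successful send, the failed tile is skipped in the loop, as
this annotated excerpt of the hunk above shows:

	if (ret < 0)
		goto wait;	/* still wait on fences issued so far */
	...
wait:
	for_each_tile(tile, xe, id)
		if (tile_needs_invalidate & BIT(id))	/* failed tile skipped */
			xe_gt_tlb_invalidation_fence_wait(&fence[id]);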