accel/habanalabs: trace dma map sgtable
Traces the DMA [un]map_sgtable using the new traces we added.

Signed-off-by: Ohad Sharabi <osharabi@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
commit ff92d01052
parent 309ed96903
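The change follows the driver's existing caller-tracking convention (the same one the hl_asic_dma_pool_free() macro in the header hunk below already uses): the public name becomes a macro that forwards to a *_caller() variant together with __func__, so every trace record names the driver function that requested the mapping. A minimal, self-contained sketch of that pattern in plain C follows; the demo_map names and the printf() standing in for the tracepoint are purely illustrative, not part of the driver:

#include <stdio.h>

/* The *_caller variant receives the call-site name explicitly. */
static int demo_map_caller(int handle, const char *caller)
{
        /* printf() stands in for trace_habanalabs_dma_map_page(). */
        printf("map handle=%d caller=%s\n", handle, caller);
        return 0;
}

/*
 * The public name is a macro, so every existing call site
 * passes its own __func__ without being edited.
 */
#define demo_map(handle) demo_map_caller(handle, __func__)

static int some_driver_path(void)
{
        return demo_map(42);    /* traces "caller=some_driver_path" */
}

int main(void)
{
        return some_driver_path();
}

The diff below applies exactly this shape to hl_dma_map_sgtable()/hl_dma_unmap_sgtable(), while the previous non-tracing implementations are renamed to hl_asic_dma_[un]map_sgtable() and wired into the asic_funcs dma_[un]map_sgtable hooks.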
@@ -188,7 +188,36 @@ void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *
 	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
 }
 
-int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
+				enum dma_data_direction dir, const char *caller)
+{
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	struct scatterlist *sg;
+	int rc, i;
+
+	rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir);
+	if (rc)
+		return rc;
+
+	if (!trace_habanalabs_dma_map_page_enabled())
+		return 0;
+
+	for_each_sgtable_dma_sg(sgt, sg, i)
+		trace_habanalabs_dma_map_page(hdev->dev,
+				page_to_phys(sg_page(sg)),
+				sg->dma_address - prop->device_dma_offset_for_host_access,
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+				sg->dma_length,
+#else
+				sg->length,
+#endif
+				dir, caller);
+
+	return 0;
+}
+
+int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+				enum dma_data_direction dir)
 {
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	struct scatterlist *sg;
@@ -206,7 +235,30 @@ int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_da
 	return 0;
 }
 
-void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
+void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
+				enum dma_data_direction dir, const char *caller)
+{
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	struct scatterlist *sg;
+	int i;
+
+	hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir);
+
+	if (trace_habanalabs_dma_unmap_page_enabled()) {
+		for_each_sgtable_dma_sg(sgt, sg, i)
+			trace_habanalabs_dma_unmap_page(hdev->dev, page_to_phys(sg_page(sg)),
+					sg->dma_address - prop->device_dma_offset_for_host_access,
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+					sg->dma_length,
+#else
+					sg->length,
+#endif
+					dir, caller);
+	}
+}
+
+void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+				enum dma_data_direction dir)
 {
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	struct scatterlist *sg;
@@ -159,6 +159,11 @@ enum hl_mmu_page_table_location {
 #define hl_asic_dma_pool_free(hdev, vaddr, dma_addr) \
 	hl_asic_dma_pool_free_caller(hdev, vaddr, dma_addr, __func__)
 
+#define hl_dma_map_sgtable(hdev, sgt, dir) \
+	hl_dma_map_sgtable_caller(hdev, sgt, dir, __func__)
+#define hl_dma_unmap_sgtable(hdev, sgt, dir) \
+	hl_dma_unmap_sgtable_caller(hdev, sgt, dir, __func__)
+
 /*
  * Reset Flags
  *
@@ -1520,9 +1525,9 @@ struct engines_data {
  * @asic_dma_pool_free: free small DMA allocation from pool.
  * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
  * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
- * @hl_dma_unmap_sgtable: DMA unmap scatter-gather table.
+ * @dma_unmap_sgtable: DMA unmap scatter-gather table.
+ * @dma_map_sgtable: DMA map scatter-gather table.
  * @cs_parser: parse Command Submission.
- * @asic_dma_map_sgtable: DMA map scatter-gather table.
  * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
  * @update_eq_ci: update event queue CI.
  * @context_switch: called upon ASID context switch.
@@ -1643,12 +1648,11 @@ struct hl_asic_funcs {
 			size_t size, dma_addr_t *dma_handle);
 	void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
 				size_t size, void *vaddr);
-	void (*hl_dma_unmap_sgtable)(struct hl_device *hdev,
-				struct sg_table *sgt,
+	void (*dma_unmap_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
 				enum dma_data_direction dir);
+	int (*dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
+				enum dma_data_direction dir);
 	int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
-	int (*asic_dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
-				enum dma_data_direction dir);
 	void (*add_end_of_cb_packets)(struct hl_device *hdev,
 				void *kernel_address, u32 len,
 				u32 original_len,
@@ -3670,8 +3674,13 @@ void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t
 			dma_addr_t *dma_handle, const char *caller);
 void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
 			const char *caller);
-int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
-void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
+			enum dma_data_direction dir, const char *caller);
+void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
+			enum dma_data_direction dir, const char *caller);
+int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
 				enum dma_data_direction dir);
+void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
+				enum dma_data_direction dir);
 int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
 	enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar);
@@ -244,7 +244,7 @@ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
 
 	*p_userptr = userptr;
 
-	rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
+	rc = hl_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
 	if (rc) {
 		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
 		goto dma_map_err;
@@ -2445,7 +2445,7 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
 	hl_debugfs_remove_userptr(hdev, userptr);
 
 	if (userptr->dma_mapped)
-		hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
+		hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
 
 	unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
 	kvfree(userptr->pages);
@@ -4908,7 +4908,7 @@ static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
 
 	list_add_tail(&userptr->job_node, parser->job_userptr_list);
 
-	rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
+	rc = hl_dma_map_sgtable(hdev, userptr->sgt, dir);
 	if (rc) {
 		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
 		goto unpin_memory;
@@ -9144,9 +9144,9 @@ static const struct hl_asic_funcs gaudi_funcs = {
 	.asic_dma_pool_free = gaudi_dma_pool_free,
 	.cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc,
 	.cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free,
-	.hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
+	.dma_unmap_sgtable = hl_asic_dma_unmap_sgtable,
 	.cs_parser = gaudi_cs_parser,
-	.asic_dma_map_sgtable = hl_dma_map_sgtable,
+	.dma_map_sgtable = hl_asic_dma_map_sgtable,
 	.add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
 	.update_eq_ci = gaudi_update_eq_ci,
 	.context_switch = gaudi_context_switch,
@@ -11497,9 +11497,9 @@ static const struct hl_asic_funcs gaudi2_funcs = {
 	.asic_dma_pool_free = gaudi2_dma_pool_free,
 	.cpu_accessible_dma_pool_alloc = gaudi2_cpu_accessible_dma_pool_alloc,
 	.cpu_accessible_dma_pool_free = gaudi2_cpu_accessible_dma_pool_free,
-	.hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
+	.dma_unmap_sgtable = hl_asic_dma_unmap_sgtable,
 	.cs_parser = gaudi2_cs_parser,
-	.asic_dma_map_sgtable = hl_dma_map_sgtable,
+	.dma_map_sgtable = hl_asic_dma_map_sgtable,
 	.add_end_of_cb_packets = NULL,
 	.update_eq_ci = gaudi2_update_eq_ci,
 	.context_switch = gaudi2_context_switch,
@@ -3358,7 +3358,7 @@ static int goya_pin_memory_before_cs(struct hl_device *hdev,
 
 	list_add_tail(&userptr->job_node, parser->job_userptr_list);
 
-	rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
+	rc = hl_dma_map_sgtable(hdev, userptr->sgt, dir);
 	if (rc) {
 		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
 		goto unpin_memory;
@@ -5465,9 +5465,9 @@ static const struct hl_asic_funcs goya_funcs = {
 	.asic_dma_pool_free = goya_dma_pool_free,
 	.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
 	.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
-	.hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
+	.dma_unmap_sgtable = hl_asic_dma_unmap_sgtable,
 	.cs_parser = goya_cs_parser,
-	.asic_dma_map_sgtable = hl_dma_map_sgtable,
+	.dma_map_sgtable = hl_asic_dma_map_sgtable,
 	.add_end_of_cb_packets = goya_add_end_of_cb_packets,
 	.update_eq_ci = goya_update_eq_ci,
 	.context_switch = goya_context_switch,
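For reference, the trace events consumed above (trace_habanalabs_dma_map_page(), trace_habanalabs_dma_unmap_page() and their *_enabled() helpers) are not part of this diff; they come from the earlier tracing patch the commit message refers to. A minimal sketch of what such an event class could look like follows; the event names and call arguments match the diff, but the field layout, format string, and the use of int for the DMA direction are assumptions made for illustration, not the driver's actual trace header:

/* Sketch only -- field layout and format string are assumed. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM habanalabs

#if !defined(_TRACE_HABANALABS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HABANALABS_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(habanalabs_dma_map_template,
	TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, u32 len,
		 int dir, const char *caller),
	TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller),
	TP_STRUCT__entry(
		__string(dname, dev_name(dev))
		__field(u64, phys_addr)
		__field(u64, dma_addr)
		__field(u32, len)
		__field(int, dir)
		__string(caller, caller)
	),
	TP_fast_assign(
		__assign_str(dname, dev_name(dev));
		__entry->phys_addr = phys_addr;
		__entry->dma_addr = dma_addr;
		__entry->len = len;
		__entry->dir = dir;
		__assign_str(caller, caller);
	),
	/* One record per scatterlist entry that was (un)mapped. */
	TP_printk("%s: phys=%#llx dma=%#llx len=%#x dir=%d caller=%s",
		__get_str(dname), __entry->phys_addr, __entry->dma_addr,
		__entry->len, __entry->dir, __get_str(caller))
);

/* The two events used by hl_dma_[un]map_sgtable_caller() above. */
DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_map_page,
	TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, u32 len,
		 int dir, const char *caller),
	TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller));

DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_unmap_page,
	TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, u32 len,
		 int dir, const char *caller),
	TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller));

#endif /* _TRACE_HABANALABS_H */

/* This part must be outside the multi-read protection. */
#include <trace/define_trace.h>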