drm/amdgpu: Add XCC inst to PASID TLB flushing
Add XCC instance to select the correct KIQ ring when flushing TLBs on a multi-XCC setup. Signed-off-by: Mukul Joshi <mukul.joshi@amd.com> Tested-by: Amber Lin <Amber.Lin@amd.com> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Commit metadata:
  Committed by: Alex Deucher
  Parent commit: e2069a7b08
  This commit:  f87f686482
[file: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c — filename inferred from hunk context; lost in extraction]
@@ -743,7 +743,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
 }
 
 int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
-				      uint16_t pasid, enum TLB_FLUSH_TYPE flush_type)
+				      uint16_t pasid,
+				      enum TLB_FLUSH_TYPE flush_type,
+				      uint32_t inst)
 {
 	bool all_hub = false;
 
@@ -751,7 +753,7 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	    adev->family == AMDGPU_FAMILY_RV)
 		all_hub = true;
 
-	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
+	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst);
 }
 
 bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
[file: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h — filename inferred from hunk context; lost in extraction]
@@ -160,7 +160,8 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
 int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
 				uint16_t vmid);
 int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
-				uint16_t pasid, enum TLB_FLUSH_TYPE flush_type);
+				uint16_t pasid, enum TLB_FLUSH_TYPE flush_type,
+				uint32_t inst);
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
[file: drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h — filename inferred from hunk context; lost in extraction]
@@ -119,7 +119,8 @@ struct amdgpu_gmc_funcs {
 				uint32_t vmhub, uint32_t flush_type);
 	/* flush the vm tlb via pasid */
 	int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
-				   uint32_t flush_type, bool all_hub);
+				   uint32_t flush_type, bool all_hub,
+				   uint32_t inst);
 	/* flush the vm tlb via ring */
 	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
 				       uint64_t pd_addr);
@@ -296,9 +297,9 @@ struct amdgpu_gmc {
 };
 
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
-#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
+#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub, inst) \
 	((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
-	((adev), (pasid), (type), (allhub)))
+	((adev), (pasid), (type), (allhub), (inst)))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
[file: drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c — filename inferred from function name; lost in extraction]
@@ -419,7 +419,7 @@ error_alloc:
  */
 static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 					 uint16_t pasid, uint32_t flush_type,
-					 bool all_hub)
+					 bool all_hub, uint32_t inst)
 {
 	int vmid, i;
 	signed long r;
[file: drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c — filename inferred from function name; lost in extraction]
@@ -324,7 +324,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
  */
 static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 					 uint16_t pasid, uint32_t flush_type,
-					 bool all_hub)
+					 bool all_hub, uint32_t inst)
 {
 	int vmid, i;
 	signed long r;
[file: drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c — filename inferred from function name; lost in extraction]
@@ -424,7 +424,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  */
 static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 					uint16_t pasid, uint32_t flush_type,
-					bool all_hub)
+					bool all_hub, uint32_t inst)
 {
 	int vmid;
 	unsigned int tmp;
[file: drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c — filename inferred from function name; lost in extraction]
@@ -622,7 +622,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  */
 static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 					uint16_t pasid, uint32_t flush_type,
-					bool all_hub)
+					bool all_hub, uint32_t inst)
 {
 	int vmid;
 	unsigned int tmp;
[file: drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c — filename inferred from function name; lost in extraction]
@@ -924,7 +924,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
  */
 static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 					uint16_t pasid, uint32_t flush_type,
-					bool all_hub)
+					bool all_hub, uint32_t inst)
 {
 	int vmid, i;
 	signed long r;
@@ -932,8 +932,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	uint16_t queried_pasid;
 	bool ret;
 	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
-	struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
-	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+	struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
 
 	if (amdgpu_in_reset(adev))
 		return -EIO;
@@ -953,7 +953,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		if (vega20_xgmi_wa)
 			ndw += kiq->pmf->invalidate_tlbs_size;
 
-		spin_lock(&adev->gfx.kiq[0].ring_lock);
+		spin_lock(&adev->gfx.kiq[inst].ring_lock);
 		/* 2 dwords flush + 8 dwords fence */
 		amdgpu_ring_alloc(ring, ndw);
 		if (vega20_xgmi_wa)
@@ -964,13 +964,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
 		if (r) {
 			amdgpu_ring_undo(ring);
-			spin_unlock(&adev->gfx.kiq[0].ring_lock);
+			spin_unlock(&adev->gfx.kiq[inst].ring_lock);
 			up_read(&adev->reset_domain->sem);
 			return -ETIME;
 		}
 
 		amdgpu_ring_commit(ring);
-		spin_unlock(&adev->gfx.kiq[0].ring_lock);
+		spin_unlock(&adev->gfx.kiq[inst].ring_lock);
 		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
 		if (r < 1) {
 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
[file: drivers/gpu/drm/amd/amdkfd/kfd_process.c — filename inferred from function name; lost in extraction]
@@ -2052,6 +2052,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
 	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
 	struct kfd_node *dev = pdd->dev;
+	int xcc = 0;
 
 	/*
 	 * It can be that we race and lose here, but that is extremely unlikely
@@ -2069,8 +2070,10 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
 		amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
 						 pdd->qpd.vmid);
 	} else {
-		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
-						  pdd->process->pasid, type);
+		for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++)
+			amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
+							  pdd->process->pasid, type,
+							  dev->start_xcc_id + xcc);
 	}
 }
Reference in New Issue
Block a user