Merge tag 'amd-drm-fixes-6.16-2025-06-18' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes
amd-drm-fixes-6.16-2025-06-18:

amdgpu:
- DP tunneling fix
- LTTPR fix
- DSC fix
- DML2.x ABGR16161616 fix
- RMCM fix
- Backlight fixes
- GFX11 kicker support
- SDMA reset fixes
- VCN 5.0.1 fix
- Reset fix
- Misc small fixes

amdkfd:
- SDMA reset fix
- Fix race in GWS scheduling

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20250618203115.1533451-1-alexander.deucher@amd.com
@@ -1902,7 +1902,7 @@ no_preempt:
             continue;
         }
         job = to_amdgpu_job(s_job);
-        if (preempted && (&job->hw_fence) == fence)
+        if (preempted && (&job->hw_fence.base) == fence)
             /* mark the job as preempted */
             job->preemption_status |= AMDGPU_IB_PREEMPTED;
     }

@@ -6019,16 +6019,12 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
     return ret;
 }
 
-static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
-                                         struct amdgpu_job *job,
-                                         struct amdgpu_reset_context *reset_context,
-                                         struct list_head *device_list,
-                                         struct amdgpu_hive_info *hive,
-                                         bool need_emergency_restart)
+static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+                                          struct list_head *device_list,
+                                          struct amdgpu_hive_info *hive)
 {
-    struct list_head *device_list_handle = NULL;
     struct amdgpu_device *tmp_adev = NULL;
-    int i, r = 0;
+    int r;
 
     /*
      * Build list of devices to reset.
@@ -6045,26 +6041,54 @@ static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
         }
         if (!list_is_first(&adev->reset_list, device_list))
             list_rotate_to_front(&adev->reset_list, device_list);
-        device_list_handle = device_list;
     } else {
         list_add_tail(&adev->reset_list, device_list);
-        device_list_handle = device_list;
     }
 
     if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
-        r = amdgpu_device_health_check(device_list_handle);
+        r = amdgpu_device_health_check(device_list);
         if (r)
             return r;
     }
 
-    /* We need to lock reset domain only once both for XGMI and single device */
-    tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
-                                reset_list);
-    amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+    return 0;
+}
+
+static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
+                                                  struct list_head *device_list)
+{
+    struct amdgpu_device *tmp_adev = NULL;
+
+    if (list_empty(device_list))
+        return;
+    tmp_adev =
+        list_first_entry(device_list, struct amdgpu_device, reset_list);
+    amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+}
+
+static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
+                                                  struct list_head *device_list)
+{
+    struct amdgpu_device *tmp_adev = NULL;
+
+    if (list_empty(device_list))
+        return;
+    tmp_adev =
+        list_first_entry(device_list, struct amdgpu_device, reset_list);
+    amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
+
+static int amdgpu_device_halt_activities(
+    struct amdgpu_device *adev, struct amdgpu_job *job,
+    struct amdgpu_reset_context *reset_context,
+    struct list_head *device_list, struct amdgpu_hive_info *hive,
+    bool need_emergency_restart)
+{
+    struct amdgpu_device *tmp_adev = NULL;
+    int i, r = 0;
 
     /* block all schedulers and reset given job's ring */
-    list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+    list_for_each_entry(tmp_adev, device_list, reset_list) {
         amdgpu_device_set_mp1_state(tmp_adev);
 
         /*
@@ -6252,11 +6276,6 @@ static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
             amdgpu_ras_set_error_query_ready(tmp_adev, true);
 
     }
-
-    tmp_adev = list_first_entry(device_list, struct amdgpu_device,
-                                reset_list);
-    amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
-
 }
 
@@ -6324,10 +6343,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
     reset_context->hive = hive;
     INIT_LIST_HEAD(&device_list);
 
+    if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
+        goto end_reset;
+
+    /* We need to lock reset domain only once both for XGMI and single device */
+    amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+
     r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
                                       hive, need_emergency_restart);
     if (r)
-        goto end_reset;
+        goto reset_unlock;
 
     if (need_emergency_restart)
         goto skip_sched_resume;
@@ -6337,7 +6362,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
      *
      * job->base holds a reference to parent fence
      */
-    if (job && dma_fence_is_signaled(&job->hw_fence)) {
+    if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
         job_signaled = true;
         dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
         goto skip_hw_reset;
@@ -6345,13 +6370,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
     r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
     if (r)
-        goto end_reset;
+        goto reset_unlock;
 skip_hw_reset:
     r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
     if (r)
-        goto end_reset;
+        goto reset_unlock;
 skip_sched_resume:
     amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+reset_unlock:
+    amdgpu_device_recovery_put_reset_lock(adev, &device_list);
 end_reset:
     if (hive) {
         mutex_unlock(&hive->hive_lock);
@@ -6763,6 +6790,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
     memset(&reset_context, 0, sizeof(reset_context));
     INIT_LIST_HEAD(&device_list);
 
+    amdgpu_device_recovery_prepare(adev, &device_list, hive);
+    amdgpu_device_recovery_get_reset_lock(adev, &device_list);
     r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
                                       hive, false);
     if (hive) {
@@ -6880,8 +6909,8 @@ out:
         if (hive) {
             list_for_each_entry(tmp_adev, &device_list, reset_list)
                 amdgpu_device_unset_mp1_state(tmp_adev);
-            amdgpu_device_unlock_reset_domain(adev->reset_domain);
         }
+        amdgpu_device_recovery_put_reset_lock(adev, &device_list);
     }
 
     if (hive) {
@@ -6927,6 +6956,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
 
     amdgpu_device_sched_resume(&device_list, NULL, NULL);
     amdgpu_device_gpu_resume(adev, &device_list, false);
+    amdgpu_device_recovery_put_reset_lock(adev, &device_list);
     adev->pcie_reset_ctx.occurs_dpc = false;
 
     if (hive) {

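For orientation, a condensed sketch of the call order these hunks establish in amdgpu_device_gpu_recover(): the wrapper function below and its return values are illustrative only; the helper names are the ones introduced above.

/* sketch: prepare the device list, take the reset-domain lock once,
 * halt activity, and always drop the lock on the way out */
static int recover_flow_sketch(struct amdgpu_device *adev, struct amdgpu_job *job,
                               struct amdgpu_reset_context *reset_context,
                               struct amdgpu_hive_info *hive)
{
    LIST_HEAD(device_list);
    int r;

    if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
        return -EINVAL;
    /* lock once for the whole hive (or the single device) */
    amdgpu_device_recovery_get_reset_lock(adev, &device_list);
    r = amdgpu_device_halt_activities(adev, job, reset_context,
                                      &device_list, hive, false);
    /* ... ASIC reset and scheduler resume elided ... */
    amdgpu_device_recovery_put_reset_lock(adev, &device_list);
    return r;
}
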
@@ -41,22 +41,6 @@
 #include "amdgpu_trace.h"
 #include "amdgpu_reset.h"
 
-/*
- * Fences mark an event in the GPUs pipeline and are used
- * for GPU/CPU synchronization. When the fence is written,
- * it is expected that all buffers associated with that fence
- * are no longer in use by the associated ring on the GPU and
- * that the relevant GPU caches have been flushed.
- */
-
-struct amdgpu_fence {
-    struct dma_fence base;
-
-    /* RB, DMA, etc. */
-    struct amdgpu_ring *ring;
-    ktime_t start_timestamp;
-};
-
 static struct kmem_cache *amdgpu_fence_slab;
 
 int amdgpu_fence_slab_init(void)
@@ -151,12 +135,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
         am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
         if (am_fence == NULL)
             return -ENOMEM;
-        fence = &am_fence->base;
-        am_fence->ring = ring;
     } else {
         /* take use of job-embedded fence */
-        fence = &job->hw_fence;
+        am_fence = &job->hw_fence;
     }
+    fence = &am_fence->base;
+    am_fence->ring = ring;
 
     seq = ++ring->fence_drv.sync_seq;
     if (job && job->job_run_counter) {
@@ -718,7 +702,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
              * it right here or we won't be able to track them in fence_drv
              * and they will remain unsignaled during sa_bo free.
              */
-            job = container_of(old, struct amdgpu_job, hw_fence);
+            job = container_of(old, struct amdgpu_job, hw_fence.base);
             if (!job->base.s_fence && !dma_fence_is_signaled(old))
                 dma_fence_signal(old);
             RCU_INIT_POINTER(*ptr, NULL);
@@ -780,7 +764,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 
 static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
 {
-    struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+    struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
 
     return (const char *)to_amdgpu_ring(job->base.sched)->name;
 }
@@ -810,7 +794,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
  */
 static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
 {
-    struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+    struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
 
     if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
         amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
@@ -845,7 +829,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu)
     struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 
     /* free job if fence has a parent job */
-    kfree(container_of(f, struct amdgpu_job, hw_fence));
+    kfree(container_of(f, struct amdgpu_job, hw_fence.base));
 }
 
 /**

@@ -272,8 +272,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
     /* Check if any fences where initialized */
     if (job->base.s_fence && job->base.s_fence->finished.ops)
         f = &job->base.s_fence->finished;
-    else if (job->hw_fence.ops)
-        f = &job->hw_fence;
+    else if (job->hw_fence.base.ops)
+        f = &job->hw_fence.base;
     else
         f = NULL;
 
@@ -290,10 +290,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
     amdgpu_sync_free(&job->explicit_sync);
 
     /* only put the hw fence if has embedded fence */
-    if (!job->hw_fence.ops)
+    if (!job->hw_fence.base.ops)
         kfree(job);
     else
-        dma_fence_put(&job->hw_fence);
+        dma_fence_put(&job->hw_fence.base);
 }
 
 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -322,10 +322,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
         if (job->gang_submit != &job->base.s_fence->scheduled)
             dma_fence_put(job->gang_submit);
 
-    if (!job->hw_fence.ops)
+    if (!job->hw_fence.base.ops)
         kfree(job);
     else
-        dma_fence_put(&job->hw_fence);
+        dma_fence_put(&job->hw_fence.base);
 }
 
 struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)

@@ -48,7 +48,7 @@ struct amdgpu_job {
     struct drm_sched_job base;
     struct amdgpu_vm *vm;
     struct amdgpu_sync explicit_sync;
-    struct dma_fence hw_fence;
+    struct amdgpu_fence hw_fence;
     struct dma_fence *gang_submit;
     uint32_t preamble_status;
     uint32_t preemption_status;

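With hw_fence now a struct amdgpu_fence rather than a bare dma_fence, the fence handed to the rest of the stack is the embedded hw_fence.base, so call sites recover the job through the nested member, exactly as the container_of() conversions above do. A minimal illustration (the helper name is hypothetical):

static inline struct amdgpu_job *job_from_hw_fence(struct dma_fence *f)
{
    /* f is the dma_fence embedded at job->hw_fence.base */
    return container_of(f, struct amdgpu_job, hw_fence.base);
}
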
@@ -3522,8 +3522,12 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
     uint8_t *ucode_array_start_addr;
     int err = 0;
 
-    err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
-                               "amdgpu/%s_sos.bin", chip_name);
+    if (amdgpu_is_kicker_fw(adev))
+        err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+                                   "amdgpu/%s_sos_kicker.bin", chip_name);
+    else
+        err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+                                   "amdgpu/%s_sos.bin", chip_name);
     if (err)
         goto out;
 
@@ -3799,8 +3803,12 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
     struct amdgpu_device *adev = psp->adev;
     int err;
 
-    err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
-                               "amdgpu/%s_ta.bin", chip_name);
+    if (amdgpu_is_kicker_fw(adev))
+        err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+                                   "amdgpu/%s_ta_kicker.bin", chip_name);
+    else
+        err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+                                   "amdgpu/%s_ta.bin", chip_name);
     if (err)
         return err;

@@ -127,6 +127,22 @@ struct amdgpu_fence_driver {
     struct dma_fence **fences;
 };
 
+/*
+ * Fences mark an event in the GPUs pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+struct amdgpu_fence {
+    struct dma_fence base;
+
+    /* RB, DMA, etc. */
+    struct amdgpu_ring *ring;
+    ktime_t start_timestamp;
+};
+
 extern const struct drm_sched_backend_ops amdgpu_sched_ops;
 
 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);

@@ -540,8 +540,10 @@ static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
     case IP_VERSION(4, 4, 2):
     case IP_VERSION(4, 4, 4):
     case IP_VERSION(4, 4, 5):
-        /* For SDMA 4.x, use the existing DPM interface for backward compatibility */
-        r = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+        /* For SDMA 4.x, use the existing DPM interface for backward compatibility,
+         * we need to convert the logical instance ID to physical instance ID before reset.
+         */
+        r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
         break;
     case IP_VERSION(5, 0, 0):
     case IP_VERSION(5, 0, 1):
@@ -568,7 +570,7 @@ static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
 /**
  * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
  * @adev: Pointer to the AMDGPU device
- * @instance_id: ID of the SDMA engine instance to reset
+ * @instance_id: Logical ID of the SDMA engine instance to reset
  *
  * Returns: 0 on success, or a negative error code on failure.
  */
@@ -601,7 +603,7 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
     /* Perform the SDMA reset for the specified instance */
     ret = amdgpu_sdma_soft_reset(adev, instance_id);
     if (ret) {
-        dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
+        dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id);
         goto exit;
     }

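The fix hinges on a logical-to-physical translation: ring->me and the instance_id passed around here are logical SDMA indices, while the DPM mailbox expects the physical instance, hence the GET_INST(SDMA0, ...) conversion at the reset boundary only. A toy illustration (the mapping values are made up; real ones come from the IP discovery tables):

/* illustrative fragment: reset an SDMA engine given its logical index */
static int sdma_reset_by_logical_id(struct amdgpu_device *adev, u32 logical_id)
{
    u32 physical_id = GET_INST(SDMA0, logical_id); /* e.g. logical 1 -> physical 5 */

    return amdgpu_dpm_reset_sdma(adev, 1 << physical_id);
}
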
@@ -30,6 +30,10 @@
 
 #define AMDGPU_UCODE_NAME_MAX (128)
 
+static const struct kicker_device kicker_device_list[] = {
+    {0x744B, 0x00},
+};
+
 static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
 {
     DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -1387,6 +1391,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
     return NULL;
 }
 
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+        if (adev->pdev->device == kicker_device_list[i].device &&
+            adev->pdev->revision == kicker_device_list[i].revision)
+            return true;
+    }
+
+    return false;
+}
+
 void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
 {
     int maj, min, rev;

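Taken together with the psp, imu, gfx and smu hunks in this series, every affected firmware request follows the same shape: match the PCI device/revision against kicker_device_list and, on a hit, load the "_kicker" variant of the binary. A condensed sketch of that pattern (the single formatted-suffix form below is an editorial simplification; the driver spells out both request calls explicitly):

static int request_fw_with_kicker(struct amdgpu_device *adev,
                                  const struct firmware **fw,
                                  const char *ucode_prefix)
{
    const char *suffix = amdgpu_is_kicker_fw(adev) ? "_kicker" : "";

    /* e.g. "amdgpu/gc_11_0_0_rlc_kicker.bin" on a 0x744B rev 0x00 board */
    return amdgpu_ucode_request(adev, fw, AMDGPU_UCODE_REQUIRED,
                                "amdgpu/%s_rlc%s.bin", ucode_prefix, suffix);
}
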
@@ -605,6 +605,11 @@ struct amdgpu_firmware {
     uint32_t pldm_version;
 };
 
+struct kicker_device{
+    unsigned short device;
+    u8 revision;
+};
+
 void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
 void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
 void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr);
@@ -632,5 +637,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
 const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
 
 void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
 
 #endif

@@ -85,6 +85,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
@@ -759,6 +760,10 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
         err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
                                    AMDGPU_UCODE_REQUIRED,
                                    "amdgpu/gc_11_0_0_rlc_1.bin");
+    else if (amdgpu_is_kicker_fw(adev))
+        err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+                                   AMDGPU_UCODE_REQUIRED,
+                                   "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
     else
         err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
                                    AMDGPU_UCODE_REQUIRED,

@@ -32,6 +32,7 @@
 #include "gc/gc_11_0_0_sh_mask.h"
 
 MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
 MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
@@ -51,8 +52,12 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
     DRM_DEBUG("\n");
 
     amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-    err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
-                               "amdgpu/%s_imu.bin", ucode_prefix);
+    if (amdgpu_is_kicker_fw(adev))
+        err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+                                   "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+    else
+        err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+                                   "amdgpu/%s_imu.bin", ucode_prefix);
     if (err)
         goto out;

@@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");

@@ -490,7 +490,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
 {
     struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
     u32 doorbell_offset, doorbell;
-    u32 rb_cntl, ib_cntl;
+    u32 rb_cntl, ib_cntl, sdma_cntl;
     int i;
 
     for_each_inst(i, inst_mask) {
@@ -502,6 +502,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
         ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
         WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+        sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL);
+        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0);
+        WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl);
 
         if (sdma[i]->use_doorbell) {
             doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
@@ -995,6 +998,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
         /* set utc l1 enable flag always to 1 */
         temp = RREG32_SDMA(i, regSDMA_CNTL);
         temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+        WREG32_SDMA(i, regSDMA_CNTL, temp);
 
         if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
             /* enable context empty interrupt during initialization */
@@ -1670,7 +1674,7 @@ static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
 static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 {
     struct amdgpu_device *adev = ring->adev;
-    u32 id = GET_INST(SDMA0, ring->me);
+    u32 id = ring->me;
     int r;
 
     if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
@@ -1686,7 +1690,7 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
 {
     struct amdgpu_device *adev = ring->adev;
-    u32 instance_id = GET_INST(SDMA0, ring->me);
+    u32 instance_id = ring->me;
     u32 inst_mask;
     uint64_t rptr;

@@ -1399,6 +1399,7 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
         return r;
 
     for (i = 0; i < adev->sdma.num_instances; i++) {
+        mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
         adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
         ring = &adev->sdma.instance[i].ring;
         ring->ring_obj = NULL;

@@ -1318,6 +1318,7 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
     }
 
     for (i = 0; i < adev->sdma.num_instances; i++) {
+        mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
         adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs;
         ring = &adev->sdma.instance[i].ring;
         ring->ring_obj = NULL;

@@ -669,6 +669,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
     if (indirect)
         amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
 
+    /* resetting ring, fw should not check RB ring */
+    fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
     /* Pause dpg */
     vcn_v5_0_1_pause_dpg_mode(vinst, &state);
 
@@ -681,7 +684,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
     tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
     tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
     WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
-    fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
     WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
     WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
 
@@ -692,6 +695,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
     tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
     tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
     WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+    /* resetting done, fw can check RB ring */
     fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
 
     WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,

@@ -240,7 +240,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
 
     packet->bitfields2.engine_sel =
         engine_sel__mes_map_queues__compute_vi;
-    packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
+    packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
     packet->bitfields2.extended_engine_sel =
         extended_engine_sel__mes_map_queues__legacy_engine_sel;
     packet->bitfields2.queue_type =

@@ -510,6 +510,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
             dev->node_props.capability |=
                 HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
 
+    if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0) &&
+        (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+        dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
+
     sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute",
                           dev->node_props.max_engine_clk_fcompute);
 
@@ -2008,8 +2012,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
         if (!amdgpu_sriov_vf(dev->gpu->adev))
             dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
 
-        if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
-            dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
     } else {
         dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
                                       HSA_DBG_WATCH_ADDR_MASK_HI_BIT;

@@ -4718,9 +4718,23 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
     return 1;
 }
 
-static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
-                                      uint32_t *brightness)
+/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
+static inline u32 scale_input_to_fw(int min, int max, u64 input)
+{
+    return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
+}
+
+/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
+static inline u32 scale_fw_to_input(int min, int max, u64 input)
+{
+    return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
+}
+
+static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
+                                      unsigned int min, unsigned int max,
+                                      uint32_t *user_brightness)
 {
+    u32 brightness = scale_input_to_fw(min, max, *user_brightness);
     u8 prev_signal = 0, prev_lum = 0;
     int i = 0;
 
@@ -4731,7 +4745,7 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
         return;
 
     /* choose start to run less interpolation steps */
-    if (caps->luminance_data[caps->data_points/2].input_signal > *brightness)
+    if (caps->luminance_data[caps->data_points/2].input_signal > brightness)
         i = caps->data_points/2;
     do {
         u8 signal = caps->luminance_data[i].input_signal;
@@ -4742,17 +4756,18 @@
          * brightness < signal: interpolate between previous and current luminance numerator
          * brightness > signal: find next data point
          */
-        if (*brightness > signal) {
+        if (brightness > signal) {
             prev_signal = signal;
             prev_lum = lum;
             i++;
             continue;
         }
-        if (*brightness < signal)
+        if (brightness < signal)
             lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
-                                               (*brightness - prev_signal),
+                                               (brightness - prev_signal),
                                                signal - prev_signal);
-        *brightness = DIV_ROUND_CLOSEST(lum * *brightness, 101);
+        *user_brightness = scale_fw_to_input(min, max,
+                                             DIV_ROUND_CLOSEST(lum * brightness, 101));
         return;
     } while (i < caps->data_points);
 }
@@ -4765,11 +4780,10 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
     if (!get_brightness_range(caps, &min, &max))
         return brightness;
 
-    convert_custom_brightness(caps, &brightness);
+    convert_custom_brightness(caps, min, max, &brightness);
 
-    // Rescale 0..255 to min..max
-    return min + DIV_ROUND_CLOSEST((max - min) * brightness,
-                                   AMDGPU_MAX_BL_LEVEL);
+    // Rescale 0..max to min..max
+    return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max);
 }
 
 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4782,8 +4796,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
 
     if (brightness < min)
         return 0;
-    // Rescale min..max to 0..255
-    return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+    // Rescale min..max to 0..max
+    return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min),
                              max - min);
 }
 
@@ -4908,7 +4922,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
     struct drm_device *drm = aconnector->base.dev;
     struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
     struct backlight_properties props = { 0 };
-    struct amdgpu_dm_backlight_caps caps = { 0 };
+    struct amdgpu_dm_backlight_caps *caps;
     char bl_name[16];
     int min, max;
 
@@ -4922,22 +4936,21 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
         return;
     }
 
-    amdgpu_acpi_get_backlight_caps(&caps);
-    if (caps.caps_valid && get_brightness_range(&caps, &min, &max)) {
+    caps = &dm->backlight_caps[aconnector->bl_idx];
+    if (get_brightness_range(caps, &min, &max)) {
         if (power_supply_is_system_supplied() > 0)
-            props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.ac_level, 100);
+            props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->ac_level, 100);
         else
-            props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.dc_level, 100);
+            props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->dc_level, 100);
+        /* min is zero, so max needs to be adjusted */
+        props.max_brightness = max - min;
         drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
-                caps.ac_level, caps.dc_level);
+                caps->ac_level, caps->dc_level);
     } else
-        props.brightness = AMDGPU_MAX_BL_LEVEL;
+        props.brightness = props.max_brightness = AMDGPU_MAX_BL_LEVEL;
 
-    if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+    if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
         drm_info(drm, "Using custom brightness curve\n");
-    props.max_brightness = AMDGPU_MAX_BL_LEVEL;
     props.type = BACKLIGHT_RAW;
 
     snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",

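A worked example of the two new helpers, assuming AMDGPU_MAX_BL_LEVEL == 255 and a panel range of min == 10, max == 510 (so max - min == 500):

/*
 *   scale_input_to_fw(10, 510, 250) -> DIV_ROUND_CLOSEST_ULL(250 * 255, 500) == 128
 *   scale_fw_to_input(10, 510, 128) -> 10 + DIV_ROUND_CLOSEST_ULL(128 * 500, 255) == 261
 *
 * The round trip is not exact; the point is that the custom-curve
 * interpolation in convert_custom_brightness() now runs entirely in the
 * 0..AMDGPU_MAX_BL_LEVEL firmware domain and is rescaled back to the
 * user range on the way out.
 */
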
@@ -241,6 +241,7 @@ static bool create_links(
     DC_LOG_DC("BIOS object table - end");
 
     /* Create a link for each usb4 dpia port */
+    dc->lowest_dpia_link_index = MAX_LINKS;
     for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
         struct link_init_data link_init_params = {0};
         struct dc_link *link;
@@ -253,6 +254,9 @@ static bool create_links(
 
         link = dc->link_srv->create_link(&link_init_params);
         if (link) {
+            if (dc->lowest_dpia_link_index > dc->link_count)
+                dc->lowest_dpia_link_index = dc->link_count;
+
             dc->links[dc->link_count] = link;
             link->dc = dc;
             ++dc->link_count;
@@ -6376,6 +6380,35 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
     else
         return 0;
 }
+/**
+ ***********************************************************************************************
+ * dc_get_host_router_index: Get index of host router from a dpia link
+ *
+ * This function return a host router index of the target link. If the target link is dpia link.
+ *
+ * @param [in] link: target link
+ * @param [out] host_router_index: host router index of the target link
+ *
+ * @return: true if the host router index is found and valid.
+ *
+ ***********************************************************************************************
+ */
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index)
+{
+    struct dc *dc = link->ctx->dc;
+
+    if (link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+        return false;
+
+    if (link->link_index < dc->lowest_dpia_link_index)
+        return false;
+
+    *host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router;
+    if (*host_router_index < dc->caps.num_of_host_routers)
+        return true;
+    else
+        return false;
+}
 
 bool dc_is_cursor_limit_pending(struct dc *dc)
 {

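The index computation above is plain integer division over DPIA links created contiguously by create_links(). An example of the arithmetic, with values that depend on the board (here num_of_dpias_per_host_router == 2 and lowest_dpia_link_index == 3):

/*
 *   link_index 3 -> (3 - 3) / 2 == 0     link_index 4 -> 0
 *   link_index 5 -> (5 - 3) / 2 == 1     link_index 6 -> 1
 *
 * The result is only reported as valid while it stays below
 * dc->caps.num_of_host_routers.
 */
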
@@ -66,7 +66,8 @@ struct dmub_notification;
 #define MAX_STREAMS 6
 #define MIN_VIEWPORT_SIZE 12
 #define MAX_NUM_EDP 2
-#define MAX_HOST_ROUTERS_NUM 2
+#define MAX_HOST_ROUTERS_NUM 3
+#define MAX_DPIA_PER_HOST_ROUTER 2
 
 /* Display Core Interfaces */
 struct dc_versions {
@@ -305,6 +306,8 @@ struct dc_caps {
     /* Conservative limit for DCC cases which require ODM4:1 to support*/
     uint32_t dcc_plane_width_limit;
     struct dc_scl_caps scl_caps;
+    uint8_t num_of_host_routers;
+    uint8_t num_of_dpias_per_host_router;
 };
 
 struct dc_bug_wa {
@@ -1603,6 +1606,7 @@ struct dc {
 
     uint8_t link_count;
     struct dc_link *links[MAX_LINKS];
+    uint8_t lowest_dpia_link_index;
     struct link_service *link_srv;
 
     struct dc_state *current_state;
@@ -2595,6 +2599,8 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
 
 unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
 
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
+
 /* DSC Interfaces */
 #include "dc_dsc.h"

@@ -1172,8 +1172,8 @@ struct dc_lttpr_caps {
     union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
     union dp_alpm_lttpr_cap alpm;
     uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
-    uint8_t lttpr_ieee_oui[3];
-    uint8_t lttpr_device_id[6];
+    uint8_t lttpr_ieee_oui[3]; // Always read from closest LTTPR to host
+    uint8_t lttpr_device_id[6]; // Always read from closest LTTPR to host
 };
 
 struct dc_dongle_dfp_cap_ext {

@@ -788,6 +788,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
         plane->pixel_format = dml2_420_10;
         break;
     case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+    case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
     case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
     case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
         plane->pixel_format = dml2_444_64;

@@ -4685,7 +4685,10 @@ static void calculate_tdlut_setting(
     //the tdlut is fetched during the 2 row times of prefetch.
     if (p->setup_for_tdlut) {
         *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
-        *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+        if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024)
+            *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+        else
+            *p->tdlut_opt_time = 0;
         *p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
         *p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0);
     }

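The guard matters because tdlut_bytes_per_frame can be smaller than the cursor buffer, in which case the old expression produced a negative "optimization time". Illustrative numbers (not from the source):

/*
 * tdlut_bytes_per_frame == 32768, cursor_buffer_size == 64 (KiB, i.e.
 * 65536 bytes), tdlut_drain_rate == 1024 bytes/us:
 *   old: (32768 - 65536) / 1024 == -32 us   (nonsensical negative time)
 *   new: 32768 <= 65536, so tdlut_opt_time == 0
 */
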
@@ -953,6 +953,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
         out->SourcePixelFormat[location] = dml_420_10;
         break;
     case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+    case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
     case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
     case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
         out->SourcePixelFormat[location] = dml_444_64;

@@ -1225,7 +1225,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
         return;
 
     if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
-        if (!link->skip_implict_edp_power_control)
+        if (!link->skip_implict_edp_power_control && hws)
             hws->funcs.edp_backlight_control(link, false);
         link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
     }

@@ -1047,6 +1047,15 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
             if (dc->caps.sequential_ono) {
                 update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
                 update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
+
+                /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+                if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
+                    pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
+                    for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+                        update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+                        update_state->pg_pipe_res_update[PG_DPP][j] = false;
+                    }
+                }
             }
         }
 
@@ -1193,6 +1202,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
         update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
 
+    if (dc->caps.sequential_ono) {
+        for (i = 0; i < dc->res_pool->pipe_count; i++) {
+            struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+            if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
+                update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
+                update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
+                update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;
+
+                /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+                if (new_pipe->plane_res.hubp &&
+                    new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
+                    for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+                        update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+                        update_state->pg_pipe_res_update[PG_DPP][j] = true;
+                    }
+                }
+            }
+        }
+    }
+
     for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
         if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
             update_state->pg_pipe_res_update[PG_DPP][i]) {

@@ -385,9 +385,15 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
 bool dp_is_lttpr_present(struct dc_link *link)
 {
     /* Some sink devices report invalid LTTPR revision, so don't validate against that cap */
-    return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+    uint32_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+    bool is_lttpr_present = (lttpr_count > 0 &&
             link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
             link->dpcd_caps.lttpr_caps.max_lane_count <= 4);
+
+    if (lttpr_count > 0 && !is_lttpr_present)
+        DC_LOG_ERROR("LTTPR count is nonzero but invalid lane count reported. Assuming no LTTPR present.\n");
+
+    return is_lttpr_present;
 }
 
 /* in DP compliance test, DPR-120 may have
@@ -1551,6 +1557,8 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
     uint8_t lttpr_dpcd_data[10] = {0};
     enum dc_status status;
     bool is_lttpr_present;
+    uint32_t lttpr_count;
+    uint32_t closest_lttpr_offset;
 
     /* Logic to determine LTTPR support*/
     bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
@@ -1602,20 +1610,22 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
             lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES -
                             DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
 
+    lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
     /* If this chip cap is set, at least one retimer must exist in the chain
      * Override count to 1 if we receive a known bad count (0 or an invalid value) */
     if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
-            (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+            lttpr_count == 0) {
         /* If you see this message consistently, either the host platform has FIXED_VS flag
          * incorrectly configured or the sink device is returning an invalid count.
          */
         DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.",
                      link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
         link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+        lttpr_count = 1;
         DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
     }
 
     /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
     is_lttpr_present = dp_is_lttpr_present(link);
 
     DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
@@ -1623,11 +1633,25 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
     if (is_lttpr_present) {
         CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
 
-        core_link_read_dpcd(link, DP_LTTPR_IEEE_OUI, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
-        CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), "LTTPR IEEE OUI: ");
-
-        core_link_read_dpcd(link, DP_LTTPR_DEVICE_ID, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
-        CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), "LTTPR Device ID: ");
+        // Identify closest LTTPR to determine if workarounds required for known embedded LTTPR
+        closest_lttpr_offset = dp_get_closest_lttpr_offset(lttpr_count);
+
+        core_link_read_dpcd(link, (DP_LTTPR_IEEE_OUI + closest_lttpr_offset),
+                link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
+        core_link_read_dpcd(link, (DP_LTTPR_DEVICE_ID + closest_lttpr_offset),
+                link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
+
+        if (lttpr_count > 1) {
+            CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+                    "Closest LTTPR To Host's IEEE OUI: ");
+            CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+                    "Closest LTTPR To Host's LTTPR Device ID: ");
+        } else {
+            CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+                    "LTTPR IEEE OUI: ");
+            CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+                    "LTTPR Device ID: ");
+        }
     }
 
     return status;

@@ -1954,6 +1954,9 @@ static bool dcn31_resource_construct(
     dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
     dc->caps.color.mpc.ocsc = 1;
 
+    dc->caps.num_of_host_routers = 2;
+    dc->caps.num_of_dpias_per_host_router = 2;
+
     /* Use pipe context based otg sync logic */
     dc->config.use_pipe_ctx_sync_logic = true;
 
     dc->config.disable_hbr_audio_dp2 = true;

@@ -1885,6 +1885,9 @@ static bool dcn314_resource_construct(
 
     dc->caps.max_disp_clock_khz_at_vmin = 650000;
 
+    dc->caps.num_of_host_routers = 2;
+    dc->caps.num_of_dpias_per_host_router = 2;
+
     /* Use pipe context based otg sync logic */
     dc->config.use_pipe_ctx_sync_logic = true;

@@ -1894,6 +1894,9 @@ static bool dcn35_resource_construct(
     dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
     dc->caps.color.mpc.ocsc = 1;
 
+    dc->caps.num_of_host_routers = 2;
+    dc->caps.num_of_dpias_per_host_router = 2;
+
     /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
      * to provide some margin.
      * It's expected for furture ASIC to have equal or higher value, in order to

@@ -1866,6 +1866,9 @@ static bool dcn351_resource_construct(
     dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
     dc->caps.color.mpc.ocsc = 1;
 
+    dc->caps.num_of_host_routers = 2;
+    dc->caps.num_of_dpias_per_host_router = 2;
+
     /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
      * to provide some margin.
      * It's expected for furture ASIC to have equal or higher value, in order to

@@ -1867,6 +1867,9 @@ static bool dcn36_resource_construct(
     dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
     dc->caps.color.mpc.ocsc = 1;
 
+    dc->caps.num_of_host_routers = 2;
+    dc->caps.num_of_dpias_per_host_router = 2;
+
     /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
      * to provide some margin.
      * It's expected for furture ASIC to have equal or higher value, in order to

@@ -58,6 +58,7 @@
 
 MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
 MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin");
 MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
 MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
 
@@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
 int smu_v13_0_init_microcode(struct smu_context *smu)
 {
     struct amdgpu_device *adev = smu->adev;
-    char ucode_prefix[15];
+    char ucode_prefix[30];
     int err = 0;
     const struct smc_firmware_header_v1_0 *hdr;
     const struct common_firmware_header *header;
@@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
         return 0;
 
     amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
-    err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
-                               "amdgpu/%s.bin", ucode_prefix);
 
+    if (amdgpu_is_kicker_fw(adev))
+        err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+                                   "amdgpu/%s_kicker.bin", ucode_prefix);
+    else
+        err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+                                   "amdgpu/%s.bin", ucode_prefix);
     if (err)
         goto out;
