drm/amdgpu: fix documentation errors in sdma v7.0
Fix up parameter descriptions.

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
@@ -224,7 +224,9 @@ static void sdma_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
|
||||
* sdma_v7_0_ring_emit_ib - Schedule an IB on the DMA engine
|
||||
*
|
||||
* @ring: amdgpu ring pointer
|
||||
* @job: job to retrieve vmid from
|
||||
* @ib: IB object to schedule
|
||||
* @flags: unused
|
||||
*
|
||||
* Schedule an IB in the DMA ring.
|
||||
*/
|
||||
@@ -260,8 +262,6 @@ static void sdma_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
|
||||
* sdma_v7_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
|
||||
*
|
||||
 * @ring: amdgpu ring pointer
*
|
||||
* flush the IB by graphics cache rinse.
|
||||
*/
|
||||
@@ -313,7 +313,9 @@ static void sdma_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
|
||||
* sdma_v7_0_ring_emit_fence - emit a fence on the DMA ring
|
||||
*
|
||||
* @ring: amdgpu ring pointer
|
||||
 * @addr: address
 * @seq: fence seq number
 * @flags: fence flags
*
|
||||
* Add a DMA fence packet to the ring to write
|
||||
* the fence seq number and DMA trap packet to generate
|
||||
@@ -915,6 +917,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
|
||||
* sdma_v7_0_ring_test_ib - test an IB on the DMA engine
|
||||
*
|
||||
* @ring: amdgpu_ring structure holding ring information
|
||||
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
|
||||
*
|
||||
* Test a simple IB in the DMA ring.
|
||||
* Returns 0 on success, error on failure.
|
||||
@@ -1038,10 +1041,9 @@ static void sdma_v7_0_vm_copy_pte(struct amdgpu_ib *ib,
|
||||
*
|
||||
* @ib: indirect buffer to fill with commands
|
||||
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
* @count: number of page entries to update
|
||||
* @incr: increase next addr by incr bytes
|
||||
* @flags: access flags
|
||||
*
|
||||
* Update PTEs by writing them manually using sDMA.
|
||||
*/
|
||||
@@ -1095,6 +1097,8 @@ static void sdma_v7_0_vm_set_pte_pde(struct amdgpu_ib *ib,
|
||||
|
||||
/**
|
||||
* sdma_v7_0_ring_pad_ib - pad the IB
|
||||
*
|
||||
* @ring: amdgpu ring pointer
|
||||
* @ib: indirect buffer to fill with padding
|
||||
*
|
||||
* Pad the IB with NOPs to a boundary multiple of 8.
|
||||
@@ -1145,7 +1149,8 @@ static void sdma_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
|
||||
* sdma_v7_0_ring_emit_vm_flush - vm flush using sDMA
|
||||
*
|
||||
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
* @pd_addr: address
|
||||
*
|
||||
* Update the page table base and flush the VM TLB
|
||||
* using sDMA.
|
||||
@@ -1549,11 +1554,11 @@ static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
/**
|
||||
* sdma_v7_0_emit_copy_buffer - copy buffer using the sDMA engine
|
||||
*
|
||||
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @copy_flags: copy flags for the buffers
*
|
||||
* Copy GPU buffers using the DMA engine.
|
||||
* Used by the amdgpu ttm implementation to move pages if
|
||||
@@ -1579,7 +1584,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
|
||||
/**
|
||||
* sdma_v7_0_emit_fill_buffer - fill buffer using the sDMA engine
|
||||
*
|
||||
 * @ib: indirect buffer to fill
* @src_data: value to write to buffer
|
||||
* @dst_offset: dst GPU address
|
||||
* @byte_count: number of bytes to xfer
|
||||
|
||||
Reference in New Issue
Block a user