MALI: midgard: Fix all compile errors under kernel 5.10

Change-Id: I8672520b7c7118ab7622032481802eae72349f81
Signed-off-by: Zhen Chen <chenzhen@rock-chips.com>
Authored by Zhen Chen on 2021-07-08 10:18:53 +08:00
Committed by Tao Huang
parent fcb101fe62
commit 970017f88e
20 changed files with 180 additions and 116 deletions
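Taken together, the hunks below work around these upstream changes: the y2038 cleanup replaced the struct timespec accessors (getrawmonotonic(), getnstimeofday()) with struct timespec64 and ktime_get_raw_ts64()/ktime_get_real_ts64(); -Wimplicit-fallthrough rejects the driver's decorated fallthrough banners; dma_fence->seqno widened from u32 to u64 in v5.1; totalram_pages became an accessor function in v5.0; mm->mmap_sem was renamed mm->mmap_lock in v5.8; vm_insert_pfn() gave way to vmf_insert_pfn(); get_user_pages_remote() dropped its task_struct argument in v5.9; and dma_buf_kmap()/dma_buf_kunmap() are gone by v5.6.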
@@ -349,7 +349,7 @@ static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev,
KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
{
enum kbase_pm_cores_ready cores_ready;
@@ -394,7 +394,7 @@ static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev,
}
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
(katom->core_req & BASE_JD_REQ_T));
@@ -483,6 +483,7 @@ static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev,
KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS;
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS:
KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
(katom->core_req & BASE_JD_REQ_T));
@@ -539,7 +540,7 @@ static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
(katom->core_req & BASE_JD_REQ_T));
-/* *** FALLTHROUGH *** */
+/* fallthrough */
case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
/* State where cores were registered */
@@ -587,7 +588,7 @@ static void kbasep_js_job_check_deref_cores_nokatom(struct kbase_device *kbdev,
KBASE_DEBUG_ASSERT(affinity != 0 ||
(core_req & BASE_JD_REQ_T));
-/* *** FALLTHROUGH *** */
+/* fallthrough */
case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
/* State where cores were registered */
@@ -649,15 +650,15 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
KBASE_TLSTREAM_TL_NRET_CTX_LPU(kctx,
&kbdev->gpu_props.props.raw_props.js_features
[katom->slot_nr]);
+/* fallthrough */
case KBASE_ATOM_GPU_RB_READY:
-/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
kbase_js_affinity_release_slot_cores(kbdev, katom->slot_nr,
katom->affinity);
-/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
break;
@@ -679,13 +680,13 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
-/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
-/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
-/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
break;
}
@@ -831,8 +832,8 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
katom[idx]->protected_state.enter =
KBASE_ATOM_ENTER_PROTECTED_VINSTR;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_ENTER_PROTECTED_VINSTR:
if (kbase_vinstr_try_suspend(kbdev->vinstr_ctx) < 0) {
/*
@@ -860,8 +861,8 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
kbase_pm_update_cores_state_nolock(kbdev);
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_ENTER_PROTECTED_IDLE_L2:
/* Avoid unnecessary waiting on non-ACE platforms. */
if (kbdev->current_gpu_coherency_mode == COHERENCY_ACE) {
@@ -878,8 +879,8 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
katom[idx]->protected_state.enter =
KBASE_ATOM_ENTER_PROTECTED_FINISHED;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
/* No jobs running, so we can switch GPU mode right now. */
@@ -954,7 +955,8 @@ static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
kbdev->protected_mode_transition = true;
kbase_pm_update_cores_state_nolock(kbdev);
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_EXIT_PROTECTED_IDLE_L2:
if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
@@ -967,8 +969,8 @@ static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
katom[idx]->protected_state.exit =
KBASE_ATOM_EXIT_PROTECTED_RESET;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_EXIT_PROTECTED_RESET:
/* Issue the reset to the GPU */
err = kbase_gpu_protected_mode_reset(kbdev);
@@ -999,8 +1001,8 @@ static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
katom[idx]->protected_state.exit =
KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT:
/* A GPU reset is issued when exiting protected mode. Once the
* reset is done all atoms' state will also be reset. For this
@@ -1049,8 +1051,8 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV:
if (kbase_gpu_check_secure_atoms(kbdev,
!kbase_jd_katom_is_protected(
@@ -1069,8 +1071,8 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION:
/*
@@ -1104,8 +1106,8 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
if (katom[idx]->will_fail_event_code) {
kbase_gpu_mark_atom_for_return(kbdev,
@@ -1144,8 +1146,8 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_WAITING_AFFINITY;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
if (!kbase_gpu_rmu_workaround(kbdev, js))
break;
@@ -1153,8 +1155,8 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_READY;
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_READY:
if (idx == 1) {
@@ -1202,8 +1204,8 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
kbase_pm_metrics_update(kbdev,
&katom[idx]->start_timestamp);
-/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+/* ***TRANSITION TO HIGHER STATE*** */
+/* fallthrough */
case KBASE_ATOM_GPU_RB_SUBMITTED:
/* Atom submitted to HW, nothing else to do */
break;
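The comment churn above exists because GCC's -Wimplicit-fallthrough (enabled by kernel builds of this era) only suppresses the warning when the comment before the next case label matches its expected pattern: /* fallthrough */ matches, the ***FALLTHROUGH*** banners do not. A minimal sketch of the two accepted annotations; the enum and function are illustrative, not driver code (the fallthrough; pseudo-keyword exists from v5.4 via <linux/compiler_attributes.h>):

#include <linux/compiler_attributes.h>

enum demo_state { DEMO_A, DEMO_B, DEMO_C };

static int demo_score(enum demo_state s)
{
	int score = 0;

	switch (s) {
	case DEMO_C:
		score += 10;
		fallthrough;	/* pseudo-keyword, v5.4+ */
	case DEMO_B:
		score += 5;
		/* fallthrough */
	case DEMO_A:
		score += 1;
		break;
	}
	return score;
}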
@@ -21,7 +21,7 @@
#include <backend/gpu/mali_kbase_pm_internal.h>
void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
-u64 *system_time, struct timespec *ts)
+u64 *system_time, struct timespec64 *ts)
{
u32 hi1, hi2;
@@ -52,7 +52,7 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
} while (hi1 != hi2);
/* Record the CPU's idea of current time */
-getrawmonotonic(ts);
+ktime_get_raw_ts64(ts);
kbase_pm_release_gpu_cycle_counter(kbdev);
}
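The timekeeping substitutions in this and the following hunks follow the standard y2038 mapping: struct timespec becomes struct timespec64, getrawmonotonic() becomes ktime_get_raw_ts64(), and getnstimeofday() becomes ktime_get_real_ts64(). A small sketch of the pattern, with NSEC_PER_SEC standing in for the driver's own NSECS_IN_SEC constant:

#include <linux/timekeeping.h>
#include <linux/time64.h>

static u64 sample_raw_ns(void)
{
	struct timespec64 ts;

	ktime_get_raw_ts64(&ts);	/* was getrawmonotonic(&ts) */
	return (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}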
@@ -23,11 +23,11 @@
* @kbdev: Device pointer
* @cycle_counter: Pointer to u64 to store cycle counter in
* @system_time: Pointer to u64 to store system time in
-* @ts: Pointer to struct timespec to store current monotonic
+* @ts: Pointer to struct timespec64 to store current monotonic
* time in
*/
void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
-u64 *system_time, struct timespec *ts);
+u64 *system_time, struct timespec64 *ts);
/**
* kbase_wait_write_flush() - Wait for GPU write flush
@@ -738,7 +738,7 @@ enum kbase_trace_code {
#define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1)
struct kbase_trace {
-struct timespec timestamp;
+struct timespec64 timestamp;
u32 thread_id;
u32 cpu;
void *ctx;
@@ -444,7 +444,7 @@ void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, vo
trace_msg->thread_id = task_pid_nr(current);
trace_msg->cpu = task_cpu(current);
-getnstimeofday(&trace_msg->timestamp);
+ktime_get_real_ts64(&trace_msg->timestamp);
trace_msg->code = code;
trace_msg->ctx = ctx;
@@ -62,7 +62,11 @@ kbase_fence_fence_value_str(struct fence *fence, char *str, int size)
kbase_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
#endif
{
+#if (KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE)
snprintf(str, size, "%u", fence->seqno);
+#else
+snprintf(str, size, "%llu", fence->seqno);
+#endif
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
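The new guard is needed because dma_fence->seqno widened from u32 to u64 in v5.1, turning the old "%u" format into a -Wformat error; the same version split reappears in the kbase_sync_fence_info_get() hunk further down.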
@@ -238,7 +238,7 @@ static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, str
/* Populate the base_gpu_props structure */
kbase_gpuprops_update_core_props_gpu_id(gpu_props);
gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
-gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;
+gpu_props->core_props.gpu_available_memory_size = totalram_pages() << PAGE_SHIFT;
for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
gpu_props->core_props.texture_features[i] = gpu_props->raw_props.texture_features[i];
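totalram_pages was converted from a global variable to an accessor function in v5.0, hence the added parentheses. A hedged compat sketch for code that must build on both sides of the change:

#include <linux/mm.h>
#include <linux/version.h>

static u64 available_ram_bytes(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
	return (u64)totalram_pages() << PAGE_SHIFT;	/* accessor, v5.0+ */
#else
	return (u64)totalram_pages << PAGE_SHIFT;	/* global before v5.0 */
#endif
}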
@@ -28,11 +28,11 @@
* @kbdev: Device pointer
* @cycle_counter: Pointer to u64 to store cycle counter in
* @system_time: Pointer to u64 to store system time in
-* @ts: Pointer to struct timespec to store current monotonic
+* @ts: Pointer to struct timespec64 to store current monotonic
* time in
*/
void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
-u64 *system_time, struct timespec *ts);
+u64 *system_time, struct timespec64 *ts);
/**
* kbase_wait_write_flush() - Wait for GPU write flush
@@ -374,7 +374,7 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
#endif /* CONFIG_MALI_DMA_FENCE */
/* Take the processes mmap lock */
-down_read(&current->mm->mmap_sem);
+down_read(&current->mm->mmap_lock);
/* need to keep the GPU VM locked while we set up UMM buffers */
kbase_gpu_vm_lock(katom->kctx);
@@ -439,7 +439,7 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
kbase_gpu_vm_unlock(katom->kctx);
/* Release the processes mmap lock */
-up_read(&current->mm->mmap_sem);
+up_read(&current->mm->mmap_lock);
#ifdef CONFIG_KDS
if (kds_res_count) {
@@ -506,7 +506,7 @@ failed_kds_setup:
#endif
#if defined(CONFIG_KDS) || defined(CONFIG_MALI_DMA_FENCE)
/* Lock the processes mmap lock */
-down_read(&current->mm->mmap_sem);
+down_read(&current->mm->mmap_lock);
/* lock before we unmap */
kbase_gpu_vm_lock(katom->kctx);
@@ -522,7 +522,7 @@ failed_kds_setup:
kbase_gpu_vm_unlock(katom->kctx);
/* Release the processes mmap lock */
-up_read(&current->mm->mmap_sem);
+up_read(&current->mm->mmap_lock);
early_err_out:
kfree(katom->extres);
@@ -40,18 +40,16 @@ static void kbase_jd_debugfs_fence_info(struct kbase_jd_atom *atom,
switch (atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
res = kbase_sync_fence_out_info_get(atom, &info);
-if (0 == res) {
+if (res == 0)
seq_printf(sfile, "Sa([%p]%d) ",
info.fence, info.status);
-break;
-}
+break;
case BASE_JD_REQ_SOFT_FENCE_WAIT:
res = kbase_sync_fence_in_info_get(atom, &info);
-if (0 == res) {
+if (res == 0)
seq_printf(sfile, "Wa([%p]%d) ",
info.fence, info.status);
-break;
-}
+break;
default:
break;
}
@@ -1017,7 +1017,7 @@ static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping(
unsigned long map_start;
size_t map_size;
-lockdep_assert_held(&current->mm->mmap_sem);
+lockdep_assert_held(&current->mm->mmap_lock);
if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */
return NULL;
@@ -2180,12 +2180,18 @@ static int kbase_jd_user_buf_map(struct kbase_context *kctx,
alloc->imported.user_buf.nr_pages,
reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
pages, NULL);
-#else
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
pinned_pages = get_user_pages_remote(NULL, mm,
address,
alloc->imported.user_buf.nr_pages,
reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
pages, NULL, NULL);
+#else
+pinned_pages = get_user_pages_remote(mm,
+address,
+alloc->imported.user_buf.nr_pages,
+reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+pages, NULL, NULL);
#endif
if (pinned_pages <= 0)
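get_user_pages_remote() gained a trailing int *locked argument in v4.10 and lost its leading task_struct argument in v5.9 (the task was redundant with the mm), which is why three branches are needed. A sketch of a wrapper hiding all three signatures; kbase_gup_remote() is a hypothetical name, not something this driver defines:

#include <linux/mm.h>
#include <linux/version.h>

static long kbase_gup_remote(struct mm_struct *mm, unsigned long start,
			     unsigned long nr_pages, unsigned int gup_flags,
			     struct page **pages)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
	return get_user_pages_remote(NULL, mm, start, nr_pages,
				     gup_flags, pages, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
	return get_user_pages_remote(NULL, mm, start, nr_pages,
				     gup_flags, pages, NULL, NULL);
#else
	return get_user_pages_remote(mm, start, nr_pages,
				     gup_flags, pages, NULL, NULL);
#endif
}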
@@ -58,7 +58,7 @@ static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_
* Shrink (or completely remove) all CPU mappings which reference the shrunk
* part of the allocation.
*
-* Note: Caller must be holding the processes mmap_sem lock.
+* Note: Caller must be holding the processes mmap_lock lock.
* Note: Caller must be holding the processes mmap_lock lock.
*/
static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
struct kbase_va_region *reg,
@@ -611,7 +611,7 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
real_flags |= KBASE_REG_SHARE_IN;
/* now we can lock down the context, and find the region */
-down_write(&current->mm->mmap_sem);
+down_write(&current->mm->mmap_lock);
kbase_gpu_vm_lock(kctx);
/* Validate the region */
@@ -683,7 +683,7 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
out_unlock:
kbase_gpu_vm_unlock(kctx);
-up_write(&current->mm->mmap_sem);
+up_write(&current->mm->mmap_lock);
out:
return ret;
}
@@ -1019,7 +1019,7 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
*flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
}
-down_read(&current->mm->mmap_sem);
+down_read(&current->mm->mmap_lock);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
@@ -1033,7 +1033,7 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
pages, NULL);
#endif
-up_read(&current->mm->mmap_sem);
+up_read(&current->mm->mmap_lock);
if (faulted_pages != *va_pages)
goto fault_mismatch;
@@ -1498,7 +1498,7 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
return -EINVAL;
}
-down_write(&current->mm->mmap_sem);
+down_write(&current->mm->mmap_lock);
kbase_gpu_vm_lock(kctx);
/* Validate the region */
@@ -1540,7 +1540,7 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
* No update to the mm so downgrade the writer lock to a read
* lock so other readers aren't blocked after this point.
*/
-downgrade_write(&current->mm->mmap_sem);
+downgrade_write(&current->mm->mmap_lock);
read_locked = true;
/* Allocate some more pages */
@@ -1596,9 +1596,9 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
out_unlock:
kbase_gpu_vm_unlock(kctx);
if (read_locked)
-up_read(&current->mm->mmap_sem);
+up_read(&current->mm->mmap_lock);
else
-up_write(&current->mm->mmap_sem);
+up_write(&current->mm->mmap_lock);
return res;
}
@@ -1651,10 +1651,10 @@ KBASE_EXPORT_TEST_API(kbase_cpu_vm_close);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
-static int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static vm_fault_t kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#else
-static int kbase_cpu_vm_fault(struct vm_fault *vmf)
+static vm_fault_t kbase_cpu_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
#endif
@@ -1686,7 +1686,7 @@ static int kbase_cpu_vm_fault(struct vm_fault *vmf)
addr = (pgoff_t)(vmf->address >> PAGE_SHIFT);
#endif
while (i < map->alloc->nents && (addr < vma->vm_end >> PAGE_SHIFT)) {
-int ret = vm_insert_pfn(vma, addr << PAGE_SHIFT,
+int ret = vmf_insert_pfn(vma, addr << PAGE_SHIFT,
PFN_DOWN(map->alloc->pages[i]));
if (ret < 0 && ret != -EBUSY)
goto locked_bad_fault;
@@ -1768,7 +1768,7 @@ static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vm
for (i = 0; i < nr_pages; i++) {
unsigned long pfn = PFN_DOWN(page_array[i + start_off]);
-err = vm_insert_pfn(vma, addr, pfn);
+err = vmf_insert_pfn(vma, addr, pfn);
if (WARN_ON(err))
break;
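One caveat with this mechanical substitution: vm_insert_pfn() returned 0 or a negative errno, whereas vmf_insert_pfn() returns a vm_fault_t (VM_FAULT_NOPAGE on success), so checks written against the old convention do not carry over one-for-one. A sketch of testing the new return type, assuming it runs inside a fault handler with vma, addr and pfn as in the hunks above:

	vm_fault_t ret = vmf_insert_pfn(vma, addr, pfn);

	if (ret != VM_FAULT_NOPAGE)
		return ret;	/* VM_FAULT_SIGBUS or VM_FAULT_OOM */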
@@ -1950,14 +1950,14 @@ void kbase_os_mem_map_lock(struct kbase_context *kctx)
{
struct mm_struct *mm = current->mm;
(void)kctx;
-down_read(&mm->mmap_sem);
+down_read(&mm->mmap_lock);
}
void kbase_os_mem_map_unlock(struct kbase_context *kctx)
{
struct mm_struct *mm = current->mm;
(void)kctx;
-up_read(&mm->mmap_sem);
+up_read(&mm->mmap_lock);
}
static int kbasep_reg_mmap(struct kbase_context *kctx,
@@ -98,7 +98,7 @@ int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
* Take the provided region and make all the physical pages within it
* reclaimable by the kernel, updating the per-process VM stats as well.
* Remove any CPU mappings (as these can't be removed in the shrinker callback
-* as mmap_sem might already be taken) but leave the GPU mapping intact as
+* as mmap_lock might already be taken) but leave the GPU mapping intact as
* and until the shrinker reclaims the allocation.
*
* Note: Must be called with the region lock of the containing context.
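The mmap_sem to mmap_lock rename landed in v5.8 alongside wrapper helpers, which new code is expected to use instead of open-coded rwsem calls; mmap_assert_locked() likewise replaces the lockdep_assert_held() form seen earlier. A sketch, assuming v5.8+:

#include <linux/mmap_lock.h>

	mmap_read_lock(current->mm);	/* was down_read(&mm->mmap_sem) */
	/* ... look up VMAs, pin pages ... */
	mmap_read_unlock(current->mm);	/* was up_read(&mm->mmap_sem) */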
@@ -22,6 +22,18 @@
#include <linux/compiler.h>
+/* __asmeq is not available on Kernel versions >= 4.20 */
+#ifndef __asmeq
+/*
+* This is used to ensure the compiler did actually allocate the register we
+* asked it for some inline assembly sequences. Apparently we can't trust the
+* compiler from one version to another so a bit of paranoia won't hurt. This
+* string is meant to be concatenated with the inline asm string and will
+* cause compilation to stop on mismatch. (for details, see gcc PR 15089)
+*/
+#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+#endif
static noinline u64 invoke_smc_fid(u64 function_id,
u64 arg0, u64 arg1, u64 arg2)
{
@@ -131,7 +131,7 @@ static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
{
struct kbase_vmap_struct map;
void *user_result;
-struct timespec ts;
+struct timespec64 ts;
struct base_dump_cpu_gpu_counters data;
u64 system_time;
u64 cycle_counter;
@@ -759,6 +759,36 @@ static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
kunmap(pages[*target_page_nr]);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+static void *dma_buf_kmap_page(struct kbase_mem_phy_alloc *gpu_alloc,
+unsigned long page_num, struct page **page)
+{
+struct sg_table *sgt = gpu_alloc->imported.umm.sgt;
+struct sg_page_iter sg_iter;
+unsigned long page_index = 0;
+if (WARN_ON(gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
+return NULL;
+if (!sgt)
+return NULL;
+if (WARN_ON(page_num >= gpu_alloc->nents))
+return NULL;
+for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
+if (page_index == page_num) {
+*page = sg_page_iter_page(&sg_iter);
+return kmap(*page);
+}
+page_index++;
+}
+return NULL;
+}
+#endif
static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
struct kbase_debug_copy_buffer *buf_data)
{
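dma_buf_kmap() and dma_buf_kunmap() are gone on v5.6-era kernels, so the helper added above recovers the backing page by walking the import's already-mapped sg_table and kmap()ing the matching entry; callers must balance it with a plain kunmap() of the page returned through *page, which is exactly what the call-site hunks below do.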
@@ -818,7 +848,12 @@ static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
for (i = 0; i < buf_data->nr_extres_pages; i++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+struct page *pg;
+void *extres_page = dma_buf_kmap_page(gpu_alloc, i, &pg);
+#else
void *extres_page = dma_buf_kmap(dma_buf, i);
+#endif
if (extres_page)
kbase_mem_copy_from_extres_page(kctx,
@@ -827,7 +862,11 @@ static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
&target_page_nr,
offset, &to_copy);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+kunmap(pg);
+#else
dma_buf_kunmap(dma_buf, i, extres_page);
+#endif
if (target_page_nr >= buf_data->nr_pages)
break;
}
@@ -301,9 +301,12 @@ static void kbase_sync_fence_info_get(struct dma_fence *fence,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
scnprintf(info->name, sizeof(info->name), "%u#%u",
fence->context, fence->seqno);
-#else
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0))
scnprintf(info->name, sizeof(info->name), "%llu#%u",
fence->context, fence->seqno);
+#else
+scnprintf(info->name, sizeof(info->name), "%llu#%llu",
+fence->context, fence->seqno);
#endif
}
@@ -587,10 +587,10 @@ atomic_t kbase_tlstream_enabled = {0};
*/
static u64 kbasep_tlstream_get_timestamp(void)
{
-struct timespec ts;
+struct timespec64 ts;
u64 timestamp;
-getrawmonotonic(&ts);
+ktime_get_raw_ts64(&ts);
timestamp = (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
return timestamp;
}
@@ -49,8 +49,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
process of being returned to user */
#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_atoms_in_flight(ts.tv_sec, ts.tv_nsec, \
(int)kctx->timeline.owner_tgid, \
count); \
@@ -59,8 +59,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace atom_id being Ready to Run */
#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_atom(ts.tv_sec, ts.tv_nsec, \
CTX_FLOW_ATOM_READY, \
(int)kctx->timeline.owner_tgid, \
@@ -76,8 +76,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
* utilization easily and accurately */
#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_gpu_slot_active(ts.tv_sec, ts.tv_nsec, \
SW_SET_GPU_SLOT_ACTIVE, \
(int)kctx->timeline.owner_tgid, \
@@ -88,8 +88,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace atoms present in JS_NEXT */
#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
SW_SET_GPU_SLOT_NEXT, \
(int)kctx->timeline.owner_tgid, \
@@ -99,8 +99,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace atoms present in JS_HEAD */
#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
SW_SET_GPU_SLOT_HEAD, \
(int)kctx->timeline.owner_tgid, \
@@ -110,8 +110,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace that a soft stop/evict from next is being attempted on a slot */
#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
SW_SET_GPU_SLOT_STOPPING, \
(kctx) ? (int)kctx->timeline.owner_tgid : 0, \
@@ -123,8 +123,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace state of overall GPU power */
#define KBASE_TIMELINE_GPU_POWER(kbdev, active) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
SW_SET_GPU_POWER_ACTIVE, active); \
} while (0)
@@ -132,8 +132,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace state of tiler power */
#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
SW_SET_GPU_POWER_TILER_ACTIVE, \
hweight64(bitmap)); \
@@ -142,8 +142,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace number of shaders currently powered */
#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
SW_SET_GPU_POWER_SHADER_ACTIVE, \
hweight64(bitmap)); \
@@ -152,8 +152,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace state of L2 power */
#define KBASE_TIMELINE_POWER_L2(kbdev, bitmap) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
SW_SET_GPU_POWER_L2_ACTIVE, \
hweight64(bitmap)); \
@@ -162,8 +162,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace state of L2 cache*/
#define KBASE_TIMELINE_POWERING_L2(kbdev) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \
SW_FLOW_GPU_POWER_L2_POWERING, \
1); \
@@ -171,8 +171,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
#define KBASE_TIMELINE_POWERED_L2(kbdev) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \
SW_FLOW_GPU_POWER_L2_ACTIVE, \
1); \
@@ -181,8 +181,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace kbase_pm_send_event message send */
#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \
SW_FLOW_PM_SEND_EVENT, \
event_type, pm_event_id); \
@@ -191,8 +191,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace kbase_pm_worker message receive */
#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \
SW_FLOW_PM_HANDLE_EVENT, \
event_type, pm_event_id); \
@@ -202,8 +202,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace atom_id starting in JS_HEAD */
#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \
HW_START_GPU_JOB_CHAIN_SW_APPROX, \
(int)kctx->timeline.owner_tgid, \
@@ -213,8 +213,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace atom_id stopping on JS_HEAD */
#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \
HW_STOP_GPU_JOB_CHAIN_SW_APPROX, \
(int)kctx->timeline.owner_tgid, \
@@ -225,8 +225,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
* certin caller */
#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_pm_checktrans(ts.tv_sec, ts.tv_nsec, \
trace_code, 1); \
} while (0)
@@ -234,8 +234,8 @@ void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
/* Trace number of contexts active */
#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) \
do { \
-struct timespec ts; \
-getrawmonotonic(&ts); \
+struct timespec64 ts; \
+ktime_get_raw_ts64(&ts); \
trace_mali_timeline_context_active(ts.tv_sec, ts.tv_nsec, \
count); \
} while (0)
@@ -891,9 +891,9 @@ static void accum_clients(struct kbase_vinstr_context *vinstr_ctx)
*/
static u64 kbasep_vinstr_get_timestamp(void)
{
-struct timespec ts;
+struct timespec64 ts;
-getrawmonotonic(&ts);
+ktime_get_raw_ts64(&ts);
return (u64)ts.tv_sec * NSECS_IN_SEC + ts.tv_nsec;
}
@@ -87,9 +87,9 @@ static irqreturn_t kbase_gpu_irq_custom_handler(int irq, void *data)
val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);
if (val & TEST_IRQ) {
-struct timespec tval;
+struct timespec64 tval;
-getnstimeofday(&tval);
+ktime_get_real_ts64(&tval);
irq_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val,
@@ -179,12 +179,12 @@ static void mali_kutf_irq_latency(struct kutf_context *context)
GPU_IRQ_HANDLER);
for (i = 0; i < NR_TEST_IRQS; i++) {
-struct timespec tval;
+struct timespec64 tval;
u64 start_time;
int ret;
triggered = false;
-getnstimeofday(&tval);
+ktime_get_real_ts64(&tval);
start_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec);
/* Trigger fake IRQ */