/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
* (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU license.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
*/
/*
* THIS FILE IS AUTOGENERATED BY generate_tracepoints.py.
* DO NOT EDIT.
*/
#if !defined(_KBASE_TRACEPOINTS_H)
#define _KBASE_TRACEPOINTS_H
/* Tracepoints are abstract callbacks notifying that some important
* software or hardware event has happened.
*
* In this particular implementation, it results in a MIPE
* timeline event and, in some cases, also fires an ftrace
* event (a.k.a. Gator event, see details below).
*/
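/* Illustrative usage sketch (not part of the generated interface): a call
* site in the driver typically invokes one of the KBASE_TLSTREAM_* macros
* declared below, e.g.
*
*   KBASE_TLSTREAM_TL_NEW_CTX(kbdev, kctx, kctx_id, tgid);
*
* where kbdev is the struct kbase_device pointer and the remaining argument
* names are hypothetical placeholders. Each macro reads
* kbdev->timeline_flags and only emits the event when the relevant
* tracepoint class is enabled, so call sites need no additional guards.
*/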
#include "mali_kbase.h"
#include "mali_kbase_gator.h"
#include <linux/types.h>
#include <linux/atomic.h>
/* clang-format off */
struct kbase_tlstream;
extern const size_t __obj_stream_offset;
extern const size_t __aux_stream_offset;
/* This macro dispatches a kbase_tlstream from
* a kbase_device instance. Only the AUX and OBJ
* streams can be dispatched. The macro is aware of
* the kbase_timeline binary representation and
* relies on the offset variables
* __obj_stream_offset and __aux_stream_offset.
*/
#define __TL_DISPATCH_STREAM(kbdev, stype) \
((struct kbase_tlstream *) \
((u8 *)kbdev->timeline + __ ## stype ## _stream_offset))
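/* For illustration only: __TL_DISPATCH_STREAM(kbdev, obj) expands to
*
*   ((struct kbase_tlstream *)((u8 *)kbdev->timeline + __obj_stream_offset))
*
* i.e. it yields a pointer to the OBJ stream embedded inside the opaque
* kbase_timeline object; passing aux as the second argument selects the
* AUX stream via __aux_stream_offset instead.
*/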
struct tp_desc;
/* Descriptors of timeline messages transmitted in object events stream. */
extern const char *obj_desc_header;
extern const size_t obj_desc_header_size;
/* Descriptors of timeline messages transmitted in auxiliary events stream. */
extern const char *aux_desc_header;
extern const size_t aux_desc_header_size;
#define TL_ATOM_STATE_IDLE 0
#define TL_ATOM_STATE_READY 1
#define TL_ATOM_STATE_DONE 2
#define TL_ATOM_STATE_POSTED 3
#define TL_JS_EVENT_START GATOR_JOB_SLOT_START
#define TL_JS_EVENT_STOP GATOR_JOB_SLOT_STOP
#define TL_JS_EVENT_SOFT_STOP GATOR_JOB_SLOT_SOFT_STOPPED
#define TLSTREAM_ENABLED (1 << 31)
void __kbase_tlstream_tl_new_ctx(
struct kbase_tlstream *stream,
const void *ctx,
u32 ctx_nr,
u32 tgid
);
void __kbase_tlstream_tl_new_gpu(
struct kbase_tlstream *stream,
const void *gpu,
u32 gpu_id,
u32 core_count
);
void __kbase_tlstream_tl_new_lpu(
struct kbase_tlstream *stream,
const void *lpu,
u32 lpu_nr,
u32 lpu_fn
);
void __kbase_tlstream_tl_new_atom(
struct kbase_tlstream *stream,
const void *atom,
u32 atom_nr
);
void __kbase_tlstream_tl_new_as(
struct kbase_tlstream *stream,
const void *address_space,
u32 as_nr
);
void __kbase_tlstream_tl_del_ctx(
struct kbase_tlstream *stream,
const void *ctx
);
void __kbase_tlstream_tl_del_atom(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_lifelink_lpu_gpu(
struct kbase_tlstream *stream,
const void *lpu,
const void *gpu
);
void __kbase_tlstream_tl_lifelink_as_gpu(
struct kbase_tlstream *stream,
const void *address_space,
const void *gpu
);
void __kbase_tlstream_tl_ret_ctx_lpu(
struct kbase_tlstream *stream,
const void *ctx,
const void *lpu
);
void __kbase_tlstream_tl_ret_atom_ctx(
struct kbase_tlstream *stream,
const void *atom,
const void *ctx
);
void __kbase_tlstream_tl_ret_atom_lpu(
struct kbase_tlstream *stream,
const void *atom,
const void *lpu,
const char *attrib_match_list
);
void __kbase_tlstream_tl_nret_ctx_lpu(
struct kbase_tlstream *stream,
const void *ctx,
const void *lpu
);
void __kbase_tlstream_tl_nret_atom_ctx(
struct kbase_tlstream *stream,
const void *atom,
const void *ctx
);
void __kbase_tlstream_tl_nret_atom_lpu(
struct kbase_tlstream *stream,
const void *atom,
const void *lpu
);
void __kbase_tlstream_tl_ret_as_ctx(
struct kbase_tlstream *stream,
const void *address_space,
const void *ctx
);
void __kbase_tlstream_tl_nret_as_ctx(
struct kbase_tlstream *stream,
const void *address_space,
const void *ctx
);
void __kbase_tlstream_tl_ret_atom_as(
struct kbase_tlstream *stream,
const void *atom,
const void *address_space
);
void __kbase_tlstream_tl_nret_atom_as(
struct kbase_tlstream *stream,
const void *atom,
const void *address_space
);
void __kbase_tlstream_tl_attrib_atom_config(
struct kbase_tlstream *stream,
const void *atom,
u64 descriptor,
u64 affinity,
u32 config
);
void __kbase_tlstream_tl_jit_usedpages(
struct kbase_tlstream *stream,
u64 used_pages,
u32 j_id
);
void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
struct kbase_tlstream *stream,
const void *atom,
u64 va_pgs,
u64 com_pgs,
u64 extent,
u32 j_id,
u32 bin_id,
u32 max_allocs,
u32 jit_flags,
u32 usg_id
);
void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
struct kbase_tlstream *stream,
const void *atom,
u32 j_id
);
void __kbase_tlstream_tl_attrib_as_config(
struct kbase_tlstream *stream,
const void *address_space,
u64 transtab,
u64 memattr,
u64 transcfg
);
void __kbase_tlstream_tl_event_lpu_softstop(
struct kbase_tlstream *stream,
const void *lpu
);
void __kbase_tlstream_tl_event_atom_softstop_ex(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_event_atom_softstop_issue(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_event_atom_softjob_start(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_event_atom_softjob_end(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_arbiter_granted(
struct kbase_tlstream *stream,
const void *gpu
);
void __kbase_tlstream_tl_arbiter_started(
struct kbase_tlstream *stream,
const void *gpu
);
void __kbase_tlstream_tl_arbiter_stop_requested(
struct kbase_tlstream *stream,
const void *gpu
);
void __kbase_tlstream_tl_arbiter_stopped(
struct kbase_tlstream *stream,
const void *gpu
);
void __kbase_tlstream_tl_arbiter_requested(
struct kbase_tlstream *stream,
const void *gpu
);
void __kbase_tlstream_jd_gpu_soft_reset(
struct kbase_tlstream *stream,
const void *gpu
);
void __kbase_tlstream_jd_tiler_heap_chunk_alloc(
struct kbase_tlstream *stream,
u32 ctx_nr,
u64 heap_id,
u64 chunk_va
);
void __kbase_tlstream_tl_js_sched_start(
struct kbase_tlstream *stream,
u32 dummy
);
void __kbase_tlstream_tl_js_sched_end(
struct kbase_tlstream *stream,
u32 dummy
);
void __kbase_tlstream_tl_jd_submit_atom_start(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_jd_submit_atom_end(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_jd_done_no_lock_start(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_jd_done_no_lock_end(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_jd_done_start(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_jd_done_end(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_jd_atom_complete(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_run_atom_start(
struct kbase_tlstream *stream,
const void *atom,
u32 atom_nr
);
void __kbase_tlstream_tl_run_atom_end(
struct kbase_tlstream *stream,
const void *atom,
u32 atom_nr
);
void __kbase_tlstream_tl_attrib_atom_priority(
struct kbase_tlstream *stream,
const void *atom,
u32 prio
);
void __kbase_tlstream_tl_attrib_atom_state(
struct kbase_tlstream *stream,
const void *atom,
u32 state
);
void __kbase_tlstream_tl_attrib_atom_prioritized(
struct kbase_tlstream *stream,
const void *atom
);
void __kbase_tlstream_tl_attrib_atom_jit(
struct kbase_tlstream *stream,
const void *atom,
u64 edit_addr,
u64 new_addr,
u32 jit_flags,
u64 mem_flags,
u32 j_id,
u64 com_pgs,
u64 extent,
u64 va_pgs
);
void __kbase_tlstream_tl_kbase_new_device(
struct kbase_tlstream *stream,
u32 kbase_device_id,
u32 kbase_device_gpu_core_count,
u32 kbase_device_max_num_csgs,
u32 kbase_device_as_count,
u32 kbase_device_sb_entry_count,
u32 kbase_device_has_cross_stream_sync,
u32 kbase_device_supports_gpu_sleep
);
void __kbase_tlstream_tl_kbase_device_program_csg(
struct kbase_tlstream *stream,
u32 kbase_device_id,
u32 kernel_ctx_id,
u32 gpu_cmdq_grp_handle,
u32 kbase_device_csg_slot_index,
u32 kbase_device_csg_slot_resumed
);
void __kbase_tlstream_tl_kbase_device_deprogram_csg(
struct kbase_tlstream *stream,
u32 kbase_device_id,
u32 kbase_device_csg_slot_index
);
void __kbase_tlstream_tl_kbase_device_halt_csg(
struct kbase_tlstream *stream,
u32 kbase_device_id,
u32 kbase_device_csg_slot_index
);
void __kbase_tlstream_tl_kbase_new_ctx(
struct kbase_tlstream *stream,
u32 kernel_ctx_id,
u32 kbase_device_id
);
void __kbase_tlstream_tl_kbase_del_ctx(
struct kbase_tlstream *stream,
u32 kernel_ctx_id
);
void __kbase_tlstream_tl_kbase_ctx_assign_as(
struct kbase_tlstream *stream,
u32 kernel_ctx_id,
u32 kbase_device_as_index
);
void __kbase_tlstream_tl_kbase_ctx_unassign_as(
struct kbase_tlstream *stream,
u32 kernel_ctx_id
);
void __kbase_tlstream_tl_kbase_new_kcpuqueue(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 kcpu_queue_id,
u32 kernel_ctx_id,
u32 kcpuq_num_pending_cmds
);
void __kbase_tlstream_tl_kbase_del_kcpuqueue(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal(
struct kbase_tlstream *stream,
const void *kcpu_queue,
const void *fence
);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue,
const void *fence
);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 cqs_obj_gpu_addr,
u32 cqs_obj_compare_value,
u32 cqs_obj_inherit_error
);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 cqs_obj_gpu_addr
);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 map_import_buf_gpu_addr
);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 map_import_buf_gpu_addr
);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 map_import_buf_gpu_addr
);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend(
struct kbase_tlstream *stream,
const void *kcpu_queue,
const void *group_suspend_buf,
u32 gpu_cmdq_grp_handle
);
void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u64 jit_alloc_gpu_alloc_addr_dest,
u64 jit_alloc_va_pages,
u64 jit_alloc_commit_pages,
u64 jit_alloc_extent,
u32 jit_alloc_jit_id,
u32 jit_alloc_bin_id,
u32 jit_alloc_max_allocations,
u32 jit_alloc_flags,
u32 jit_alloc_usage_id
);
void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 jit_alloc_jit_id
);
void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error,
u64 jit_alloc_gpu_alloc_addr,
u64 jit_alloc_mmu_flags
);
void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error,
u64 jit_free_pages_used
);
void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_error_barrier(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_start(
struct kbase_tlstream *stream,
const void *kcpu_queue
);
void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_end(
struct kbase_tlstream *stream,
const void *kcpu_queue,
u32 execute_error
);
void __kbase_tlstream_tl_kbase_csffw_fw_reloading(
struct kbase_tlstream *stream,
u64 csffw_cycle
);
void __kbase_tlstream_tl_kbase_csffw_fw_enabling(
struct kbase_tlstream *stream,
u64 csffw_cycle
);
void __kbase_tlstream_tl_kbase_csffw_fw_request_sleep(
struct kbase_tlstream *stream,
u64 csffw_cycle
);
void __kbase_tlstream_tl_kbase_csffw_fw_request_wakeup(
struct kbase_tlstream *stream,
u64 csffw_cycle
);
void __kbase_tlstream_tl_kbase_csffw_fw_request_halt(
struct kbase_tlstream *stream,
u64 csffw_cycle
);
void __kbase_tlstream_tl_kbase_csffw_fw_disabling(
struct kbase_tlstream *stream,
u64 csffw_cycle
);
void __kbase_tlstream_tl_kbase_csffw_fw_off(
struct kbase_tlstream *stream,
u64 csffw_cycle
);
void __kbase_tlstream_tl_kbase_csffw_tlstream_overflow(
struct kbase_tlstream *stream,
u64 csffw_timestamp,
u64 csffw_cycle
);
void __kbase_tlstream_aux_pm_state(
struct kbase_tlstream *stream,
u32 core_type,
u64 core_state_bitset
);
void __kbase_tlstream_aux_pagefault(
struct kbase_tlstream *stream,
u32 ctx_nr,
u32 as_nr,
u64 page_cnt_change
);
void __kbase_tlstream_aux_pagesalloc(
struct kbase_tlstream *stream,
u32 ctx_nr,
u64 page_cnt
);
void __kbase_tlstream_aux_devfreq_target(
struct kbase_tlstream *stream,
u64 target_freq
);
void __kbase_tlstream_aux_jit_stats(
struct kbase_tlstream *stream,
u32 ctx_nr,
u32 bid,
u32 max_allocs,
u32 allocs,
u32 va_pages,
u32 ph_pages
);
void __kbase_tlstream_aux_tiler_heap_stats(
struct kbase_tlstream *stream,
u32 ctx_nr,
u64 heap_id,
u32 va_pages,
u32 ph_pages,
u32 max_chunks,
u32 chunk_size,
u32 chunk_count,
u32 target_in_flight,
u32 nr_in_flight
);
void __kbase_tlstream_aux_event_job_slot(
struct kbase_tlstream *stream,
const void *ctx,
u32 slot_nr,
u32 atom_nr,
u32 event
);
void __kbase_tlstream_aux_protected_enter_start(
struct kbase_tlstream *stream,
const void *gpu
);
void __kbase_tlstream_aux_protected_enter_end(
struct kbase_tlstream *stream,
const void *gpu
);
void __kbase_tlstream_aux_mmu_command(
struct kbase_tlstream *stream,
u32 kernel_ctx_id,
u32 mmu_cmd_id,
u32 mmu_synchronicity,
u64 mmu_lock_addr,
u32 mmu_lock_page_num
);
void __kbase_tlstream_aux_protected_leave_start(
struct kbase_tlstream *stream,
const void *gpu
);
void __kbase_tlstream_aux_protected_leave_end(
struct kbase_tlstream *stream,
const void *gpu
);
struct kbase_tlstream;
/**
* KBASE_TLSTREAM_TL_NEW_CTX - object ctx is created
*
* @kbdev: Kbase device
* @ctx: Name of the context object
* @ctx_nr: Kernel context number
* @tgid: Thread Group Id
*/
#define KBASE_TLSTREAM_TL_NEW_CTX( \
kbdev, \
ctx, \
ctx_nr, \
tgid \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
ctx, \
ctx_nr, \
tgid \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_NEW_GPU - object gpu is created
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
* @gpu_id: ID of the GPU object
* @core_count: Number of cores this GPU hosts
*/
#define KBASE_TLSTREAM_TL_NEW_GPU( \
kbdev, \
gpu, \
gpu_id, \
core_count \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_gpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
gpu, \
gpu_id, \
core_count \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_NEW_LPU - object lpu is created
*
* @kbdev: Kbase device
* @lpu: Name of the Logical Processing Unit object
* @lpu_nr: Sequential number assigned to the newly created LPU
* @lpu_fn: Property describing functional abilities of this LPU
*/
#define KBASE_TLSTREAM_TL_NEW_LPU( \
kbdev, \
lpu, \
lpu_nr, \
lpu_fn \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
lpu, \
lpu_nr, \
lpu_fn \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_NEW_ATOM - object atom is created
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @atom_nr: Sequential number of an atom
*/
#define KBASE_TLSTREAM_TL_NEW_ATOM( \
kbdev, \
atom, \
atom_nr \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_atom( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
atom_nr \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_NEW_AS - address space object is created
*
* @kbdev: Kbase device
* @address_space: Name of the address space object
* @as_nr: Address space number
*/
#define KBASE_TLSTREAM_TL_NEW_AS( \
kbdev, \
address_space, \
as_nr \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_new_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
address_space, \
as_nr \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_DEL_CTX - context is destroyed
*
* @kbdev: Kbase device
* @ctx: Name of the context object
*/
#define KBASE_TLSTREAM_TL_DEL_CTX( \
kbdev, \
ctx \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_del_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
ctx \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_DEL_ATOM - atom is destroyed
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_DEL_ATOM( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_del_atom( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU - lpu is deleted with gpu
*
* @kbdev: Kbase device
* @lpu: Name of the Logical Processing Unit object
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU( \
kbdev, \
lpu, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_lifelink_lpu_gpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
lpu, \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_LIFELINK_AS_GPU - address space is deleted with gpu
*
* @kbdev: Kbase device
* @address_space: Name of the address space object
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_TL_LIFELINK_AS_GPU( \
kbdev, \
address_space, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_lifelink_as_gpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
address_space, \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_RET_CTX_LPU - context is retained by lpu
*
* @kbdev: Kbase device
* @ctx: Name of the context object
* @lpu: Name of the Logical Processing Unit object
*/
#define KBASE_TLSTREAM_TL_RET_CTX_LPU( \
kbdev, \
ctx, \
lpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_ctx_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
ctx, \
lpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_RET_ATOM_CTX - atom is retained by context
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @ctx: Name of the context object
*/
#define KBASE_TLSTREAM_TL_RET_ATOM_CTX( \
kbdev, \
atom, \
ctx \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_atom_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
ctx \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_RET_ATOM_LPU - atom is retained by lpu
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @lpu: Name of the Logical Processing Unit object
* @attrib_match_list: List containing match operator attributes
*/
#define KBASE_TLSTREAM_TL_RET_ATOM_LPU( \
kbdev, \
atom, \
lpu, \
attrib_match_list \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_atom_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
lpu, \
attrib_match_list \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_NRET_CTX_LPU - context is released by lpu
*
* @kbdev: Kbase device
* @ctx: Name of the context object
* @lpu: Name of the Logical Processing Unit object
*/
#define KBASE_TLSTREAM_TL_NRET_CTX_LPU( \
kbdev, \
ctx, \
lpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_ctx_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
ctx, \
lpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_NRET_ATOM_CTX - atom is released by context
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @ctx: Name of the context object
*/
#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX( \
kbdev, \
atom, \
ctx \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_atom_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
ctx \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_NRET_ATOM_LPU - atom is released by lpu
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @lpu: Name of the Logical Processing Unit object
*/
#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU( \
kbdev, \
atom, \
lpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_atom_lpu( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
lpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_RET_AS_CTX - address space is retained by context
*
* @kbdev: Kbase device
* @address_space: Name of the address space object
* @ctx: Name of the context object
*/
#define KBASE_TLSTREAM_TL_RET_AS_CTX( \
kbdev, \
address_space, \
ctx \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_as_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
address_space, \
ctx \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_NRET_AS_CTX - address space is released by context
*
* @kbdev: Kbase device
* @address_space: Name of the address space object
* @ctx: Name of the context object
*/
#define KBASE_TLSTREAM_TL_NRET_AS_CTX( \
kbdev, \
address_space, \
ctx \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_as_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
address_space, \
ctx \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_RET_ATOM_AS - atom is retained by address space
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @address_space: Name of the address space object
*/
#define KBASE_TLSTREAM_TL_RET_ATOM_AS( \
kbdev, \
atom, \
address_space \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_ret_atom_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
address_space \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_NRET_ATOM_AS - atom is released by address space
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @address_space: Name of the address space object
*/
#define KBASE_TLSTREAM_TL_NRET_ATOM_AS( \
kbdev, \
atom, \
address_space \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_nret_atom_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
address_space \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG - atom job slot attributes
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @descriptor: Job descriptor address
* @affinity: Job affinity
* @config: Job config
*/
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG( \
kbdev, \
atom, \
descriptor, \
affinity, \
config \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_attrib_atom_config( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
descriptor, \
affinity, \
config \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JIT_USEDPAGES - used pages for jit
*
* @kbdev: Kbase device
* @used_pages: Number of pages used for jit
* @j_id: Unique ID provided by the caller, used to pair allocation and free requests.
*/
#define KBASE_TLSTREAM_TL_JIT_USEDPAGES( \
kbdev, \
used_pages, \
j_id \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jit_usedpages( \
__TL_DISPATCH_STREAM(kbdev, obj), \
used_pages, \
j_id \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO - Information about JIT allocations
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @va_pgs: The minimum number of virtual pages required
* @com_pgs: The minimum number of physical pages which should back the allocation.
* @extent: Granularity of physical pages to grow the allocation by during a fault.
* @j_id: Unique ID provided by the caller, used to pair allocation and free requests.
* @bin_id: The JIT allocation bin, used in conjunction with max_allocations to limit the number of each type of JIT allocation.
* @max_allocs: Maximum allocations allowed in this bin.
* @jit_flags: Flags specifying the special requirements for the JIT allocation.
* @usg_id: A hint about which allocation should be reused.
*/
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO( \
kbdev, \
atom, \
va_pgs, \
com_pgs, \
extent, \
j_id, \
bin_id, \
max_allocs, \
jit_flags, \
usg_id \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_attrib_atom_jitallocinfo( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
va_pgs, \
com_pgs, \
extent, \
j_id, \
bin_id, \
max_allocs, \
jit_flags, \
usg_id \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO - Information about JIT frees
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @j_id: Unique ID provided by the caller, used to pair allocation and free requests.
*/
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO( \
kbdev, \
atom, \
j_id \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_attrib_atom_jitfreeinfo( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
j_id \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG - address space attributes
*
* @kbdev: Kbase device
* @address_space: Name of the address space object
* @transtab: Configuration of the TRANSTAB register
* @memattr: Configuration of the MEMATTR register
* @transcfg: Configuration of the TRANSCFG register (or zero if not present)
*/
#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG( \
kbdev, \
address_space, \
transtab, \
memattr, \
transcfg \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_attrib_as_config( \
__TL_DISPATCH_STREAM(kbdev, obj), \
address_space, \
transtab, \
memattr, \
transcfg \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP - softstop event on given lpu
*
* @kbdev: Kbase device
* @lpu: Name of the Logical Processing Unit object
*/
#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP( \
kbdev, \
lpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_lpu_softstop( \
__TL_DISPATCH_STREAM(kbdev, obj), \
lpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX - atom softstopped
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_atom_softstop_ex( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE - atom softstop issued
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_atom_softstop_issue( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START - atom soft job has started
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_atom_softjob_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END - atom soft job has completed
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_event_atom_softjob_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ARBITER_GRANTED - Arbiter has granted gpu access
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_TL_ARBITER_GRANTED( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_granted( \
__TL_DISPATCH_STREAM(kbdev, obj), \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ARBITER_STARTED - Driver is running again and able to process jobs
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_TL_ARBITER_STARTED( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_started( \
__TL_DISPATCH_STREAM(kbdev, obj), \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ARBITER_STOP_REQUESTED - Arbiter has requested the driver to stop using the gpu
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_TL_ARBITER_STOP_REQUESTED( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_stop_requested( \
__TL_DISPATCH_STREAM(kbdev, obj), \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ARBITER_STOPPED - Driver has stopped using gpu
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_TL_ARBITER_STOPPED( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_stopped( \
__TL_DISPATCH_STREAM(kbdev, obj), \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ARBITER_REQUESTED - Driver has requested gpu access from the arbiter
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_TL_ARBITER_REQUESTED( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_arbiter_requested( \
__TL_DISPATCH_STREAM(kbdev, obj), \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_JD_GPU_SOFT_RESET - gpu soft reset
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_JD_GPU_SOFT_RESET( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_jd_gpu_soft_reset( \
__TL_DISPATCH_STREAM(kbdev, obj), \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_JD_TILER_HEAP_CHUNK_ALLOC - Tiler Heap Chunk Allocation
*
* @kbdev: Kbase device
* @ctx_nr: Kernel context number
* @heap_id: Unique id used to represent a heap under a context
* @chunk_va: Virtual start address of tiler heap chunk
*/
#define KBASE_TLSTREAM_JD_TILER_HEAP_CHUNK_ALLOC( \
kbdev, \
ctx_nr, \
heap_id, \
chunk_va \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_jd_tiler_heap_chunk_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
ctx_nr, \
heap_id, \
chunk_va \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JS_SCHED_START - Scheduling starts
*
* @kbdev: Kbase device
* @dummy: dummy argument
*/
#define KBASE_TLSTREAM_TL_JS_SCHED_START( \
kbdev, \
dummy \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_js_sched_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
dummy \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JS_SCHED_END - Scheduling ends
*
* @kbdev: Kbase device
* @dummy: dummy argument
*/
#define KBASE_TLSTREAM_TL_JS_SCHED_END( \
kbdev, \
dummy \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_js_sched_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
dummy \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_START - Submitting an atom starts
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_START( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_submit_atom_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_END - Submitting an atom ends
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_END( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_submit_atom_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_START - Start of kbase_jd_done_nolock
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_START( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_done_no_lock_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_END - End of kbase_jd_done_nolock
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_END( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_done_no_lock_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JD_DONE_START - Start of kbase_jd_done
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_JD_DONE_START( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_done_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JD_DONE_END - End of kbase_jd_done
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_JD_DONE_END( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_done_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_JD_ATOM_COMPLETE - Atom marked complete
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_JD_ATOM_COMPLETE( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_jd_atom_complete( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_RUN_ATOM_START - Running of atom starts
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @atom_nr: Sequential number of an atom
*/
#define KBASE_TLSTREAM_TL_RUN_ATOM_START( \
kbdev, \
atom, \
atom_nr \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_run_atom_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
atom_nr \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_RUN_ATOM_END - Running of atom ends
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @atom_nr: Sequential number of an atom
*/
#define KBASE_TLSTREAM_TL_RUN_ATOM_END( \
kbdev, \
atom, \
atom_nr \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_tl_run_atom_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
atom_nr \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY - atom priority
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @prio: Atom priority
*/
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY( \
kbdev, \
atom, \
prio \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_tl_attrib_atom_priority( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
prio \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE - atom state
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @state: Atom state
*/
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE( \
kbdev, \
atom, \
state \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_tl_attrib_atom_state( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
state \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED - atom caused priority change
*
* @kbdev: Kbase device
* @atom: Atom identifier
*/
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED( \
kbdev, \
atom \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_tl_attrib_atom_prioritized( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT - jit done for atom
*
* @kbdev: Kbase device
* @atom: Atom identifier
* @edit_addr: Address edited by jit
* @new_addr: Address placed into the edited location
* @jit_flags: Flags specifying the special requirements for the JIT allocation.
* @mem_flags: Flags defining the properties of a memory region
* @j_id: Unique ID provided by the caller, used to pair allocation and free requests.
* @com_pgs: The minimum number of physical pages which should back the allocation.
* @extent: Granularity of physical pages to grow the allocation by during a fault.
* @va_pgs: The minimum number of virtual pages required
*/
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT( \
kbdev, \
atom, \
edit_addr, \
new_addr, \
jit_flags, \
mem_flags, \
j_id, \
com_pgs, \
extent, \
va_pgs \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \
__kbase_tlstream_tl_attrib_atom_jit( \
__TL_DISPATCH_STREAM(kbdev, obj), \
atom, \
edit_addr, \
new_addr, \
jit_flags, \
mem_flags, \
j_id, \
com_pgs, \
extent, \
va_pgs \
); \
} while (0)
/**
* KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE - New KBase Device
*
* @kbdev: Kbase device
* @kbase_device_id: The ID of the physical hardware
* @kbase_device_gpu_core_count: The number of gpu cores in the physical hardware
* @kbase_device_max_num_csgs: The max number of CSGs the physical hardware supports
* @kbase_device_as_count: The number of address spaces the physical hardware has available
* @kbase_device_sb_entry_count: The number of entries each scoreboard set in the physical hardware has available
* @kbase_device_has_cross_stream_sync: Whether cross-stream synchronization is supported
* @kbase_device_supports_gpu_sleep: Whether GPU sleep is supported
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE( \
kbdev, \
kbase_device_id, \
kbase_device_gpu_core_count, \
kbase_device_max_num_csgs, \
kbase_device_as_count, \
kbase_device_sb_entry_count, \
kbase_device_has_cross_stream_sync, \
kbase_device_supports_gpu_sleep \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_new_device( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kbase_device_id, \
kbase_device_gpu_core_count, \
kbase_device_max_num_csgs, \
kbase_device_as_count, \
kbase_device_sb_entry_count, \
kbase_device_has_cross_stream_sync, \
kbase_device_supports_gpu_sleep \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE( \
kbdev, \
kbase_device_id, \
kbase_device_gpu_core_count, \
kbase_device_max_num_csgs, \
kbase_device_as_count, \
kbase_device_sb_entry_count, \
kbase_device_has_cross_stream_sync, \
kbase_device_supports_gpu_sleep \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
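/* Note on the #if MALI_USE_CSF pattern above (repeated for every CSF
* tracepoint in this file): when MALI_USE_CSF is not set, the macro is
* still defined but expands to an empty do { } while (0), so a call such
* as the illustrative
*
*   KBASE_TLSTREAM_TL_KBASE_NEW_DEVICE(kbdev, id, core_count, ...);
*
* compiles away cleanly on non-CSF builds without any #ifdef at the call
* site (the argument names above are placeholders).
*/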
/**
* KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG - CSG is programmed to a slot
*
* @kbdev: Kbase device
* @kbase_device_id: The ID of the physical hardware
* @kernel_ctx_id: Unique ID for the KBase Context
* @gpu_cmdq_grp_handle: GPU Command Queue Group handle which will match userspace
* @kbase_device_csg_slot_index: The index of the slot in the scheduler being programmed
* @kbase_device_csg_slot_resumed: Whether the csg is being resumed
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG( \
kbdev, \
kbase_device_id, \
kernel_ctx_id, \
gpu_cmdq_grp_handle, \
kbase_device_csg_slot_index, \
kbase_device_csg_slot_resumed \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_device_program_csg( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kbase_device_id, \
kernel_ctx_id, \
gpu_cmdq_grp_handle, \
kbase_device_csg_slot_index, \
kbase_device_csg_slot_resumed \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_PROGRAM_CSG( \
kbdev, \
kbase_device_id, \
kernel_ctx_id, \
gpu_cmdq_grp_handle, \
kbase_device_csg_slot_index, \
kbase_device_csg_slot_resumed \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG - CSG is deprogrammed from a slot
*
* @kbdev: Kbase device
* @kbase_device_id: The ID of the physical hardware
* @kbase_device_csg_slot_index: The index of the slot in the scheduler being deprogrammed
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG( \
kbdev, \
kbase_device_id, \
kbase_device_csg_slot_index \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_device_deprogram_csg( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kbase_device_id, \
kbase_device_csg_slot_index \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_DEPROGRAM_CSG( \
kbdev, \
kbase_device_id, \
kbase_device_csg_slot_index \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_DEVICE_HALT_CSG - CSG is halted
*
* @kbdev: Kbase device
* @kbase_device_id: The ID of the physical hardware
* @kbase_device_csg_slot_index: The index of the slot in the scheduler being halted
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_HALT_CSG( \
kbdev, \
kbase_device_id, \
kbase_device_csg_slot_index \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_device_halt_csg( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kbase_device_id, \
kbase_device_csg_slot_index \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEVICE_HALT_CSG( \
kbdev, \
kbase_device_id, \
kbase_device_csg_slot_index \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_NEW_CTX - New KBase Context
*
* @kbdev: Kbase device
* @kernel_ctx_id: Unique ID for the KBase Context
* @kbase_device_id: The ID of the physical hardware
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_CTX( \
kbdev, \
kernel_ctx_id, \
kbase_device_id \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_new_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kernel_ctx_id, \
kbase_device_id \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_NEW_CTX( \
kbdev, \
kernel_ctx_id, \
kbase_device_id \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_DEL_CTX - Delete KBase Context
*
* @kbdev: Kbase device
* @kernel_ctx_id: Unique ID for the KBase Context
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEL_CTX( \
kbdev, \
kernel_ctx_id \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_del_ctx( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kernel_ctx_id \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEL_CTX( \
kbdev, \
kernel_ctx_id \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS - Address Space is assigned to a KBase context
*
* @kbdev: Kbase device
* @kernel_ctx_id: Unique ID for the KBase Context
* @kbase_device_as_index: The index of the device address space being assigned
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS( \
kbdev, \
kernel_ctx_id, \
kbase_device_as_index \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_ctx_assign_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kernel_ctx_id, \
kbase_device_as_index \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS( \
kbdev, \
kernel_ctx_id, \
kbase_device_as_index \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS - Address Space is unassigned from a KBase context
*
* @kbdev: Kbase device
* @kernel_ctx_id: Unique ID for the KBase Context
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS( \
kbdev, \
kernel_ctx_id \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_ctx_unassign_as( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kernel_ctx_id \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS( \
kbdev, \
kernel_ctx_id \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE - New KCPU Queue
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @kcpu_queue_id: KCPU queue ID
* @kernel_ctx_id: Unique ID for the KBase Context
* @kcpuq_num_pending_cmds: Number of commands already enqueued in the KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE( \
kbdev, \
kcpu_queue, \
kcpu_queue_id, \
kernel_ctx_id, \
kcpuq_num_pending_cmds \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_new_kcpuqueue( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
kcpu_queue_id, \
kernel_ctx_id, \
kcpuq_num_pending_cmds \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_NEW_KCPUQUEUE( \
kbdev, \
kcpu_queue, \
kcpu_queue_id, \
kernel_ctx_id, \
kcpuq_num_pending_cmds \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE - Delete KCPU Queue
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_del_kcpuqueue( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_DEL_KCPUQUEUE( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL - KCPU Queue enqueues Signal on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @fence: Fence object handle
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
kbdev, \
kcpu_queue, \
fence \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
fence \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \
kbdev, \
kcpu_queue, \
fence \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT - KCPU Queue enqueues Wait on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @fence: Fence object handle
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
kbdev, \
kcpu_queue, \
fence \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
fence \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \
kbdev, \
kcpu_queue, \
fence \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT - KCPU Queue enqueues Wait on Cross Queue Sync Object
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @cqs_obj_gpu_addr: CQS Object GPU pointer
* @cqs_obj_compare_value: Semaphore value that should be exceeded for the WAIT to pass
* @cqs_obj_inherit_error: Flag which indicates if the CQS object error state should be inherited by the queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
kbdev, \
kcpu_queue, \
cqs_obj_gpu_addr, \
cqs_obj_compare_value, \
cqs_obj_inherit_error \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
cqs_obj_gpu_addr, \
cqs_obj_compare_value, \
cqs_obj_inherit_error \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT( \
kbdev, \
kcpu_queue, \
cqs_obj_gpu_addr, \
cqs_obj_compare_value, \
cqs_obj_inherit_error \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET - KCPU Queue enqueues Set on Cross Queue Sync Object
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @cqs_obj_gpu_addr: CQS Object GPU pointer
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET( \
kbdev, \
kcpu_queue, \
cqs_obj_gpu_addr \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
cqs_obj_gpu_addr \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET( \
kbdev, \
kcpu_queue, \
cqs_obj_gpu_addr \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT - KCPU Queue enqueues Map Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @map_import_buf_gpu_addr: Map import buffer GPU pointer
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
kbdev, \
kcpu_queue, \
map_import_buf_gpu_addr \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
map_import_buf_gpu_addr \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \
kbdev, \
kcpu_queue, \
map_import_buf_gpu_addr \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT - KCPU Queue enqueues Unmap Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @map_import_buf_gpu_addr: Map import buffer GPU pointer
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
kbdev, \
kcpu_queue, \
map_import_buf_gpu_addr \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
map_import_buf_gpu_addr \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \
kbdev, \
kcpu_queue, \
map_import_buf_gpu_addr \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE - KCPU Queue enqueues Unmap Import ignoring reference count
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @map_import_buf_gpu_addr: Map import buffer GPU pointer
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE( \
kbdev, \
kcpu_queue, \
map_import_buf_gpu_addr \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
map_import_buf_gpu_addr \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE( \
kbdev, \
kcpu_queue, \
map_import_buf_gpu_addr \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER - KCPU Queue enqueues Error Barrier
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND - KCPU Queue enqueues Group Suspend
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @group_suspend_buf: Pointer to the suspend buffer structure
* @gpu_cmdq_grp_handle: GPU Command Queue Group handle which will match userspace
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND( \
kbdev, \
kcpu_queue, \
group_suspend_buf, \
gpu_cmdq_grp_handle \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
group_suspend_buf, \
gpu_cmdq_grp_handle \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND( \
kbdev, \
kcpu_queue, \
group_suspend_buf, \
gpu_cmdq_grp_handle \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
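/* Illustrative sketch, not part of the generated API: an enqueue path
* might emit the Group Suspend tracepoint as below. `queue`, `sus_buf`
* and `group_handle` are hypothetical locals; only the macro itself is
* defined by this header. `group_handle` is expected to be the same
* handle that userspace uses to refer to the GPU command queue group.
*
*   KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND(
*           kbdev, queue, sus_buf, group_handle);
*/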
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC - Begin array of KCPU Queue enqueues JIT Alloc
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC - Array item of KCPU Queue enqueues JIT Alloc
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @jit_alloc_gpu_alloc_addr_dest: The GPU virtual address to write the JIT allocated GPU virtual address to
* @jit_alloc_va_pages: The minimum number of virtual pages required
* @jit_alloc_commit_pages: The minimum number of physical pages which should back the allocation
* @jit_alloc_extent: Granularity of physical pages to grow the allocation by during a fault
* @jit_alloc_jit_id: Unique ID provided by the caller; it is used to pair allocation and free requests. Zero is not a valid value
* @jit_alloc_bin_id: The JIT allocation bin, used in conjunction with max_allocations to limit the number of each type of JIT allocation
* @jit_alloc_max_allocations: The maximum number of allocations allowed within the bin specified by bin_id. Should be the same for all JIT allocations within the same bin.
* @jit_alloc_flags: Flags specifying the special requirements for the JIT allocation
* @jit_alloc_usage_id: A hint about which allocation should be reused. The kernel should attempt to use a previous allocation with the same usage_id
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
kbdev, \
kcpu_queue, \
jit_alloc_gpu_alloc_addr_dest, \
jit_alloc_va_pages, \
jit_alloc_commit_pages, \
jit_alloc_extent, \
jit_alloc_jit_id, \
jit_alloc_bin_id, \
jit_alloc_max_allocations, \
jit_alloc_flags, \
jit_alloc_usage_id \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
jit_alloc_gpu_alloc_addr_dest, \
jit_alloc_va_pages, \
jit_alloc_commit_pages, \
jit_alloc_extent, \
jit_alloc_jit_id, \
jit_alloc_bin_id, \
jit_alloc_max_allocations, \
jit_alloc_flags, \
jit_alloc_usage_id \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
kbdev, \
kcpu_queue, \
jit_alloc_gpu_alloc_addr_dest, \
jit_alloc_va_pages, \
jit_alloc_commit_pages, \
jit_alloc_extent, \
jit_alloc_jit_id, \
jit_alloc_bin_id, \
jit_alloc_max_allocations, \
jit_alloc_flags, \
jit_alloc_usage_id \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC - End array of KCPU Queue enqueues JIT Alloc
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
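/* Illustrative sketch of the Begin/Item/End pattern used by the array
* tracepoints above (assumption: every item of the enqueued JIT Alloc
* command is reported between one Begin and one End marker). The loop
* and the `info[]` array with its field names are hypothetical; only
* the macros themselves are defined by this header.
*
*   KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC(kbdev, queue);
*   for (i = 0; i < count; i++)
*           KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC(
*                   kbdev, queue,
*                   info[i].gpu_alloc_addr, info[i].va_pages,
*                   info[i].commit_pages, info[i].extent,
*                   info[i].id, info[i].bin_id,
*                   info[i].max_allocations, info[i].flags,
*                   info[i].usage_id);
*   KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC(kbdev, queue);
*/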
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE - Begin array of KCPU Queue enqueues JIT Free
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE - Array item of KCPU Queue enqueues JIT Free
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @jit_alloc_jit_id: Unique ID provided by the caller; it is used to pair allocation and free requests. Zero is not a valid value
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
kbdev, \
kcpu_queue, \
jit_alloc_jit_id \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
jit_alloc_jit_id \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \
kbdev, \
kcpu_queue, \
jit_alloc_jit_id \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE - End array of KCPU Queue enqueues JIT Free
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START - KCPU Queue starts a Signal on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END - KCPU Queue ends a Signal on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START - KCPU Queue starts a Wait on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END - KCPU Queue ends a Wait on Fence
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
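/* Illustrative sketch of the Start/End pairing (assumption: the
* execution path emits the START tracepoint just before the blocking
* operation and the END tracepoint once it has completed, passing the
* resulting error code, zero on success). `queue`, `err` and
* wait_for_fence() are hypothetical.
*
*   KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START(kbdev, queue);
*   err = wait_for_fence(fence);
*   KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END(kbdev, queue, err);
*/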
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START - KCPU Queue starts a Wait on an array of Cross Queue Sync Objects
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END - KCPU Queue ends a Wait on an array of Cross Queue Sync Objects
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET - KCPU Queue executes a Set on an array of Cross Queue Sync Objects
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START - KCPU Queue starts a Map Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END - KCPU Queue ends a Map Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START - KCPU Queue starts an Unmap Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END - KCPU Queue ends an Unmap Import
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START - KCPU Queue starts an Unmap Import ignoring reference count
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END - KCPU Queue ends an Unmap Import ignoring reference count
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START - KCPU Queue starts an array of JIT Allocs
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END - Begin array of KCPU Queue ends an array of JIT Allocs
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END - Array item of KCPU Queue ends an array of JIT Allocs
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
* @jit_alloc_gpu_alloc_addr: The JIT allocated GPU virtual address
* @jit_alloc_mmu_flags: The MMU flags for the JIT allocation
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
kbdev, \
kcpu_queue, \
execute_error, \
jit_alloc_gpu_alloc_addr, \
jit_alloc_mmu_flags \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error, \
jit_alloc_gpu_alloc_addr, \
jit_alloc_mmu_flags \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
kbdev, \
kcpu_queue, \
execute_error, \
jit_alloc_gpu_alloc_addr, \
jit_alloc_mmu_flags \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END - End array of KCPU Queue ends an array of JIT Allocs
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START - KCPU Queue starts an array of JIT Frees
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END - Begin array of KCPU Queue ends an array of JIT Frees
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END - Array item of KCPU Queue ends an array of JIT Frees
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
* @jit_free_pages_used: The actual number of pages used by the JIT allocation
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
kbdev, \
kcpu_queue, \
execute_error, \
jit_free_pages_used \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error, \
jit_free_pages_used \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
kbdev, \
kcpu_queue, \
execute_error, \
jit_free_pages_used \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END - End array of KCPU Queue ends an array of JIT Frees
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER - KCPU Queue executes an Error Barrier
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_error_barrier( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START - KCPU Queue starts a group suspend
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START( \
kbdev, \
kcpu_queue \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_start( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START( \
kbdev, \
kcpu_queue \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END - KCPU Queue ends a group suspend
*
* @kbdev: Kbase device
* @kcpu_queue: KCPU queue
* @execute_error: Non-zero error code if KCPU Queue item completed with error, else zero
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_end( \
__TL_DISPATCH_STREAM(kbdev, obj), \
kcpu_queue, \
execute_error \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END( \
kbdev, \
kcpu_queue, \
execute_error \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_RELOADING - CSF FW is being reloaded
*
* @kbdev: Kbase device
* @csffw_cycle: Cycle number of a CSFFW event
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_RELOADING( \
kbdev, \
csffw_cycle \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_reloading( \
__TL_DISPATCH_STREAM(kbdev, obj), \
csffw_cycle \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_RELOADING( \
kbdev, \
csffw_cycle \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_ENABLING - CSF FW is being enabled
*
* @kbdev: Kbase device
* @csffw_cycle: Cycle number of a CSFFW event
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_ENABLING( \
kbdev, \
csffw_cycle \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_enabling( \
__TL_DISPATCH_STREAM(kbdev, obj), \
csffw_cycle \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_ENABLING( \
kbdev, \
csffw_cycle \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP - CSF FW sleep is requested
*
* @kbdev: Kbase device
* @csffw_cycle: Cycle number of a CSFFW event
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP( \
kbdev, \
csffw_cycle \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_request_sleep( \
__TL_DISPATCH_STREAM(kbdev, obj), \
csffw_cycle \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_SLEEP( \
kbdev, \
csffw_cycle \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP - CSF FW wake up is requested
*
* @kbdev: Kbase device
* @csffw_cycle: Cycle number of a CSFFW event
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP( \
kbdev, \
csffw_cycle \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_request_wakeup( \
__TL_DISPATCH_STREAM(kbdev, obj), \
csffw_cycle \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP( \
kbdev, \
csffw_cycle \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT - CSF FW halt is requested
*
* @kbdev: Kbase device
* @csffw_cycle: Cycle number of a CSFFW event
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT( \
kbdev, \
csffw_cycle \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_request_halt( \
__TL_DISPATCH_STREAM(kbdev, obj), \
csffw_cycle \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT( \
kbdev, \
csffw_cycle \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING - CSF FW is being disabled
*
* @kbdev: Kbase device
* @csffw_cycle: Cycle number of a CSFFW event
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING( \
kbdev, \
csffw_cycle \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_disabling( \
__TL_DISPATCH_STREAM(kbdev, obj), \
csffw_cycle \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING( \
kbdev, \
csffw_cycle \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF - CSF FW is off
*
* @kbdev: Kbase device
* @csffw_cycle: Cycle number of a CSFFW event
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF( \
kbdev, \
csffw_cycle \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_fw_off( \
__TL_DISPATCH_STREAM(kbdev, obj), \
csffw_cycle \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF( \
kbdev, \
csffw_cycle \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
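/* The CSF FW state tracepoints above are all gated on
* BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS rather than
* BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS. Illustrative sketch only
* (`cycle` is a hypothetical cycle counter value); a power-down might
* be reported as:
*
*   KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_HALT(kbdev, cycle);
*   KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_DISABLING(kbdev, cycle);
*   KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_OFF(kbdev, cycle);
*/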
/**
* KBASE_TLSTREAM_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW - An overflow has happened with the CSFFW Timeline stream
*
* @kbdev: Kbase device
* @csffw_timestamp: Timestamp of a CSFFW event
* @csffw_cycle: Cycle number of a CSFFW event
*/
#if MALI_USE_CSF
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW( \
kbdev, \
csffw_timestamp, \
csffw_cycle \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS) \
__kbase_tlstream_tl_kbase_csffw_tlstream_overflow( \
__TL_DISPATCH_STREAM(kbdev, obj), \
csffw_timestamp, \
csffw_cycle \
); \
} while (0)
#else
#define KBASE_TLSTREAM_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW( \
kbdev, \
csffw_timestamp, \
csffw_cycle \
) \
do { } while (0)
#endif /* MALI_USE_CSF */
/**
* KBASE_TLSTREAM_AUX_PM_STATE - PM state
*
* @kbdev: Kbase device
* @core_type: Core type (shader, tiler, l2 cache, l3 cache)
* @core_state_bitset: 64-bit bitmask reporting the power state of the cores (1 - ON, 0 - OFF)
*/
#define KBASE_TLSTREAM_AUX_PM_STATE( \
kbdev, \
core_type, \
core_state_bitset \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pm_state( \
__TL_DISPATCH_STREAM(kbdev, aux), \
core_type, \
core_state_bitset \
); \
} while (0)
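/* Illustrative sketch, not part of the generated API: reporting that
* the first two cores of a core type are powered on. The core type
* value is assumed to come from the core-type enumeration used
* elsewhere in the driver; `core_type` here is a hypothetical local.
*
*   KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, 0x3);
*/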
/**
* KBASE_TLSTREAM_AUX_PAGEFAULT - Page fault
*
* @kbdev: Kbase device
* @ctx_nr: Kernel context number
* @as_nr: Address space number
* @page_cnt_change: Number of pages to be added
*/
#define KBASE_TLSTREAM_AUX_PAGEFAULT( \
kbdev, \
ctx_nr, \
as_nr, \
page_cnt_change \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pagefault( \
__TL_DISPATCH_STREAM(kbdev, aux), \
ctx_nr, \
as_nr, \
page_cnt_change \
); \
} while (0)
/**
* KBASE_TLSTREAM_AUX_PAGESALLOC - Total alloc pages change
*
* @kbdev: Kbase device
* @ctx_nr: Kernel context number
* @page_cnt: Number of pages used by the context
*/
#define KBASE_TLSTREAM_AUX_PAGESALLOC( \
kbdev, \
ctx_nr, \
page_cnt \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pagesalloc( \
__TL_DISPATCH_STREAM(kbdev, aux), \
ctx_nr, \
page_cnt \
); \
} while (0)
/**
* KBASE_TLSTREAM_AUX_DEVFREQ_TARGET - New device frequency target
*
* @kbdev: Kbase device
* @target_freq: New target frequency
*/
#define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET( \
kbdev, \
target_freq \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_devfreq_target( \
__TL_DISPATCH_STREAM(kbdev, aux), \
target_freq \
); \
} while (0)
/**
* KBASE_TLSTREAM_AUX_JIT_STATS - per-bin JIT statistics
*
* @kbdev: Kbase device
* @ctx_nr: Kernel context number
* @bid: JIT bin id
* @max_allocs: Maximum allocations allowed in this bin
* @allocs: Number of active allocations in this bin
* @va_pages: Number of virtual pages allocated in this bin
* @ph_pages: Number of physical pages allocated in this bin
*/
#define KBASE_TLSTREAM_AUX_JIT_STATS( \
kbdev, \
ctx_nr, \
bid, \
max_allocs, \
allocs, \
va_pages, \
ph_pages \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_jit_stats( \
__TL_DISPATCH_STREAM(kbdev, aux), \
ctx_nr, \
bid, \
max_allocs, \
allocs, \
va_pages, \
ph_pages \
); \
} while (0)
/**
* KBASE_TLSTREAM_AUX_TILER_HEAP_STATS - Tiler Heap statistics
*
* @kbdev: Kbase device
* @ctx_nr: Kernel context number
* @heap_id: Unique id used to represent a heap under a context
* @va_pages: Number of virtual pages allocated in this heap
* @ph_pages: Number of physical pages allocated in this heap
* @max_chunks: The maximum number of chunks that the heap should be allowed to use
* @chunk_size: Size of each chunk in tiler heap, in bytes
* @chunk_count: The number of chunks currently allocated in the tiler heap
* @target_in_flight: Number of render passes that the driver should attempt to keep in flight and for which allocation of new chunks is allowed
* @nr_in_flight: Number of render passes that are in flight
*/
#define KBASE_TLSTREAM_AUX_TILER_HEAP_STATS( \
kbdev, \
ctx_nr, \
heap_id, \
va_pages, \
ph_pages, \
max_chunks, \
chunk_size, \
chunk_count, \
target_in_flight, \
nr_in_flight \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_tiler_heap_stats( \
__TL_DISPATCH_STREAM(kbdev, aux), \
ctx_nr, \
heap_id, \
va_pages, \
ph_pages, \
max_chunks, \
chunk_size, \
chunk_count, \
target_in_flight, \
nr_in_flight \
); \
} while (0)
/**
* KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT - event on a given job slot
*
* @kbdev: Kbase device
* @ctx: Name of the context object
* @slot_nr: Job slot number
* @atom_nr: Sequential number of an atom
* @event: Event type. One of the TL_JS_EVENT values
*/
#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT( \
kbdev, \
ctx, \
slot_nr, \
atom_nr, \
event \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_event_job_slot( \
__TL_DISPATCH_STREAM(kbdev, aux), \
ctx, \
slot_nr, \
atom_nr, \
event \
); \
} while (0)
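/* Illustrative sketch: marking the start of an atom on job slot 1.
* `kctx` and `atom_number` are hypothetical locals; the event value is
* one of the TL_JS_EVENT_* constants defined earlier in this header.
*
*   KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, kctx, 1, atom_number,
*           TL_JS_EVENT_START);
*/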
/**
* KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START - enter protected mode start
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_protected_enter_start( \
__TL_DISPATCH_STREAM(kbdev, aux), \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END - enter protected mode end
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_protected_enter_end( \
__TL_DISPATCH_STREAM(kbdev, aux), \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_AUX_MMU_COMMAND - mmu commands with synchronicity info
*
* @kbdev: Kbase device
* @kernel_ctx_id: Unique ID for the KBase Context
* @mmu_cmd_id: MMU Command ID (e.g. AS_COMMAND_UPDATE)
* @mmu_synchronicity: Indicates whether the command relates to the currently running job that must be resolved for it to make progress (synchronous, e.g. grow on page fault, JIT), or not (asynchronous, e.g. IOCTL calls from user space). This parameter is 0 for an asynchronous operation.
* @mmu_lock_addr: Start address of the regions to be locked/unlocked/invalidated
* @mmu_lock_page_num: Number of pages to be locked/unlocked/invalidated
*/
#define KBASE_TLSTREAM_AUX_MMU_COMMAND( \
kbdev, \
kernel_ctx_id, \
mmu_cmd_id, \
mmu_synchronicity, \
mmu_lock_addr, \
mmu_lock_page_num \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_mmu_command( \
__TL_DISPATCH_STREAM(kbdev, aux), \
kernel_ctx_id, \
mmu_cmd_id, \
mmu_synchronicity, \
mmu_lock_addr, \
mmu_lock_page_num \
); \
} while (0)
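/* Illustrative sketch: reporting a synchronous MMU update issued while
* resolving a page fault (mmu_synchronicity = 1); an operation driven
* by an IOCTL would instead pass 0. Apart from the macro itself and
* AS_COMMAND_UPDATE (cited above), the argument names are hypothetical.
*
*   KBASE_TLSTREAM_AUX_MMU_COMMAND(kbdev, kernel_ctx_id,
*           AS_COMMAND_UPDATE, 1, fault_addr, nr_pages);
*/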
/**
* KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START - leave protected mode start
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_aux_protected_leave_start( \
__TL_DISPATCH_STREAM(kbdev, aux), \
gpu \
); \
} while (0)
/**
* KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END - leave protected mode end
*
* @kbdev: Kbase device
* @gpu: Name of the GPU object
*/
#define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END( \
kbdev, \
gpu \
) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
__kbase_tlstream_aux_protected_leave_end( \
__TL_DISPATCH_STREAM(kbdev, aux), \
gpu \
); \
} while (0)
/* Gator tracepoints are hooked into the TLSTREAM interface.
* When the following tracepoints are called, the corresponding
* Gator tracepoint will be called as well.
*/
#if defined(CONFIG_MALI_GATOR_SUPPORT)
/* `event` is one of the TL_JS_EVENT values here.
* The values of TL_JS_EVENT are guaranteed to match
* the corresponding GATOR_JOB_SLOT values.
*/
#undef KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT
#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, \
context, slot_nr, atom_nr, event) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
kbase_trace_mali_job_slots_event(kbdev->id, \
GATOR_MAKE_EVENT(event, slot_nr), \
context, (u8) atom_nr); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_event_job_slot( \
__TL_DISPATCH_STREAM(kbdev, aux), \
context, slot_nr, atom_nr, event); \
} while (0)
#undef KBASE_TLSTREAM_AUX_PM_STATE
#define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
kbase_trace_mali_pm_status(kbdev->id, \
core_type, state); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pm_state( \
__TL_DISPATCH_STREAM(kbdev, aux), \
core_type, state); \
} while (0)
#undef KBASE_TLSTREAM_AUX_PAGEFAULT
#define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, \
ctx_nr, as_nr, page_cnt_change) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
kbase_trace_mali_page_fault_insert_pages(kbdev->id, \
as_nr, \
page_cnt_change); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pagefault( \
__TL_DISPATCH_STREAM(kbdev, aux), \
ctx_nr, as_nr, page_cnt_change); \
} while (0)
/* kbase_trace_mali_total_alloc_pages_change is handled differently here.
* We stream the total number of pages allocated for `kbdev` rather
* than `page_cnt`, which is per-context.
*/
#undef KBASE_TLSTREAM_AUX_PAGESALLOC
#define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_cnt) \
do { \
int enabled = atomic_read(&kbdev->timeline_flags); \
u32 global_pages_count = \
atomic_read(&kbdev->memdev.used_pages); \
\
kbase_trace_mali_total_alloc_pages_change(kbdev->id, \
global_pages_count); \
if (enabled & TLSTREAM_ENABLED) \
__kbase_tlstream_aux_pagesalloc( \
__TL_DISPATCH_STREAM(kbdev, aux), \
ctx_nr, page_cnt); \
} while (0)
#endif /* CONFIG_MALI_GATOR_SUPPORT */
/* clang-format on */
#endif /* _KBASE_TRACEPOINTS_H */