mirror of https://gitlab.freedesktop.org/mesa/mesa.git
venus: update tracepoints to align with later optimizations
Remove redundant tracepoints that are no longer needed, update a few existing ones, and add some trivial ones that will be helpful for upcoming changes.

Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/27450>
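For context, VN_TRACE_FUNC() opens a CPU-trace scope named after the current function and VN_TRACE_SCOPE("name") opens an explicitly named one; both close automatically when the enclosing block ends (in Mesa they are thin wrappers over the util/perf/cpu_trace.h helpers). Below is a minimal, self-contained sketch of that scoped-tracepoint pattern, using hypothetical stand-in names (trace_scope, TRACE_SCOPE, TRACE_FUNC) rather than the real macros:

/* Minimal sketch of a scoped CPU tracepoint; illustrative stand-ins only,
 * not the real VN_TRACE_* / MESA_TRACE_* macros.
 */
#include <stdio.h>

struct trace_scope {
   const char *name;
};

/* Runs automatically when the traced block is left. */
static void
trace_scope_end(struct trace_scope *scope)
{
   printf("end   %s\n", scope->name);
}

/* Open a named scope that closes itself at the end of the enclosing block
 * (the GCC/Clang cleanup attribute stands in for the real begin/end plumbing).
 */
#define TRACE_SCOPE(name_)                                                   \
   struct trace_scope _scope __attribute__((cleanup(trace_scope_end))) = {   \
      .name = (name_),                                                       \
   };                                                                        \
   printf("begin %s\n", _scope.name)

/* Trace a whole function under its own name, like VN_TRACE_FUNC(). */
#define TRACE_FUNC() TRACE_SCOPE(__func__)

static void
create_something(void)
{
   TRACE_FUNC(); /* one scope spanning the entire entrypoint */

   {
      TRACE_SCOPE("want sync"); /* a narrower, explicitly named scope */
      /* ... the expensive step worth attributing time to ... */
   }
}

int
main(void)
{
   create_something();
   return 0;
}

This is the pattern the commit adjusts: per-entrypoint VN_TRACE_FUNC() scopes that no longer add signal are dropped, while narrower named scopes (such as the "want sync" one in vn_CreateGraphicsPipelines below) are added or renamed where they attribute time more precisely.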
parent a81189c796
commit d32d010c24
7 changed files with 5 additions and 30 deletions
src/virtio/vulkan/vn_buffer.c

@@ -358,7 +358,6 @@ vn_CreateBuffer(VkDevice device,
                 const VkAllocationCallbacks *pAllocator,
                 VkBuffer *pBuffer)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    const VkAllocationCallbacks *alloc =
       pAllocator ? pAllocator : &dev->base.base.alloc;
@@ -403,7 +402,6 @@ vn_DestroyBuffer(VkDevice device,
                  VkBuffer buffer,
                  const VkAllocationCallbacks *pAllocator)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_buffer *buf = vn_buffer_from_handle(buffer);
    const VkAllocationCallbacks *alloc =
src/virtio/vulkan/vn_descriptor_set.c

@@ -619,7 +619,6 @@ vn_AllocateDescriptorSets(VkDevice device,
                           const VkDescriptorSetAllocateInfo *pAllocateInfo,
                           VkDescriptorSet *pDescriptorSets)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_descriptor_pool *pool =
       vn_descriptor_pool_from_handle(pAllocateInfo->descriptorPool);
@@ -738,7 +737,6 @@ vn_FreeDescriptorSets(VkDevice device,
                       uint32_t descriptorSetCount,
                       const VkDescriptorSet *pDescriptorSets)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_descriptor_pool *pool =
       vn_descriptor_pool_from_handle(descriptorPool);
@@ -993,7 +991,6 @@ vn_UpdateDescriptorSets(VkDevice device,
                         uint32_t descriptorCopyCount,
                         const VkCopyDescriptorSet *pDescriptorCopies)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

@@ -1297,7 +1294,6 @@ vn_UpdateDescriptorSetWithTemplate(
    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
    const void *pData)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_descriptor_update_template *templ =
       vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
src/virtio/vulkan/vn_device_memory.c

@@ -553,7 +553,6 @@ vn_AllocateMemory(VkDevice device,
                   const VkAllocationCallbacks *pAllocator,
                   VkDeviceMemory *pMemory)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);

    /* see vn_physical_device_init_memory_properties */
@@ -616,7 +615,6 @@ vn_FreeMemory(VkDevice device,
               VkDeviceMemory memory,
               const VkAllocationCallbacks *pAllocator)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_device_memory *mem = vn_device_memory_from_handle(memory);
    if (!mem)
@@ -711,7 +709,6 @@ vn_MapMemory(VkDevice device,
 void
 vn_UnmapMemory(VkDevice device, VkDeviceMemory memory)
 {
-   VN_TRACE_FUNC();
 }

 VkResult
@@ -719,7 +716,6 @@ vn_FlushMappedMemoryRanges(VkDevice device,
                            uint32_t memoryRangeCount,
                            const VkMappedMemoryRange *pMemoryRanges)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);

    for (uint32_t i = 0; i < memoryRangeCount; i++) {
@@ -742,7 +738,6 @@ vn_InvalidateMappedMemoryRanges(VkDevice device,
                                 uint32_t memoryRangeCount,
                                 const VkMappedMemoryRange *pMemoryRanges)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);

    for (uint32_t i = 0; i < memoryRangeCount; i++) {
src/virtio/vulkan/vn_feedback.c

@@ -198,8 +198,6 @@ vn_feedback_pool_alloc_locked(struct vn_feedback_pool *pool,
                               uint32_t size,
                               uint32_t *out_offset)
 {
-   VN_TRACE_FUNC();
-
    /* Default values of pool->used and pool->alignment are used to trigger the
     * initial pool grow, and will be properly initialized after that.
     */
@@ -222,9 +220,6 @@ struct vn_feedback_slot *
 vn_feedback_pool_alloc(struct vn_feedback_pool *pool,
                        enum vn_feedback_type type)
 {
-   /* TODO Make slot size variable for VkQueryPool feedback. Currently it's
-    * MAX2(sizeof(VkResult), sizeof(uint64_t)).
-    */
    static const uint32_t slot_size = 8;
    struct vn_feedback_buffer *feedback_buf;
    uint32_t offset;
src/virtio/vulkan/vn_image.c

@@ -561,7 +561,6 @@ vn_CreateImage(VkDevice device,
                const VkAllocationCallbacks *pAllocator,
                VkImage *pImage)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    const VkAllocationCallbacks *alloc =
       pAllocator ? pAllocator : &dev->base.base.alloc;
@@ -648,7 +647,6 @@ vn_DestroyImage(VkDevice device,
                 VkImage image,
                 const VkAllocationCallbacks *pAllocator)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_image *img = vn_image_from_handle(image);
    const VkAllocationCallbacks *alloc =
src/virtio/vulkan/vn_pipeline.c

@@ -390,7 +390,6 @@ vn_CreatePipelineCache(VkDevice device,
                        const VkAllocationCallbacks *pAllocator,
                        VkPipelineCache *pPipelineCache)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    const VkAllocationCallbacks *alloc =
       pAllocator ? pAllocator : &dev->base.base.alloc;
@@ -429,7 +428,6 @@ vn_DestroyPipelineCache(VkDevice device,
                         VkPipelineCache pipelineCache,
                         const VkAllocationCallbacks *pAllocator)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_pipeline_cache *cache =
       vn_pipeline_cache_from_handle(pipelineCache);
@@ -477,7 +475,6 @@ vn_GetPipelineCacheData(VkDevice device,
                         size_t *pDataSize,
                         void *pData)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_physical_device *physical_dev = dev->physical_device;
    struct vn_ring *target_ring = vn_get_target_ring(dev);
@@ -525,7 +522,6 @@ vn_MergePipelineCaches(VkDevice device,
                        uint32_t srcCacheCount,
                        const VkPipelineCache *pSrcCaches)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);

    vn_async_vkMergePipelineCaches(dev->primary_ring, device, dstCache,
@@ -1454,7 +1450,7 @@ vn_fix_graphics_pipeline_create_infos(
    }

    /* tell whether fixes are applied in tracing */
-   VN_TRACE_SCOPE("apply_fixes");
+   VN_TRACE_SCOPE("sanitize pipeline");

    struct vn_graphics_pipeline_fix_tmp *fix_tmp =
       vn_graphics_pipeline_fix_tmp_alloc(alloc, info_count, pnext_mask);
@@ -1511,7 +1507,6 @@ vn_CreateGraphicsPipelines(VkDevice device,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipelines)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    const VkAllocationCallbacks *alloc =
       pAllocator ? pAllocator : &dev->base.base.alloc;
@@ -1568,6 +1563,10 @@ vn_CreateGraphicsPipelines(VkDevice device,
    }

    if (want_sync || target_ring != dev->primary_ring) {
+      if (target_ring == dev->primary_ring) {
+         VN_TRACE_SCOPE("want sync");
+      }
+
       result = vn_call_vkCreateGraphicsPipelines(
          target_ring, device, pipelineCache, createInfoCount, pCreateInfos,
          NULL, pPipelines);
@@ -1593,7 +1592,6 @@ vn_CreateComputePipelines(VkDevice device,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    const VkAllocationCallbacks *alloc =
       pAllocator ? pAllocator : &dev->base.base.alloc;
@@ -1648,7 +1646,6 @@ vn_DestroyPipeline(VkDevice device,
                    VkPipeline _pipeline,
                    const VkAllocationCallbacks *pAllocator)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
    const VkAllocationCallbacks *alloc =
src/virtio/vulkan/vn_query_pool.c

@@ -24,7 +24,6 @@ vn_CreateQueryPool(VkDevice device,
                    const VkAllocationCallbacks *pAllocator,
                    VkQueryPool *pQueryPool)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    const VkAllocationCallbacks *alloc =
       pAllocator ? pAllocator : &dev->base.base.alloc;
@@ -136,7 +135,6 @@ vn_DestroyQueryPool(VkDevice device,
                     VkQueryPool queryPool,
                     const VkAllocationCallbacks *pAllocator)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
    const VkAllocationCallbacks *alloc;
@@ -161,7 +159,6 @@ vn_ResetQueryPool(VkDevice device,
                   uint32_t firstQuery,
                   uint32_t queryCount)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);

@@ -292,7 +289,6 @@ vn_GetQueryPoolResults(VkDevice device,
                        VkDeviceSize stride,
                        VkQueryResultFlags flags)
 {
-   VN_TRACE_FUNC();
    struct vn_device *dev = vn_device_from_handle(device);
    struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
    const VkAllocationCallbacks *alloc = &pool->allocator;