gallium/radeon/winsyses: boolean -> bool, TRUE -> true, FALSE -> false

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Vedran Miletić <vedran@miletic.net>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Marek Olšák 2016-06-21 21:29:39 +02:00
parent d5383a7d31
commit 1c5a10497a
9 changed files with 134 additions and 132 deletions
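
The change is purely mechanical: the legacy gallium boolean type and its TRUE/FALSE macros are replaced with C99 bool/true/false from <stdbool.h> throughout the radeon and amdgpu winsyses. A minimal before/after sketch of the two idioms follows; the legacy typedef shown only approximates what gallium's p_compiler.h provided, and has_uvd_legacy/has_uvd_c99 are hypothetical names, not code from this diff.

/* Sketch only: the old gallium boolean idiom vs. the C99 idiom this
 * commit switches to. Names are hypothetical. */
#include <stdbool.h>

typedef unsigned char boolean;   /* roughly what p_compiler.h provided */
#define TRUE  1
#define FALSE 0

static boolean has_uvd_legacy(unsigned available_rings)
{
   return available_rings != 0 ? TRUE : FALSE;
}

static bool has_uvd_c99(unsigned available_rings)
{
   return available_rings != 0;
}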

@@ -254,10 +254,10 @@ struct radeon_info {
uint64_t gart_size;
uint64_t vram_size;
bool has_dedicated_vram;
boolean has_virtual_memory;
bool has_virtual_memory;
bool gfx_ib_pad_with_type2;
boolean has_sdma;
boolean has_uvd;
bool has_sdma;
bool has_uvd;
uint32_t vce_fw_version;
uint32_t vce_harvest_config;
uint32_t clock_crystal_freq;
@@ -266,7 +266,7 @@ struct radeon_info {
uint32_t drm_major; /* version */
uint32_t drm_minor;
uint32_t drm_patchlevel;
boolean has_userptr;
bool has_userptr;
/* Shader cores. */
uint32_t r600_max_quad_pipes; /* wave size / 16 */
@@ -279,7 +279,7 @@ struct radeon_info {
uint32_t r300_num_gb_pipes;
uint32_t r300_num_z_pipes;
uint32_t r600_gb_backend_map; /* R600 harvest config */
boolean r600_gb_backend_map_valid;
bool r600_gb_backend_map_valid;
uint32_t r600_num_banks;
uint32_t num_render_backends;
uint32_t num_tile_pipes; /* pipe count from PIPE_CONFIG */
@@ -554,12 +554,12 @@ struct radeon_winsys {
* \param buf A winsys buffer object to get the handle from.
* \param whandle A winsys handle pointer.
* \param stride A stride of the buffer in bytes, for texturing.
* \return TRUE on success.
* \return true on success.
*/
boolean (*buffer_get_handle)(struct pb_buffer *buf,
unsigned stride, unsigned offset,
unsigned slice_size,
struct winsys_handle *whandle);
bool (*buffer_get_handle)(struct pb_buffer *buf,
unsigned stride, unsigned offset,
unsigned slice_size,
struct winsys_handle *whandle);
/**
* Return the virtual address of a buffer.
@@ -676,14 +676,14 @@ struct radeon_winsys {
struct pb_buffer *buf);
/**
* Return TRUE if there is enough memory in VRAM and GTT for the buffers
* Return true if there is enough memory in VRAM and GTT for the buffers
* added so far. If the validation fails, all buffers which have
* been added since the last call of cs_validate will be removed and
* the CS will be flushed (provided there are still any buffers).
*
* \param cs A command stream to validate.
*/
boolean (*cs_validate)(struct radeon_winsys_cs *cs);
bool (*cs_validate)(struct radeon_winsys_cs *cs);
/**
* Check whether the given number of dwords is available in the IB.
@@ -695,14 +695,15 @@ struct radeon_winsys {
bool (*cs_check_space)(struct radeon_winsys_cs *cs, unsigned dw);
/**
* Return TRUE if there is enough memory in VRAM and GTT for the buffers
* Return true if there is enough memory in VRAM and GTT for the buffers
* added so far.
*
* \param cs A command stream to validate.
* \param vram VRAM memory size pending to be used
* \param gtt GTT memory size pending to be used
*/
boolean (*cs_memory_below_limit)(struct radeon_winsys_cs *cs, uint64_t vram, uint64_t gtt);
bool (*cs_memory_below_limit)(struct radeon_winsys_cs *cs,
uint64_t vram, uint64_t gtt);
uint64_t (*cs_query_memory_usage)(struct radeon_winsys_cs *cs);
@@ -729,14 +730,14 @@ struct radeon_winsys {
struct pipe_fence_handle **fence);
/**
* Return TRUE if a buffer is referenced by a command stream.
* Return true if a buffer is referenced by a command stream.
*
* \param cs A command stream.
* \param buf A winsys buffer.
*/
boolean (*cs_is_buffer_referenced)(struct radeon_winsys_cs *cs,
struct pb_buffer *buf,
enum radeon_bo_usage usage);
bool (*cs_is_buffer_referenced)(struct radeon_winsys_cs *cs,
struct pb_buffer *buf,
enum radeon_bo_usage usage);
/**
* Request access to a feature for a command stream.
@@ -745,9 +746,9 @@ struct radeon_winsys {
* \param fid Feature ID, one of RADEON_FID_*
* \param enable Whether to enable or disable the feature.
*/
boolean (*cs_request_feature)(struct radeon_winsys_cs *cs,
enum radeon_feature_id fid,
boolean enable);
bool (*cs_request_feature)(struct radeon_winsys_cs *cs,
enum radeon_feature_id fid,
bool enable);
/**
* Make sure all asynchronous flushes of the cs have completed
*
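
The hunks above update the winsys interface header; the remaining files adjust the amdgpu and radeon implementations to match. As a hedged illustration of how a driver consumes the now bool-returning callbacks (a hypothetical helper, assuming the radeon_winsys.h declarations from this tree are in scope; the exact cs_flush signature is an assumption, and this is not code from the commit):

/* Hypothetical caller; sketch only. */
static void ensure_cs_space(struct radeon_winsys *ws,
                            struct radeon_winsys_cs *cs,
                            unsigned num_dw,
                            struct pipe_fence_handle **fence)
{
   /* cs_check_space and cs_validate now return C99 bool. */
   if (!ws->cs_check_space(cs, num_dw) || !ws->cs_validate(cs))
      ws->cs_flush(cs, 0, fence);
}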

@@ -619,10 +619,10 @@ error:
return NULL;
}
static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
unsigned stride, unsigned offset,
unsigned slice_size,
struct winsys_handle *whandle)
static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
unsigned stride, unsigned offset,
unsigned slice_size,
struct winsys_handle *whandle)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
enum amdgpu_bo_handle_type type;
@@ -641,18 +641,18 @@ static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
type = amdgpu_bo_handle_type_kms;
break;
default:
return FALSE;
return false;
}
r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
if (r)
return FALSE;
return false;
whandle->stride = stride;
whandle->offset = offset;
whandle->offset += slice_size * whandle->layer;
bo->is_shared = true;
return TRUE;
return true;
}
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,

@@ -118,7 +118,7 @@ bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
&expired);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
return FALSE;
return false;
}
if (expired) {
@@ -497,8 +497,8 @@ static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
}
static boolean amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
enum ring_type ring_type)
static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
enum ring_type ring_type)
{
int i;
@@ -529,20 +529,20 @@ static boolean amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
cs->buffers = (struct amdgpu_cs_buffer*)
CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
if (!cs->buffers) {
return FALSE;
return false;
}
cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
if (!cs->handles) {
FREE(cs->buffers);
return FALSE;
return false;
}
cs->flags = CALLOC(1, cs->max_num_buffers);
if (!cs->flags) {
FREE(cs->handles);
FREE(cs->buffers);
return FALSE;
return false;
}
for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
@@ -556,7 +556,7 @@ static boolean amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
AMDGPU_IB_FLAG_PREAMBLE;
return TRUE;
return true;
}
static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
@@ -698,9 +698,9 @@ static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
return amdgpu_lookup_buffer(cs->csc, (struct amdgpu_winsys_bo*)buf);
}
static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
return TRUE;
return true;
}
static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
@@ -784,7 +784,8 @@ static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
return true;
}
static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
static bool amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs,
uint64_t vram, uint64_t gtt)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys *ws = cs->ctx->ws;
@@ -823,7 +824,7 @@ static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
return cs->num_buffers;
}
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", FALSE)
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
/* Since the kernel driver doesn't synchronize execution between different
* rings automatically, we have to add fence dependencies manually.
@@ -965,7 +966,7 @@ void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
util_queue_job_wait(&cs->flush_completed);
}
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
unsigned flags,
@@ -1088,9 +1089,9 @@ static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
FREE(cs);
}
static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
struct pb_buffer *_buf,
enum radeon_bo_usage usage)
static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
struct pb_buffer *_buf,
enum radeon_bo_usage usage)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

@@ -182,7 +182,7 @@ amdgpu_cs_from_ib(struct amdgpu_ib *ib)
}
}
static inline boolean
static inline bool
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
struct amdgpu_winsys_bo *bo)
{
@@ -191,7 +191,7 @@ amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
(num_refs && amdgpu_lookup_buffer(cs->csc, bo) != -1);
}
static inline boolean
static inline bool
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
struct amdgpu_winsys_bo *bo,
enum radeon_bo_usage usage)
@@ -199,16 +199,16 @@ amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
int index;
if (!bo->num_cs_references)
return FALSE;
return false;
index = amdgpu_lookup_buffer(cs->csc, bo);
if (index == -1)
return FALSE;
return false;
return (cs->csc->buffers[index].usage & usage) != 0;
}
static inline boolean
static inline bool
amdgpu_bo_is_referenced_by_any_cs(struct amdgpu_winsys_bo *bo)
{
return bo->num_cs_references != 0;

@@ -93,7 +93,7 @@ static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
}
/* Helper function to do the ioctls needed for setup and init. */
static boolean do_winsys_init(struct amdgpu_winsys *ws, int fd)
static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
{
struct amdgpu_buffer_size_alignments alignment_info = {};
struct amdgpu_heap_info vram, gtt;
@@ -270,12 +270,12 @@ static boolean do_winsys_init(struct amdgpu_winsys *ws, int fd)
ws->info.has_uvd = uvd.available_rings != 0;
ws->info.vce_fw_version =
vce.available_rings ? vce_version : 0;
ws->info.has_userptr = TRUE;
ws->info.has_userptr = true;
ws->info.num_render_backends = ws->amdinfo.rb_pipes;
ws->info.clock_crystal_freq = ws->amdinfo.gpu_counter_freq;
ws->info.num_tile_pipes = cik_get_num_tile_pipes(&ws->amdinfo);
ws->info.pipe_interleave_bytes = 256 << ((ws->amdinfo.gb_addr_cfg >> 4) & 0x7);
ws->info.has_virtual_memory = TRUE;
ws->info.has_virtual_memory = true;
ws->info.has_sdma = dma.available_rings != 0;
/* Get the number of good compute units. */
@@ -296,14 +296,14 @@ static boolean do_winsys_init(struct amdgpu_winsys *ws, int fd)
ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;
return TRUE;
return true;
fail:
if (ws->addrlib)
AddrDestroy(ws->addrlib);
amdgpu_device_deinitialize(ws->dev);
ws->dev = NULL;
return FALSE;
return false;
}
static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
@@ -327,11 +327,11 @@ static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
*info = ((struct amdgpu_winsys *)rws)->info;
}
static boolean amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
enum radeon_feature_id fid,
boolean enable)
static bool amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
enum radeon_feature_id fid,
bool enable)
{
return FALSE;
return false;
}
static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
@@ -397,7 +397,7 @@ static int compare_dev(void *key1, void *key2)
return key1 != key2;
}
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", true)
static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{

@@ -998,10 +998,10 @@ fail:
return NULL;
}
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
unsigned stride, unsigned offset,
unsigned slice_size,
struct winsys_handle *whandle)
static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
unsigned stride, unsigned offset,
unsigned slice_size,
struct winsys_handle *whandle)
{
struct drm_gem_flink flink;
struct radeon_bo *bo = radeon_bo(buffer);
@@ -1016,7 +1016,7 @@ static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
flink.handle = bo->handle;
if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
return FALSE;
return false;
}
bo->flink_name = flink.name;
@@ -1030,14 +1030,14 @@ static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
whandle->handle = bo->handle;
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
return FALSE;
return false;
}
whandle->stride = stride;
whandle->offset = offset;
whandle->offset += slice_size * whandle->layer;
return TRUE;
return true;
}
static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)

@@ -92,8 +92,8 @@ static void radeon_drm_ctx_destroy(struct radeon_winsys_ctx *ctx)
/* No context support here. */
}
static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
struct radeon_drm_winsys *ws)
static bool radeon_init_cs_context(struct radeon_cs_context *csc,
struct radeon_drm_winsys *ws)
{
int i;
@@ -102,14 +102,14 @@ static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
csc->relocs_bo = (struct radeon_bo_item*)
CALLOC(1, csc->nrelocs * sizeof(csc->relocs_bo[0]));
if (!csc->relocs_bo) {
return FALSE;
return false;
}
csc->relocs = (struct drm_radeon_cs_reloc*)
CALLOC(1, csc->nrelocs * sizeof(struct drm_radeon_cs_reloc));
if (!csc->relocs) {
FREE(csc->relocs_bo);
return FALSE;
return false;
}
csc->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
@@ -131,7 +131,7 @@ static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
for (i = 0; i < ARRAY_SIZE(csc->reloc_indices_hashlist); i++) {
csc->reloc_indices_hashlist[i] = -1;
}
return TRUE;
return true;
}
static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
@@ -347,10 +347,10 @@ static int radeon_drm_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
return radeon_lookup_buffer(cs->csc, (struct radeon_bo*)buf);
}
static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
static bool radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
boolean status =
bool status =
cs->csc->used_gart < cs->ws->info.gart_size * 0.8 &&
cs->csc->used_vram < cs->ws->info.vram_size * 0.8;
@@ -389,7 +389,7 @@ static bool radeon_drm_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
return rcs->current.max_dw - rcs->current.cdw >= dw;
}
static boolean radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
static bool radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
@@ -438,7 +438,7 @@ void radeon_drm_cs_emit_ioctl_oneshot(void *job, int thread_index)
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "radeon: Not enough memory for command submission.\n");
else if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
else if (debug_get_bool_option("RADEON_DUMP_CS", false)) {
unsigned i;
fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
@@ -469,7 +469,7 @@ void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
util_queue_job_wait(&cs->flush_completed);
}
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
unsigned flags,
@@ -618,27 +618,27 @@ static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
FREE(cs);
}
static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
struct pb_buffer *_buf,
enum radeon_bo_usage usage)
static bool radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
struct pb_buffer *_buf,
enum radeon_bo_usage usage)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)_buf;
int index;
if (!bo->num_cs_references)
return FALSE;
return false;
index = radeon_lookup_buffer(cs->csc, bo);
if (index == -1)
return FALSE;
return false;
if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
return TRUE;
return true;
if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
return TRUE;
return true;
return FALSE;
return false;
}
/* FENCES */

@@ -89,7 +89,7 @@ radeon_drm_cs(struct radeon_winsys_cs *base)
return (struct radeon_drm_cs*)base;
}
static inline boolean
static inline bool
radeon_bo_is_referenced_by_cs(struct radeon_drm_cs *cs,
struct radeon_bo *bo)
{
@@ -98,23 +98,23 @@ radeon_bo_is_referenced_by_cs(struct radeon_drm_cs *cs,
(num_refs && radeon_lookup_buffer(cs->csc, bo) != -1);
}
static inline boolean
static inline bool
radeon_bo_is_referenced_by_cs_for_write(struct radeon_drm_cs *cs,
struct radeon_bo *bo)
{
int index;
if (!bo->num_cs_references)
return FALSE;
return false;
index = radeon_lookup_buffer(cs->csc, bo);
if (index == -1)
return FALSE;
return false;
return cs->csc->relocs[index].write_domain != 0;
}
static inline boolean
static inline bool
radeon_bo_is_referenced_by_any_cs(struct radeon_bo *bo)
{
return bo->num_cs_references != 0;

@@ -67,16 +67,16 @@ static struct util_hash_table *fd_tab = NULL;
pipe_static_mutex(fd_tab_mutex);
/* Enable/disable feature access for one command stream.
* If enable == TRUE, return TRUE on success.
* Otherwise, return FALSE.
* If enable == true, return true on success.
* Otherwise, return false.
*
* We basically do the same thing the kernel does, because we have to deal
* with multiple contexts (here command streams) backed by one winsys. */
static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
struct radeon_drm_cs **owner,
pipe_mutex *mutex,
unsigned request, const char *request_name,
boolean enable)
static bool radeon_set_fd_access(struct radeon_drm_cs *applier,
struct radeon_drm_cs **owner,
pipe_mutex *mutex,
unsigned request, const char *request_name,
bool enable)
{
struct drm_radeon_info info;
unsigned value = enable ? 1 : 0;
@@ -89,12 +89,12 @@ static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
if (enable) {
if (*owner) {
pipe_mutex_unlock(*mutex);
return FALSE;
return false;
}
} else {
if (*owner != applier) {
pipe_mutex_unlock(*mutex);
return FALSE;
return false;
}
}
@@ -104,7 +104,7 @@ static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
&info, sizeof(info)) != 0) {
pipe_mutex_unlock(*mutex);
return FALSE;
return false;
}
/* Update the rights in the winsys. */
@@ -112,18 +112,18 @@ static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
if (value) {
*owner = applier;
pipe_mutex_unlock(*mutex);
return TRUE;
return true;
}
} else {
*owner = NULL;
}
pipe_mutex_unlock(*mutex);
return FALSE;
return false;
}
static boolean radeon_get_drm_value(int fd, unsigned request,
const char *errname, uint32_t *out)
static bool radeon_get_drm_value(int fd, unsigned request,
const char *errname, uint32_t *out)
{
struct drm_radeon_info info;
int retval;
@@ -139,13 +139,13 @@ static boolean radeon_get_drm_value(int fd, unsigned request,
fprintf(stderr, "radeon: Failed to get %s, error number %d\n",
errname, retval);
}
return FALSE;
return false;
}
return TRUE;
return true;
}
/* Helper function to do the ioctls needed for setup and init. */
static boolean do_winsys_init(struct radeon_drm_winsys *ws)
static bool do_winsys_init(struct radeon_drm_winsys *ws)
{
struct drm_radeon_gem_info gem_info;
int retval;
@@ -182,7 +182,7 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
version->version_minor,
version->version_patchlevel);
drmFreeVersion(version);
return FALSE;
return false;
}
ws->info.drm_major = version->version_major;
@@ -193,7 +193,7 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
/* Get PCI ID. */
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_DEVICE_ID, "PCI ID",
&ws->info.pci_id))
return FALSE;
return false;
/* Check PCI ID. */
switch (ws->info.pci_id) {
@@ -211,14 +211,14 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
default:
fprintf(stderr, "radeon: Invalid PCI ID.\n");
return FALSE;
return false;
}
switch (ws->info.family) {
default:
case CHIP_UNKNOWN:
fprintf(stderr, "radeon: Unknown family.\n");
return FALSE;
return false;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
@@ -322,14 +322,14 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
}
/* Check for dma */
ws->info.has_sdma = FALSE;
ws->info.has_sdma = false;
/* DMA is disabled on R700. There is IB corruption and hangs. */
if (ws->info.chip_class >= EVERGREEN && ws->info.drm_minor >= 27) {
ws->info.has_sdma = TRUE;
ws->info.has_sdma = true;
}
/* Check for UVD and VCE */
ws->info.has_uvd = FALSE;
ws->info.has_uvd = false;
ws->info.vce_fw_version = 0x00000000;
if (ws->info.drm_minor >= 32) {
uint32_t value = RADEON_CS_RING_UVD;
@@ -368,7 +368,7 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
if (retval) {
fprintf(stderr, "radeon: Failed to get MM info, error number %d\n",
retval);
return FALSE;
return false;
}
ws->info.gart_size = gem_info.gart_size;
ws->info.vram_size = gem_info.vram_size;
@@ -388,12 +388,12 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_GB_PIPES,
"GB pipe count",
&ws->info.r300_num_gb_pipes))
return FALSE;
return false;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_Z_PIPES,
"Z pipe count",
&ws->info.r300_num_z_pipes))
return FALSE;
return false;
}
else if (ws->gen >= DRV_R600) {
uint32_t tiling_config = 0;
@@ -401,7 +401,7 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BACKENDS,
"num backends",
&ws->info.num_render_backends))
return FALSE;
return false;
/* get the GPU counter frequency, failure is not fatal */
radeon_get_drm_value(ws->fd, RADEON_INFO_CLOCK_CRYSTAL_FREQ, NULL,
@@ -437,24 +437,24 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
if (radeon_get_drm_value(ws->fd, RADEON_INFO_BACKEND_MAP, NULL,
&ws->info.r600_gb_backend_map))
ws->info.r600_gb_backend_map_valid = TRUE;
ws->info.r600_gb_backend_map_valid = true;
ws->info.has_virtual_memory = FALSE;
ws->info.has_virtual_memory = false;
if (ws->info.drm_minor >= 13) {
uint32_t ib_vm_max_size;
ws->info.has_virtual_memory = TRUE;
ws->info.has_virtual_memory = true;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
&ws->va_start))
ws->info.has_virtual_memory = FALSE;
ws->info.has_virtual_memory = false;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
&ib_vm_max_size))
ws->info.has_virtual_memory = FALSE;
ws->info.has_virtual_memory = false;
radeon_get_drm_value(ws->fd, RADEON_INFO_VA_UNMAP_WORKING, NULL,
&ws->va_unmap_working);
}
if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", FALSE))
ws->info.has_virtual_memory = FALSE;
if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", false))
ws->info.has_virtual_memory = false;
}
/* Get max pipes, this is only needed for compute shaders. All evergreen+
@@ -501,14 +501,14 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
"returned accel_working2 value %u is smaller than 2. "
"Please install a newer kernel.\n",
ws->accel_working2);
return FALSE;
return false;
}
if (ws->info.chip_class == CIK) {
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_CIK_MACROTILE_MODE_ARRAY, NULL,
ws->info.cik_macrotile_mode_array)) {
fprintf(stderr, "radeon: Kernel 3.13 is required for CIK support.\n");
return FALSE;
return false;
}
}
@@ -516,7 +516,7 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_SI_TILE_MODE_ARRAY, NULL,
ws->info.si_tile_mode_array)) {
fprintf(stderr, "radeon: Kernel 3.10 is required for SI support.\n");
return FALSE;
return false;
}
}
@@ -529,7 +529,7 @@ static boolean do_winsys_init(struct radeon_drm_winsys *ws)
ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;
return TRUE;
return true;
}
static void radeon_winsys_destroy(struct radeon_winsys *rws)
@@ -566,9 +566,9 @@ static void radeon_query_info(struct radeon_winsys *rws,
*info = ((struct radeon_drm_winsys *)rws)->info;
}
static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
enum radeon_feature_id fid,
boolean enable)
static bool radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
enum radeon_feature_id fid,
bool enable)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
@@ -585,7 +585,7 @@ static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
RADEON_INFO_WANT_CMASK, "AA optimizations",
enable);
}
return FALSE;
return false;
}
static uint64_t radeon_query_value(struct radeon_winsys *rws,
@@ -683,7 +683,7 @@ static int compare_fd(void *key1, void *key2)
stat1.st_rdev != stat2.st_rdev;
}
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", true)
static bool radeon_winsys_unref(struct radeon_winsys *ws)
{