Mirror of https://gitlab.freedesktop.org/mesa/drm.git, synced 2025-12-20 14:00:11 +01:00
Merge commit 'origin/drm-gem' into modesetting-gem
Lots of conflicts, seems to load ok, but I'm sure some bugs snuck in.

Conflicts:
	linux-core/drmP.h
	linux-core/drm_lock.c
	linux-core/i915_gem.c
	shared-core/drm.h
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
commit 86accbcb34
24 changed files with 874 additions and 977 deletions
configure.ac

@@ -19,7 +19,7 @@
 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 AC_PREREQ(2.57)
-AC_INIT([libdrm], 2.3.1, [dri-devel@lists.sourceforge.net], libdrm)
+AC_INIT([libdrm], 2.4.0, [dri-devel@lists.sourceforge.net], libdrm)
 AC_CONFIG_SRCDIR([Makefile.am])
 AM_INIT_AUTOMAKE([dist-bzip2])
 
@@ -252,7 +252,7 @@ alloc_block(dri_bo *bo)
 
    sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
 
-   block->mem = drmmmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
+   block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
    if (!block->mem) {
       free(block);
       return 0;
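The size round-up in alloc_block() above is the standard power-of-two alignment idiom: adding (alignment - 1) and then masking those low bits off rounds the size up to the next multiple of the alignment. A self-contained sketch with made-up values (the helper name and the numbers are illustrative only, not part of the diff):

#include <assert.h>
#include <stdio.h>

/* Round sz up to the next multiple of align, where align is a power of two.
 * (align - 1) has all the low bits set; adding it and clearing those bits
 * with the mask yields the next aligned value. */
static unsigned long round_up(unsigned long sz, unsigned long align)
{
    return (sz + align - 1) & ~(align - 1);
}

int main(void)
{
    assert(round_up(4097, 4096) == 8192);  /* one byte past a page -> two pages */
    assert(round_up(4096, 4096) == 4096);  /* already aligned -> unchanged */
    printf("%lu\n", round_up(100, 64));    /* prints 128 */
    return 0;
}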
@@ -300,7 +300,7 @@ static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
      DBG(" - free immediately\n");
      DRMLISTDEL(block);
 
-     drmmmFreeMem(block->mem);
+     mmFreeMem(block->mem);
      free(block);
   }
 }

@@ -415,7 +415,7 @@ static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
         DBG("delayed free: offset %x sz %x\n",
             block->mem->ofs, block->mem->size);
         DRMLISTDEL(block);
-        drmmmFreeMem(block->mem);
+        mmFreeMem(block->mem);
         free(block);
      }
      else {

@@ -923,7 +923,7 @@ dri_fake_destroy(dri_bufmgr *bufmgr)
 {
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
 
-   drmmmDestroy(bufmgr_fake->heap);
+   mmDestroy(bufmgr_fake->heap);
    free(bufmgr);
 }
 

@@ -1062,7 +1062,7 @@ dri_fake_process_relocs(dri_bo *batch_buf)
 
    dri_fake_calculate_domains(batch_buf);
 
-   batch_fake->read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
+   batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
 
    /* we've ran out of RAM so blow the whole lot away and retry */
 restart:

@@ -1074,7 +1074,7 @@ dri_fake_process_relocs(dri_bo *batch_buf)
       bufmgr_fake->fail = 0;
       goto restart;
    } else /* dump out the memory here */
-      drmmmDumpMemInfo(bufmgr_fake->heap);
+      mmDumpMemInfo(bufmgr_fake->heap);
 }
 
 assert(ret == 0);

@@ -1193,7 +1193,7 @@ intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
    bufmgr_fake->low_offset = low_offset;
    bufmgr_fake->virtual = low_virtual;
    bufmgr_fake->size = size;
-   bufmgr_fake->heap = drmmmInit(low_offset, size);
+   bufmgr_fake->heap = mmInit(low_offset, size);
 
    /* Hook in methods */
    bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc;
@@ -299,7 +299,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
     }
 
     if (!alloc_from_cache) {
-        struct drm_gem_create create;
+        struct drm_i915_gem_create create;
 
         bo_gem = calloc(1, sizeof(*bo_gem));
         if (!bo_gem)

@@ -309,7 +309,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
         memset(&create, 0, sizeof(create));
         create.size = bo_size;
 
-        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CREATE, &create);
+        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
         bo_gem->gem_handle = create.handle;
         if (ret != 0) {
             free(bo_gem);

@@ -455,7 +455,7 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
 {
     dri_bufmgr_gem *bufmgr_gem;
     dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-    struct drm_gem_set_domain set_domain;
+    struct drm_i915_gem_set_domain set_domain;
     int ret;
 
     bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;

@@ -470,13 +470,13 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
     DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
 
     if (bo_gem->virtual == NULL) {
-        struct drm_gem_mmap mmap_arg;
+        struct drm_i915_gem_mmap mmap_arg;
 
         memset(&mmap_arg, 0, sizeof(mmap_arg));
         mmap_arg.handle = bo_gem->gem_handle;
         mmap_arg.offset = 0;
         mmap_arg.size = bo->size;
-        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_MMAP, &mmap_arg);
+        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
         if (ret != 0) {
             fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
                     __FILE__, __LINE__,

@@ -491,9 +491,12 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
 
     if (!bo_gem->cpu_domain_set) {
         set_domain.handle = bo_gem->gem_handle;
-        set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
-        set_domain.write_domain = write_enable ? DRM_GEM_DOMAIN_CPU : 0;
-        ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+        set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_CPU : 0;
+        do {
+            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+                        &set_domain);
+        } while (ret == -1 && errno == EINTR);
         if (ret != 0) {
             fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
                      __FILE__, __LINE__,

@@ -525,7 +528,7 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
 {
     dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
     dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-    struct drm_gem_pwrite pwrite;
+    struct drm_i915_gem_pwrite pwrite;
     int ret;
 
     memset (&pwrite, 0, sizeof (pwrite));

@@ -533,7 +536,9 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
     pwrite.offset = offset;
     pwrite.size = size;
     pwrite.data_ptr = (uint64_t) (uintptr_t) data;
-    ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PWRITE, &pwrite);
+    do {
+        ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+    } while (ret == -1 && errno == EINTR);
     if (ret != 0) {
         fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
                  __FILE__, __LINE__,

@@ -549,7 +554,7 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
 {
     dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
     dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-    struct drm_gem_pread pread;
+    struct drm_i915_gem_pread pread;
     int ret;
 
     memset (&pread, 0, sizeof (pread));

@@ -557,7 +562,9 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
     pread.offset = offset;
     pread.size = size;
     pread.data_ptr = (uint64_t) (uintptr_t) data;
-    ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PREAD, &pread);
+    do {
+        ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+    } while (ret == -1 && errno == EINTR);
     if (ret != 0) {
         fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
                  __FILE__, __LINE__,

@@ -572,13 +579,13 @@ dri_gem_bo_wait_rendering(dri_bo *bo)
 {
     dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
     dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
-    struct drm_gem_set_domain set_domain;
+    struct drm_i915_gem_set_domain set_domain;
     int ret;
 
     set_domain.handle = bo_gem->gem_handle;
-    set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
+    set_domain.read_domains = I915_GEM_DOMAIN_CPU;
     set_domain.write_domain = 0;
-    ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+    ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
     if (ret != 0) {
         fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
                  __FILE__, __LINE__,
@@ -29,7 +29,7 @@
 #include "mm.h"
 
 void
-drmmmDumpMemInfo(const struct mem_block *heap)
+mmDumpMemInfo(const struct mem_block *heap)
 {
     drmMsg("Memory heap %p:\n", (void *)heap);
     if (heap == 0) {

@@ -56,7 +56,7 @@ drmmmDumpMemInfo(const struct mem_block *heap)
 }
 
 struct mem_block *
-drmmmInit(int ofs, int size)
+mmInit(int ofs, int size)
 {
     struct mem_block *heap, *block;
 

@@ -163,7 +163,7 @@ SliceBlock(struct mem_block *p,
 
 
 struct mem_block *
-drmmmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
+mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
 {
     struct mem_block *p;
     const int mask = (1 << align2)-1;

@@ -196,7 +196,7 @@ drmmmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
 
 
 struct mem_block *
-drmmmFindBlock(struct mem_block *heap, int start)
+mmFindBlock(struct mem_block *heap, int start)
 {
     struct mem_block *p;
 

@@ -235,7 +235,7 @@ Join2Blocks(struct mem_block *p)
 }
 
 int
-drmmmFreeMem(struct mem_block *b)
+mmFreeMem(struct mem_block *b)
 {
     if (!b)
         return 0;

@@ -264,7 +264,7 @@ drmmmFreeMem(struct mem_block *b)
 
 
 void
-drmmmDestroy(struct mem_block *heap)
+mmDestroy(struct mem_block *heap)
 {
     struct mem_block *p;
 
@@ -40,13 +40,21 @@ struct mem_block {
    unsigned int reserved:1;
 };
 
 
+/* Rename the variables in the drm copy of this code so that it doesn't
+ * conflict with mesa or whoever else has copied it around.
+ */
+#define mmInit drm_mmInit
+#define mmAllocMem drm_mmAllocMem
+#define mmFreeMem drm_mmFreeMem
+#define mmFindBlock drm_mmFindBlock
+#define mmDestroy drm_mmDestroy
+#define mmDumpMemInfo drm_mmDumpMemInfo
+
 /**
  * input: total size in bytes
  * return: a heap pointer if OK, NULL if error
  */
-extern struct mem_block *drmmmInit(int ofs, int size);
+extern struct mem_block *mmInit(int ofs, int size);
 
 /**
  * Allocate 'size' bytes with 2^align2 bytes alignment,
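A minimal sketch of what the #define block above buys (the file and values below are illustrative, not part of the diff): libdrm and Mesa each carry a copy of this small allocator, so without the renames both copies would export the same mmInit/mmAllocMem symbols and could collide when linked into one process. With the defines, sources keep calling the short names while the object code exports the drm_-prefixed ones.

/* sketch.c -- compiles standalone; mirrors the renaming trick from mm.h */
#define mmInit drm_mmInit          /* every textual use of mmInit ...        */

struct mem_block { int ofs, size; };

/* ... so this definition is really drm_mmInit after preprocessing, and it
 * can no longer clash with Mesa's own mmInit symbol at link time. */
struct mem_block *mmInit(int ofs, int size)
{
    static struct mem_block heap;
    heap.ofs  = ofs;
    heap.size = size;
    return &heap;
}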
@@ -58,7 +66,7 @@ extern struct mem_block *drmmmInit(int ofs, int size);
  * startSearch = linear offset from start of heap to begin search
  * return: pointer to the allocated block, 0 if error
  */
-extern struct mem_block *drmmmAllocMem(struct mem_block *heap, int size,
+extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
                                        int align2, int startSearch);
 
 /**

@@ -66,23 +74,23 @@ extern struct mem_block *drmmmAllocMem(struct mem_block *heap, int size,
  * input: pointer to a block
  * return: 0 if OK, -1 if error
  */
-extern int drmmmFreeMem(struct mem_block *b);
+extern int mmFreeMem(struct mem_block *b);
 
 /**
  * Free block starts at offset
  * input: pointer to a heap, start offset
  * return: pointer to a block
  */
-extern struct mem_block *drmmmFindBlock(struct mem_block *heap, int start);
+extern struct mem_block *mmFindBlock(struct mem_block *heap, int start);
 
 /**
  * destroy MM
  */
-extern void drmmmDestroy(struct mem_block *mmInit);
+extern void mmDestroy(struct mem_block *mmInit);
 
 /**
  * For debuging purpose.
  */
-extern void drmmmDumpMemInfo(const struct mem_block *mmInit);
+extern void mmDumpMemInfo(const struct mem_block *mmInit);
 
 #endif
libdrm/xf86drm.c
@@ -174,6 +174,19 @@ static char *drmStrdup(const char *s)
     return retval;
 }
 
+/**
+ * Call ioctl, restarting if it is interupted
+ */
+static int
+drmIoctl(int fd, int request, void *arg)
+{
+    int ret;
+
+    do {
+        ret = ioctl(fd, request, arg);
+    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+    return ret;
+}
+
 static unsigned long drmGetKeyFromFd(int fd)
 {
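The drmIoctl() helper added above is what lets the remaining hunks in this file replace bare ioctl() calls: any request interrupted by a signal (EINTR) or told to retry (EAGAIN) is simply reissued, so callers never see a spurious failure. A self-contained sketch of the same idiom, assuming a hypothetical device node and helper name of my own (not libdrm API):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Same retry idiom as drmIoctl() above: reissue the request whenever the
 * call was interrupted by a signal or the kernel asked us to try again. */
static int retry_ioctl(int fd, unsigned long request, void *arg)
{
    int ret;

    do {
        ret = ioctl(fd, request, arg);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
    return ret;
}

int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);  /* placeholder device node */

    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* Callers now write retry_ioctl(fd, REQUEST, &args) instead of a bare
     * ioctl(); 0 means success, -1 means a real (non-EINTR) failure. */
    return 0;
}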
@@ -675,7 +688,7 @@ drmVersionPtr drmGetVersion(int fd)
     version->desc_len = 0;
     version->desc = NULL;
 
-    if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+    if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
         drmFreeKernelVersion(version);
         return NULL;
     }

@@ -687,7 +700,7 @@ drmVersionPtr drmGetVersion(int fd)
     if (version->desc_len)
         version->desc = drmMalloc(version->desc_len + 1);
 
-    if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+    if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
         drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno));
         drmFreeKernelVersion(version);
         return NULL;

@@ -773,10 +786,10 @@ char *drmGetBusid(int fd)
     u.unique_len = 0;
     u.unique = NULL;
 
-    if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+    if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
         return NULL;
     u.unique = drmMalloc(u.unique_len + 1);
-    if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+    if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
         return NULL;
     u.unique[u.unique_len] = '\0';
 

@@ -803,7 +816,7 @@ int drmSetBusid(int fd, const char *busid)
     u.unique = (char *)busid;
     u.unique_len = strlen(busid);
 
-    if (ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
+    if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
         return -errno;
     }
     return 0;

@@ -814,7 +827,7 @@ int drmGetMagic(int fd, drm_magic_t * magic)
     drm_auth_t auth;
 
     *magic = 0;
-    if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
+    if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
         return -errno;
     *magic = auth.magic;
     return 0;

@@ -825,7 +838,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
     drm_auth_t auth;
 
     auth.magic = magic;
-    if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
+    if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
         return -errno;
     return 0;
 }
@ -890,7 +903,7 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
|
|||
map.handle = 0;
|
||||
map.type = type;
|
||||
map.flags = flags;
|
||||
if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map))
|
||||
if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
|
||||
return -errno;
|
||||
if (handle)
|
||||
*handle = (drm_handle_t)map.handle;
|
||||
|
|
@ -903,7 +916,7 @@ int drmRmMap(int fd, drm_handle_t handle)
|
|||
|
||||
map.handle = (void *)handle;
|
||||
|
||||
if(ioctl(fd, DRM_IOCTL_RM_MAP, &map))
|
||||
if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -936,7 +949,7 @@ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
|
|||
request.flags = flags;
|
||||
request.agp_start = agp_offset;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request))
|
||||
if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request))
|
||||
return -errno;
|
||||
return request.count;
|
||||
}
|
||||
|
|
@ -949,7 +962,7 @@ int drmMarkBufs(int fd, double low, double high)
|
|||
info.count = 0;
|
||||
info.list = NULL;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
|
||||
if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
|
||||
return -EINVAL;
|
||||
|
||||
if (!info.count)
|
||||
|
|
@ -958,7 +971,7 @@ int drmMarkBufs(int fd, double low, double high)
|
|||
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
|
||||
return -ENOMEM;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
|
||||
if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
|
||||
int retval = -errno;
|
||||
drmFree(info.list);
|
||||
return retval;
|
||||
|
|
@ -967,7 +980,7 @@ int drmMarkBufs(int fd, double low, double high)
|
|||
for (i = 0; i < info.count; i++) {
|
||||
info.list[i].low_mark = low * info.list[i].count;
|
||||
info.list[i].high_mark = high * info.list[i].count;
|
||||
if (ioctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
|
||||
if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
|
||||
int retval = -errno;
|
||||
drmFree(info.list);
|
||||
return retval;
|
||||
|
|
@ -999,7 +1012,7 @@ int drmFreeBufs(int fd, int count, int *list)
|
|||
|
||||
request.count = count;
|
||||
request.list = list;
|
||||
if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request))
|
||||
if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1088,14 +1101,14 @@ drmBufInfoPtr drmGetBufInfo(int fd)
|
|||
info.count = 0;
|
||||
info.list = NULL;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
|
||||
if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
|
||||
return NULL;
|
||||
|
||||
if (info.count) {
|
||||
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
|
||||
return NULL;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
|
||||
if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
|
||||
drmFree(info.list);
|
||||
return NULL;
|
||||
}
|
||||
|
|
@ -1139,7 +1152,7 @@ drmBufMapPtr drmMapBufs(int fd)
|
|||
bufs.count = 0;
|
||||
bufs.list = NULL;
|
||||
bufs.virtual = NULL;
|
||||
if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
|
||||
if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
|
||||
return NULL;
|
||||
|
||||
if (!bufs.count)
|
||||
|
|
@ -1148,7 +1161,7 @@ drmBufMapPtr drmMapBufs(int fd)
|
|||
if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list))))
|
||||
return NULL;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
|
||||
if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
|
||||
drmFree(bufs.list);
|
||||
return NULL;
|
||||
}
|
||||
|
|
@ -1263,7 +1276,7 @@ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags)
|
|||
if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
|
||||
if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
|
||||
|
||||
while (ioctl(fd, DRM_IOCTL_LOCK, &lock))
|
||||
while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock))
|
||||
;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1286,7 +1299,7 @@ int drmUnlock(int fd, drm_context_t context)
|
|||
|
||||
lock.context = context;
|
||||
lock.flags = 0;
|
||||
return ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
|
||||
return drmIoctl(fd, DRM_IOCTL_UNLOCK, &lock);
|
||||
}
|
||||
|
||||
drm_context_t *drmGetReservedContextList(int fd, int *count)
|
||||
|
|
@ -1298,7 +1311,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
|
|||
|
||||
res.count = 0;
|
||||
res.contexts = NULL;
|
||||
if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
|
||||
if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
|
||||
return NULL;
|
||||
|
||||
if (!res.count)
|
||||
|
|
@ -1312,7 +1325,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
|
|||
}
|
||||
|
||||
res.contexts = list;
|
||||
if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
|
||||
if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < res.count; i++)
|
||||
|
|
@ -1351,7 +1364,7 @@ int drmCreateContext(int fd, drm_context_t *handle)
|
|||
drm_ctx_t ctx;
|
||||
|
||||
ctx.flags = 0; /* Modified with functions below */
|
||||
if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
|
||||
if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
|
||||
return -errno;
|
||||
*handle = ctx.handle;
|
||||
return 0;
|
||||
|
|
@ -1362,7 +1375,7 @@ int drmSwitchToContext(int fd, drm_context_t context)
|
|||
drm_ctx_t ctx;
|
||||
|
||||
ctx.handle = context;
|
||||
if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
|
||||
if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1383,7 +1396,7 @@ int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags)
|
|||
ctx.flags |= _DRM_CONTEXT_PRESERVED;
|
||||
if (flags & DRM_CONTEXT_2DONLY)
|
||||
ctx.flags |= _DRM_CONTEXT_2DONLY;
|
||||
if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
|
||||
if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1394,7 +1407,7 @@ int drmGetContextFlags(int fd, drm_context_t context,
|
|||
drm_ctx_t ctx;
|
||||
|
||||
ctx.handle = context;
|
||||
if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx))
|
||||
if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx))
|
||||
return -errno;
|
||||
*flags = 0;
|
||||
if (ctx.flags & _DRM_CONTEXT_PRESERVED)
|
||||
|
|
@ -1425,7 +1438,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
|
|||
{
|
||||
drm_ctx_t ctx;
|
||||
ctx.handle = handle;
|
||||
if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx))
|
||||
if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1433,7 +1446,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
|
|||
int drmCreateDrawable(int fd, drm_drawable_t *handle)
|
||||
{
|
||||
drm_draw_t draw;
|
||||
if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
|
||||
if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
|
||||
return -errno;
|
||||
*handle = draw.handle;
|
||||
return 0;
|
||||
|
|
@ -1443,7 +1456,7 @@ int drmDestroyDrawable(int fd, drm_drawable_t handle)
|
|||
{
|
||||
drm_draw_t draw;
|
||||
draw.handle = handle;
|
||||
if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw))
|
||||
if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1459,7 +1472,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
|
|||
update.num = num;
|
||||
update.data = (unsigned long long)(unsigned long)data;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
|
||||
if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
|
||||
return -errno;
|
||||
|
||||
return 0;
|
||||
|
|
@ -1479,7 +1492,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
|
|||
*/
|
||||
int drmAgpAcquire(int fd)
|
||||
{
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1497,7 +1510,7 @@ int drmAgpAcquire(int fd)
|
|||
*/
|
||||
int drmAgpRelease(int fd)
|
||||
{
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1520,7 +1533,7 @@ int drmAgpEnable(int fd, unsigned long mode)
|
|||
drm_agp_mode_t m;
|
||||
|
||||
m.mode = mode;
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1551,7 +1564,7 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
|
|||
b.size = size;
|
||||
b.handle = 0;
|
||||
b.type = type;
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
|
||||
return -errno;
|
||||
if (address != 0UL)
|
||||
*address = b.physical;
|
||||
|
|
@ -1578,7 +1591,7 @@ int drmAgpFree(int fd, drm_handle_t handle)
|
|||
|
||||
b.size = 0;
|
||||
b.handle = handle;
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1603,7 +1616,7 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)
|
|||
|
||||
b.handle = handle;
|
||||
b.offset = offset;
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1627,7 +1640,7 @@ int drmAgpUnbind(int fd, drm_handle_t handle)
|
|||
|
||||
b.handle = handle;
|
||||
b.offset = 0;
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1648,7 +1661,7 @@ int drmAgpVersionMajor(int fd)
|
|||
{
|
||||
drm_agp_info_t i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
return -errno;
|
||||
return i.agp_version_major;
|
||||
}
|
||||
|
|
@ -1669,7 +1682,7 @@ int drmAgpVersionMinor(int fd)
|
|||
{
|
||||
drm_agp_info_t i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
return -errno;
|
||||
return i.agp_version_minor;
|
||||
}
|
||||
|
|
@ -1690,7 +1703,7 @@ unsigned long drmAgpGetMode(int fd)
|
|||
{
|
||||
drm_agp_info_t i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
return 0;
|
||||
return i.mode;
|
||||
}
|
||||
|
|
@ -1711,7 +1724,7 @@ unsigned long drmAgpBase(int fd)
|
|||
{
|
||||
drm_agp_info_t i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
return 0;
|
||||
return i.aperture_base;
|
||||
}
|
||||
|
|
@ -1732,7 +1745,7 @@ unsigned long drmAgpSize(int fd)
|
|||
{
|
||||
drm_agp_info_t i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
return 0;
|
||||
return i.aperture_size;
|
||||
}
|
||||
|
|
@ -1753,7 +1766,7 @@ unsigned long drmAgpMemoryUsed(int fd)
|
|||
{
|
||||
drm_agp_info_t i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
return 0;
|
||||
return i.memory_used;
|
||||
}
|
||||
|
|
@ -1774,7 +1787,7 @@ unsigned long drmAgpMemoryAvail(int fd)
|
|||
{
|
||||
drm_agp_info_t i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
return 0;
|
||||
return i.memory_allowed;
|
||||
}
|
||||
|
|
@ -1795,7 +1808,7 @@ unsigned int drmAgpVendorId(int fd)
|
|||
{
|
||||
drm_agp_info_t i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
return 0;
|
||||
return i.id_vendor;
|
||||
}
|
||||
|
|
@ -1816,7 +1829,7 @@ unsigned int drmAgpDeviceId(int fd)
|
|||
{
|
||||
drm_agp_info_t i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
|
||||
return 0;
|
||||
return i.id_device;
|
||||
}
|
||||
|
|
@ -1828,7 +1841,7 @@ int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
|
|||
*handle = 0;
|
||||
sg.size = size;
|
||||
sg.handle = 0;
|
||||
if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
|
||||
return -errno;
|
||||
*handle = sg.handle;
|
||||
return 0;
|
||||
|
|
@ -1840,7 +1853,7 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
|
|||
|
||||
sg.size = 0;
|
||||
sg.handle = handle;
|
||||
if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1861,7 +1874,7 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)
|
|||
int ret;
|
||||
|
||||
do {
|
||||
ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
|
||||
ret = drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
|
||||
vbl->request.type &= ~DRM_VBLANK_RELATIVE;
|
||||
} while (ret && errno == EINTR);
|
||||
|
||||
|
|
@ -1911,7 +1924,7 @@ int drmCtlInstHandler(int fd, int irq)
|
|||
|
||||
ctl.func = DRM_INST_HANDLER;
|
||||
ctl.irq = irq;
|
||||
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
|
||||
if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1934,7 +1947,7 @@ int drmCtlUninstHandler(int fd)
|
|||
|
||||
ctl.func = DRM_UNINST_HANDLER;
|
||||
ctl.irq = 0;
|
||||
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
|
||||
if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1951,7 +1964,7 @@ int drmFinish(int fd, int context, drmLockFlags flags)
|
|||
if (flags & DRM_LOCK_FLUSH_ALL) lock.flags |= _DRM_LOCK_FLUSH_ALL;
|
||||
if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
|
||||
if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
|
||||
if (ioctl(fd, DRM_IOCTL_FINISH, &lock))
|
||||
if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1977,7 +1990,7 @@ int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
|
|||
p.busnum = busnum;
|
||||
p.devnum = devnum;
|
||||
p.funcnum = funcnum;
|
||||
if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
|
||||
if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
|
||||
return -errno;
|
||||
return p.irq;
|
||||
}
|
||||
|
|
@ -2019,7 +2032,7 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
|
|||
map.ctx_id = ctx_id;
|
||||
map.handle = (void *)handle;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
|
||||
if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -2031,7 +2044,7 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
|
|||
|
||||
map.ctx_id = ctx_id;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
|
||||
if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
|
||||
return -errno;
|
||||
if (handle)
|
||||
*handle = (drm_handle_t)map.handle;
|
||||
|
|
@ -2046,7 +2059,7 @@ int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
|
|||
drm_map_t map;
|
||||
|
||||
map.offset = idx;
|
||||
if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
|
||||
if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map))
|
||||
return -errno;
|
||||
*offset = map.offset;
|
||||
*size = map.size;
|
||||
|
|
@ -2063,7 +2076,7 @@ int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,
|
|||
drm_client_t client;
|
||||
|
||||
client.idx = idx;
|
||||
if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client))
|
||||
if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client))
|
||||
return -errno;
|
||||
*auth = client.auth;
|
||||
*pid = client.pid;
|
||||
|
|
@ -2078,7 +2091,7 @@ int drmGetStats(int fd, drmStatsT *stats)
|
|||
drm_stats_t s;
|
||||
int i;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_GET_STATS, &s))
|
||||
if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s))
|
||||
return -errno;
|
||||
|
||||
stats->count = 0;
|
||||
|
|
@ -2220,7 +2233,7 @@ int drmSetInterfaceVersion(int fd, drmSetVersion *version)
|
|||
sv.drm_dd_major = version->drm_dd_major;
|
||||
sv.drm_dd_minor = version->drm_dd_minor;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
|
||||
if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
|
||||
retcode = -errno;
|
||||
}
|
||||
|
||||
|
|
@ -2251,7 +2264,7 @@ int drmCommandNone(int fd, unsigned long drmCommandIndex)
|
|||
|
||||
request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex);
|
||||
|
||||
if (ioctl(fd, request, data)) {
|
||||
if (drmIoctl(fd, request, data)) {
|
||||
return -errno;
|
||||
}
|
||||
return 0;
|
||||
|
|
@ -2280,7 +2293,7 @@ int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
|
|||
request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE,
|
||||
DRM_COMMAND_BASE + drmCommandIndex, size);
|
||||
|
||||
if (ioctl(fd, request, data)) {
|
||||
if (drmIoctl(fd, request, data)) {
|
||||
return -errno;
|
||||
}
|
||||
return 0;
|
||||
|
|
@ -2309,7 +2322,7 @@ int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
|
|||
request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE,
|
||||
DRM_COMMAND_BASE + drmCommandIndex, size);
|
||||
|
||||
if (ioctl(fd, request, data)) {
|
||||
if (drmIoctl(fd, request, data)) {
|
||||
return -errno;
|
||||
}
|
||||
return 0;
|
||||
|
|
@ -2338,9 +2351,8 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
|
|||
request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE,
|
||||
DRM_COMMAND_BASE + drmCommandIndex, size);
|
||||
|
||||
if (ioctl(fd, request, data)) {
|
||||
if (drmIoctl(fd, request, data))
|
||||
return -errno;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -2362,7 +2374,7 @@ int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type,
|
|||
arg.type = type;
|
||||
arg.fence_class = fence_class;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
|
||||
return -errno;
|
||||
fence->handle = arg.handle;
|
||||
fence->fence_class = arg.fence_class;
|
||||
|
|
@ -2386,7 +2398,7 @@ int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fenc
|
|||
arg.flags = flags;
|
||||
arg.fence_class = fence_class;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
|
||||
return -errno;
|
||||
fence->handle = arg.handle;
|
||||
fence->fence_class = arg.fence_class;
|
||||
|
|
@ -2404,7 +2416,7 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence)
|
|||
memset(&arg, 0, sizeof(arg));
|
||||
arg.handle = handle;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
|
||||
return -errno;
|
||||
fence->handle = arg.handle;
|
||||
fence->fence_class = arg.fence_class;
|
||||
|
|
@ -2421,7 +2433,7 @@ int drmFenceUnreference(int fd, const drmFence *fence)
|
|||
memset(&arg, 0, sizeof(arg));
|
||||
arg.handle = fence->handle;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -2434,7 +2446,7 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
|
|||
arg.handle = fence->handle;
|
||||
arg.type = flush_type;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
|
||||
return -errno;
|
||||
fence->fence_class = arg.fence_class;
|
||||
fence->type = arg.type;
|
||||
|
|
@ -2449,7 +2461,7 @@ int drmFenceUpdate(int fd, drmFence *fence)
|
|||
memset(&arg, 0, sizeof(arg));
|
||||
arg.handle = fence->handle;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
|
||||
return -errno;
|
||||
fence->fence_class = arg.fence_class;
|
||||
fence->type = arg.type;
|
||||
|
|
@ -2489,7 +2501,7 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
|
|||
arg.handle = fence->handle;
|
||||
arg.type = emit_type;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
|
||||
return -errno;
|
||||
fence->fence_class = arg.fence_class;
|
||||
fence->type = arg.type;
|
||||
|
|
@ -2527,7 +2539,7 @@ drmIoctlTimeout(int fd, unsigned long request, void *argp)
|
|||
int ret;
|
||||
|
||||
do {
|
||||
ret = ioctl(fd, request, argp);
|
||||
ret = drmIoctl(fd, request, argp);
|
||||
if (ret != 0 && errno == EAGAIN) {
|
||||
if (!haveThen) {
|
||||
gettimeofday(&then, NULL);
|
||||
|
|
@ -2637,7 +2649,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
|
|||
memset(&arg, 0, sizeof(arg));
|
||||
req->handle = handle;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
|
||||
return -errno;
|
||||
|
||||
drmBOCopyReply(rep, buf);
|
||||
|
|
@ -2661,7 +2673,7 @@ int drmBOUnreference(int fd, drmBO *buf)
|
|||
memset(&arg, 0, sizeof(arg));
|
||||
arg.handle = buf->handle;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
|
||||
return -errno;
|
||||
|
||||
buf->handle = 0;
|
||||
|
|
@ -2731,7 +2743,7 @@ int drmBOUnmap(int fd, drmBO *buf)
|
|||
memset(&arg, 0, sizeof(arg));
|
||||
arg.handle = buf->handle;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
|
||||
if (drmIoctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
|
||||
return -errno;
|
||||
}
|
||||
buf->mapCount--;
|
||||
|
|
@ -2777,7 +2789,7 @@ int drmBOInfo(int fd, drmBO *buf)
|
|||
memset(&arg, 0, sizeof(arg));
|
||||
req->handle = buf->handle;
|
||||
|
||||
ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
|
||||
ret = drmIoctl(fd, DRM_IOCTL_BO_INFO, &arg);
|
||||
if (ret)
|
||||
return -errno;
|
||||
|
||||
|
|
@ -2832,7 +2844,7 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
|
|||
arg.p_size = pSize;
|
||||
arg.mem_type = memType;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_MM_INIT, &arg))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -2844,7 +2856,7 @@ int drmMMTakedown(int fd, unsigned memType)
|
|||
memset(&arg, 0, sizeof(arg));
|
||||
arg.mem_type = memType;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
|
||||
return -errno;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -2886,7 +2898,7 @@ int drmMMInfo(int fd, unsigned memType, uint64_t *size)
|
|||
|
||||
arg.mem_type = memType;
|
||||
|
||||
if (ioctl(fd, DRM_IOCTL_MM_INFO, &arg))
|
||||
if (drmIoctl(fd, DRM_IOCTL_MM_INFO, &arg))
|
||||
return -errno;
|
||||
|
||||
*size = arg.p_size;
|
||||
|
|
@ -2901,7 +2913,7 @@ int drmBOVersion(int fd, unsigned int *major,
|
|||
int ret;
|
||||
|
||||
memset(&arg, 0, sizeof(arg));
|
||||
ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
|
||||
ret = drmIoctl(fd, DRM_IOCTL_BO_VERSION, &arg);
|
||||
if (ret)
|
||||
return -errno;
|
||||
|
||||
|
|
|
|||
|
|
@ -30,7 +30,6 @@
|
|||
#
|
||||
# make DRM_MODULES="r128 radeon"
|
||||
#
|
||||
DRM_MODULES=i915
|
||||
|
||||
SHELL=/bin/sh
|
||||
|
||||
|
|
@ -118,7 +117,7 @@ V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \
|
|||
|
||||
ifeq ($(V),"$(RUNNING_REL)")
|
||||
HEADERFROMBOOT := 1
|
||||
GETCONFIG := MAKEFILES=$(shell pwd)/.config
|
||||
GETCONFIG := MAKEFILES=$(shell /bin/pwd)/.config
|
||||
HAVECONFIG := y
|
||||
endif
|
||||
|
||||
|
|
@ -165,7 +164,7 @@ endif
|
|||
all: modules
|
||||
|
||||
modules: includes
|
||||
+make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
|
||||
+make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules
|
||||
|
||||
ifeq ($(HEADERFROMBOOT),1)
|
||||
|
||||
|
|
@ -241,7 +240,7 @@ drmstat: drmstat.c
|
|||
$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
|
||||
|
||||
install:
|
||||
make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
|
||||
make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules_install
|
||||
|
||||
else
|
||||
|
||||
|
|
|
|||
|
|
@ -819,21 +819,6 @@ struct drm_driver {
|
|||
int (*gem_init_object) (struct drm_gem_object *obj);
|
||||
void (*gem_free_object) (struct drm_gem_object *obj);
|
||||
|
||||
/**
|
||||
* Driver-specific callback to set memory domains from userspace
|
||||
*/
|
||||
int (*gem_set_domain) (struct drm_gem_object *obj,
|
||||
struct drm_file *file_priv,
|
||||
uint32_t read_domains,
|
||||
uint32_t write_domain);
|
||||
|
||||
/**
|
||||
* Driver-specific callback to flush pwrite through chipset
|
||||
*/
|
||||
int (*gem_flush_pwrite) (struct drm_gem_object *obj,
|
||||
uint64_t offset,
|
||||
uint64_t size);
|
||||
|
||||
struct drm_fence_driver *fence_driver;
|
||||
struct drm_bo_driver *bo_driver;
|
||||
|
||||
|
|
@ -1037,6 +1022,12 @@ struct drm_device {
|
|||
spinlock_t object_name_lock;
|
||||
struct idr object_name_idr;
|
||||
atomic_t object_count;
|
||||
atomic_t object_memory;
|
||||
atomic_t pin_count;
|
||||
atomic_t pin_memory;
|
||||
atomic_t gtt_count;
|
||||
atomic_t gtt_memory;
|
||||
uint32_t gtt_total;
|
||||
uint32_t invalidate_domains; /* domains pending invalidation */
|
||||
uint32_t flush_domains; /* domains pending flush */
|
||||
/*@} */
|
||||
|
|
@ -1252,10 +1243,6 @@ extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
|
|||
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
|
||||
extern void drm_idlelock_take(struct drm_lock_data *lock_data);
|
||||
extern void drm_idlelock_release(struct drm_lock_data *lock_data);
|
||||
extern int drm_client_lock_take(struct drm_device *dev,
|
||||
struct drm_file *file_priv);
|
||||
extern void drm_client_lock_release(struct drm_device *dev,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
/*
|
||||
* These are exported to drivers so that they can implement fencing using
|
||||
|
|
@ -1472,6 +1459,11 @@ static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
|
|||
kref_put (&obj->refcount, drm_gem_object_free);
|
||||
}
|
||||
|
||||
int
|
||||
drm_gem_handle_create(struct drm_file *file_priv,
|
||||
struct drm_gem_object *obj,
|
||||
int *handlep);
|
||||
|
||||
static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
|
||||
{
|
||||
drm_gem_object_reference (obj);
|
||||
|
|
@ -1495,37 +1487,16 @@ static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj
|
|||
struct drm_gem_object *
|
||||
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
|
||||
int handle);
|
||||
int drm_gem_create_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int drm_gem_close_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int drm_gem_pread_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
|
||||
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
|
||||
|
||||
|
||||
/*
|
||||
* Given the new read/write domains for an object,
|
||||
* compute the invalidate/flush domains for the whole device.
|
||||
*
|
||||
*/
|
||||
int drm_gem_object_set_domain (struct drm_gem_object *object,
|
||||
uint32_t read_domains,
|
||||
uint32_t write_domains);
|
||||
|
||||
|
||||
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
|
||||
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
|
||||
|
||||
|
|
|
|||
|
|
@ -175,15 +175,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
|
|||
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
|
||||
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
|
||||
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CREATE, drm_gem_create_ioctl, 0),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GEM_PREAD, drm_gem_pread_ioctl, 0),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GEM_PWRITE, drm_gem_pwrite_ioctl, 0),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GEM_MMAP, drm_gem_mmap_ioctl, 0),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GEM_SET_DOMAIN, drm_gem_set_domain_ioctl, DRM_AUTH),
|
||||
};
|
||||
|
||||
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
|
||||
|
|
|
|||
|
|
@ -74,6 +74,11 @@ drm_gem_init(struct drm_device *dev)
|
|||
spin_lock_init(&dev->object_name_lock);
|
||||
idr_init(&dev->object_name_idr);
|
||||
atomic_set(&dev->object_count, 0);
|
||||
atomic_set(&dev->object_memory, 0);
|
||||
atomic_set(&dev->pin_count, 0);
|
||||
atomic_set(&dev->pin_memory, 0);
|
||||
atomic_set(&dev->gtt_count, 0);
|
||||
atomic_set(&dev->gtt_memory, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -99,15 +104,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
|
|||
kref_init(&obj->refcount);
|
||||
kref_init(&obj->handlecount);
|
||||
obj->size = size;
|
||||
|
||||
/*
|
||||
* We've just allocated pages from the kernel,
|
||||
* so they've just been written by the CPU with
|
||||
* zeros. They'll need to be clflushed before we
|
||||
* use them with the GPU.
|
||||
*/
|
||||
obj->write_domain = DRM_GEM_DOMAIN_CPU;
|
||||
obj->read_domains = DRM_GEM_DOMAIN_CPU;
|
||||
if (dev->driver->gem_init_object != NULL &&
|
||||
dev->driver->gem_init_object(obj) != 0) {
|
||||
fput(obj->filp);
|
||||
|
|
@ -115,55 +111,17 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
|
|||
return NULL;
|
||||
}
|
||||
atomic_inc(&dev->object_count);
|
||||
atomic_add(obj->size, &dev->object_memory);
|
||||
return obj;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_object_alloc);
|
||||
|
||||
/**
|
||||
* Removes the mapping from handle to filp for this object.
|
||||
*/
|
||||
static int
|
||||
drm_gem_handle_delete(struct drm_file *filp, int handle)
|
||||
{
|
||||
struct drm_device *dev;
|
||||
struct drm_gem_object *obj;
|
||||
|
||||
/* This is gross. The idr system doesn't let us try a delete and
|
||||
* return an error code. It just spews if you fail at deleting.
|
||||
* So, we have to grab a lock around finding the object and then
|
||||
* doing the delete on it and dropping the refcount, or the user
|
||||
* could race us to double-decrement the refcount and cause a
|
||||
* use-after-free later. Given the frequency of our handle lookups,
|
||||
* we may want to use ida for number allocation and a hash table
|
||||
* for the pointers, anyway.
|
||||
*/
|
||||
spin_lock(&filp->table_lock);
|
||||
|
||||
/* Check if we currently have a reference on the object */
|
||||
obj = idr_find(&filp->object_idr, handle);
|
||||
if (obj == NULL) {
|
||||
spin_unlock(&filp->table_lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev = obj->dev;
|
||||
|
||||
/* Release reference and decrement refcount. */
|
||||
idr_remove(&filp->object_idr, handle);
|
||||
spin_unlock(&filp->table_lock);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_gem_object_handle_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a handle for this object. This adds a handle reference
|
||||
* to the object, which includes a regular reference count. Callers
|
||||
* will likely want to dereference the object afterwards.
|
||||
*/
|
||||
static int
|
||||
int
|
||||
drm_gem_handle_create(struct drm_file *file_priv,
|
||||
struct drm_gem_object *obj,
|
||||
int *handlep)
|
||||
|
|
@ -191,6 +149,7 @@ again:
|
|||
drm_gem_object_handle_reference(obj);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_handle_create);
|
||||
|
||||
/** Returns a reference to the object named by the handle. */
|
||||
struct drm_gem_object *
|
||||
|
|
@ -216,334 +175,6 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
|
|||
}
|
||||
EXPORT_SYMBOL(drm_gem_object_lookup);
|
||||
|
||||
/**
|
||||
* Creates a new mm object and returns a handle to it.
|
||||
*/
|
||||
int
|
||||
drm_gem_create_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_gem_create *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
int handle, ret;
|
||||
|
||||
if (!(dev->driver->driver_features & DRIVER_GEM))
|
||||
return -ENODEV;
|
||||
|
||||
args->size = roundup(args->size, PAGE_SIZE);
|
||||
|
||||
/* Allocate the new object */
|
||||
obj = drm_gem_object_alloc(dev, args->size);
|
||||
if (obj == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = drm_gem_handle_create(file_priv, obj, &handle);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_gem_object_handle_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
args->handle = handle;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Releases the handle to an mm object.
|
||||
*/
|
||||
int
|
||||
drm_gem_close_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_gem_close *args = data;
|
||||
int ret;
|
||||
|
||||
if (!(dev->driver->driver_features & DRIVER_GEM))
|
||||
return -ENODEV;
|
||||
|
||||
ret = drm_gem_handle_delete(file_priv, args->handle);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads data from the object referenced by handle.
|
||||
*
|
||||
* On error, the contents of *data are undefined.
|
||||
*/
|
||||
int
|
||||
drm_gem_pread_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_gem_pread *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
ssize_t read;
|
||||
loff_t offset;
|
||||
int ret;
|
||||
|
||||
if (!(dev->driver->driver_features & DRIVER_GEM))
|
||||
return -ENODEV;
|
||||
|
||||
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
||||
if (obj == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
if (dev->driver->gem_set_domain) {
|
||||
ret = dev->driver->gem_set_domain(obj, file_priv,
|
||||
DRM_GEM_DOMAIN_CPU,
|
||||
0);
|
||||
if (ret) {
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
offset = args->offset;
|
||||
|
||||
read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
|
||||
args->size, &offset);
|
||||
if (read != args->size) {
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
if (read < 0)
|
||||
return read;
|
||||
else
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Maps the contents of an object, returning the address it is mapped
|
||||
* into.
|
||||
*
|
||||
* While the mapping holds a reference on the contents of the object, it doesn't
|
||||
* imply a ref on the object itself.
|
||||
*/
|
||||
int
|
||||
drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_gem_mmap *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
loff_t offset;
|
||||
unsigned long addr;
|
||||
|
||||
if (!(dev->driver->driver_features & DRIVER_GEM))
|
||||
return -ENODEV;
|
||||
|
||||
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
||||
if (obj == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
offset = args->offset;
|
||||
|
||||
down_write(¤t->mm->mmap_sem);
|
||||
addr = do_mmap(obj->filp, 0, args->size,
|
||||
PROT_READ | PROT_WRITE, MAP_SHARED,
|
||||
args->offset);
|
||||
up_write(¤t->mm->mmap_sem);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
if (IS_ERR((void *)addr))
|
||||
return addr;
|
||||
|
||||
args->addr_ptr = (uint64_t) addr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes data to the object referenced by handle.
|
||||
*
|
||||
* On error, the contents of the buffer that were to be modified are undefined.
|
||||
*/
|
||||
int
|
||||
drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_gem_pwrite *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
ssize_t written;
|
||||
loff_t offset;
|
||||
int ret;
|
||||
|
||||
if (!(dev->driver->driver_features & DRIVER_GEM))
|
||||
return -ENODEV;
|
||||
|
||||
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
||||
if (obj == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
if (dev->driver->gem_set_domain) {
|
||||
ret = dev->driver->gem_set_domain(obj, file_priv,
|
||||
DRM_GEM_DOMAIN_CPU,
|
||||
DRM_GEM_DOMAIN_CPU);
|
||||
if (ret) {
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
offset = args->offset;
|
||||
|
||||
written = vfs_write(obj->filp,
|
||||
(char __user *)(uintptr_t) args->data_ptr,
|
||||
args->size, &offset);
|
||||
|
||||
if (written != args->size) {
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
if (written < 0)
|
||||
return written;
|
||||
else
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev->driver->gem_flush_pwrite)
|
||||
dev->driver->gem_flush_pwrite(obj,
|
||||
args->offset,
|
||||
args->size);
|
||||
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a global name for an object, returning the name.
|
||||
*
|
||||
* Note that the name does not hold a reference; when the object
|
||||
* is freed, the name goes away.
|
||||
*/
|
||||
int
|
||||
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_gem_flink *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
if (!(dev->driver->driver_features & DRIVER_GEM))
|
||||
return -ENODEV;
|
||||
|
||||
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
||||
if (obj == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
again:
|
||||
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock(&dev->object_name_lock);
|
||||
if (obj->name) {
|
||||
spin_unlock(&dev->object_name_lock);
|
||||
return -EEXIST;
|
||||
}
|
||||
ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
|
||||
&obj->name);
|
||||
spin_unlock(&dev->object_name_lock);
|
||||
if (ret == -EAGAIN)
|
||||
goto again;
|
||||
|
||||
if (ret != 0) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Leave the reference from the lookup around as the
|
||||
* name table now holds one
|
||||
*/
|
||||
args->name = (uint64_t) obj->name;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Open an object using the global name, returning a handle and the size.
|
||||
*
|
||||
* This handle (of course) holds a reference to the object, so the object
|
||||
* will not go away until the handle is deleted.
|
||||
*/
|
||||
int
|
||||
drm_gem_open_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_gem_open *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
int ret;
|
||||
int handle;
|
||||
|
||||
if (!(dev->driver->driver_features & DRIVER_GEM))
|
||||
return -ENODEV;
|
||||
|
||||
spin_lock(&dev->object_name_lock);
|
||||
obj = idr_find(&dev->object_name_idr, (int) args->name);
|
||||
if (obj)
|
||||
drm_gem_object_reference(obj);
|
||||
spin_unlock(&dev->object_name_lock);
|
||||
if (!obj)
|
||||
return -ENOENT;
|
||||
|
||||
ret = drm_gem_handle_create(file_priv, obj, &handle);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
args->handle = handle;
|
||||
args->size = obj->size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when user space prepares to use an object
|
||||
*/
|
||||
int
|
||||
drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_gem_set_domain *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
if (!(dev->driver->driver_features & DRIVER_GEM))
|
||||
return -ENODEV;
|
||||
|
||||
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
||||
if (obj == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
if (dev->driver->gem_set_domain) {
|
||||
ret = dev->driver->gem_set_domain(obj, file_priv,
|
||||
args->read_domains,
|
||||
args->write_domain);
|
||||
} else {
|
||||
obj->read_domains = args->read_domains;
|
||||
obj->write_domain = args->write_domain;
|
||||
ret = 0;
|
||||
}
|
||||
drm_gem_object_unreference(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called at device open time, sets up the structure for handling refcounting
|
||||
* of mm objects.
|
||||
|
|
@ -603,6 +234,7 @@ drm_gem_object_free(struct kref *kref)
|
|||
|
||||
fput(obj->filp);
|
||||
atomic_dec(&dev->object_count);
|
||||
atomic_sub(obj->size, &dev->object_memory);
|
||||
kfree(obj);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_object_free);
|
||||
|
|
|
|||
|
|
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
        p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
        return -EINVAL;

    p->irq = dev->irq;
    p->irq = dev->pdev->irq;

    DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
              p->irq);

@@ -285,7 +285,7 @@ int drm_irq_install(struct drm_device * dev)
    if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
        return -EINVAL;

    if (dev->irq == 0)
    if (dev->pdev->irq == 0)
        return -EINVAL;

    mutex_lock(&dev->struct_mutex);

@@ -303,7 +303,7 @@ int drm_irq_install(struct drm_device * dev)
    dev->irq_enabled = 1;
    mutex_unlock(&dev->struct_mutex);

    DRM_DEBUG("irq=%d\n", dev->irq);
    DRM_DEBUG("irq=%d\n", dev->pdev->irq);

    /* Before installing handler */
    dev->driver->irq_preinstall(dev);

@@ -312,7 +312,7 @@ int drm_irq_install(struct drm_device * dev)
    if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
        sh_flags = IRQF_SHARED;

    ret = request_irq(dev->irq, dev->driver->irq_handler,
    ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
                      sh_flags, dev->devname, dev);
    if (ret < 0) {
        mutex_lock(&dev->struct_mutex);

@@ -320,6 +320,10 @@ int drm_irq_install(struct drm_device * dev)
        mutex_unlock(&dev->struct_mutex);
        return ret;
    }
    /* Expose the device irq to device drivers that want to export it for
     * whatever reason.
     */
    dev->irq = dev->pdev->irq;

    /* After installing handler */
    ret = dev->driver->irq_postinstall(dev);

@@ -355,11 +359,11 @@ int drm_irq_uninstall(struct drm_device * dev)
    if (!irq_enabled)
        return -EINVAL;

    DRM_DEBUG("irq=%d\n", dev->irq);
    DRM_DEBUG("irq=%d\n", dev->pdev->irq);

    dev->driver->irq_uninstall(dev);

    free_irq(dev->irq, dev);
    free_irq(dev->pdev->irq, dev);

    drm_vblank_cleanup(dev);

@@ -397,7 +401,7 @@ int drm_control(struct drm_device *dev, void *data,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
            return 0;
        if (dev->if_version < DRM_IF_VERSION(1, 2) &&
            ctl->irq != dev->irq)
            ctl->irq != dev->pdev->irq)
            return -EINVAL;
        return drm_irq_install(dev);
    case DRM_UNINST_HANDLER:

@@ -580,7 +584,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
    int ret = 0;
    unsigned int flags, seq, crtc;

    if ((!dev->irq) || (!dev->irq_enabled))
    if ((!dev->pdev->irq) || (!dev->irq_enabled))
        return -EINVAL;

    if (vblwait->request.type &

@@ -384,65 +384,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_release);

/**
 * Takes the lock on behalf of the client if needed, using the kernel context.
 *
 * This allows us to hide the hardware lock when it's required for protection
 * of data structures (such as command ringbuffer) shared with the X Server, and
 * a way for us to transition to lockless for those requests when the X Server
 * stops accessing the ringbuffer directly, without having to update the
 * other userland clients.
 */
int drm_client_lock_take(struct drm_device *dev, struct drm_file *file_priv)
{
    struct drm_master *master = file_priv->master;
    int ret;
    unsigned long irqflags;

    /* If the client has the lock, we're already done. */
    if (drm_i_have_hw_lock(dev, file_priv))
        return 0;

    mutex_unlock (&dev->struct_mutex);
    /* Client doesn't hold the lock. Block taking the lock with the kernel
     * context on behalf of the client, and return whether we were
     * successful.
     */
    spin_lock_irqsave(&master->lock.spinlock, irqflags);
    master->lock.user_waiters++;
    spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
    ret = wait_event_interruptible(master->lock.lock_queue,
                                   drm_lock_take(&master->lock,
                                                 DRM_KERNEL_CONTEXT));
    spin_lock_irqsave(&master->lock.spinlock, irqflags);
    master->lock.user_waiters--;
    if (ret != 0) {
        spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
    } else {
        master->lock.file_priv = file_priv;
        master->lock.lock_time = jiffies;
        master->lock.kernel_held = 1;
        file_priv->lock_count++;
        spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
    }
    mutex_lock (&dev->struct_mutex);
    return ret;
}
EXPORT_SYMBOL(drm_client_lock_take);

void drm_client_lock_release(struct drm_device *dev, struct drm_file *file_priv)
{
    struct drm_master *master = file_priv->master;

    if (master->lock.kernel_held) {
        master->lock.kernel_held = 0;
        master->lock.file_priv = NULL;
        drm_lock_free(&master->lock, DRM_KERNEL_CONTEXT);
    }
}
EXPORT_SYMBOL(drm_client_lock_release);
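
drm_client_lock_take()/drm_client_lock_release() above let a driver grab the hardware lock with the kernel context when the calling client does not already hold it. A hedged sketch of how a driver path that still touches state shared with the X server might wrap itself in the pair; the wrapper name and the commented-out body are hypothetical, and struct_mutex is expected to be held since the helper drops and retakes it internally:

static int example_touch_shared_ring(struct drm_device *dev,
                                     struct drm_file *file_priv)
{
    int ret;

    /* No-op if the client already holds the HW lock; otherwise blocks
     * taking it with DRM_KERNEL_CONTEXT on the client's behalf. */
    ret = drm_client_lock_take(dev, file_priv);
    if (ret)
        return ret;

    /* ... poke the ringbuffer / SAREA state shared with the X server ... */

    /* Releases the lock only if it was taken with the kernel context. */
    drm_client_lock_release(dev, file_priv);
    return 0;
}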

int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
    struct drm_master *master = file_priv->master;

@@ -658,7 +658,12 @@ static int drm_gem_object_info(char *buf, char **start, off_t offset,

    *start = &buf[offset];
    *eof = 0;
    DRM_PROC_PRINT ("%d objects\n", atomic_read (&dev->object_count));
    DRM_PROC_PRINT("%d objects\n", atomic_read (&dev->object_count));
    DRM_PROC_PRINT("%d object bytes\n", atomic_read (&dev->object_memory));
    DRM_PROC_PRINT("%d pinned\n", atomic_read (&dev->pin_count));
    DRM_PROC_PRINT("%d pin bytes\n", atomic_read (&dev->pin_memory));
    DRM_PROC_PRINT("%d gtt bytes\n", atomic_read (&dev->gtt_memory));
    DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
    if (len > request + offset)
        return request;
    *eof = 1;

@@ -569,6 +569,8 @@ static int i915_resume(struct drm_device *dev)
}

static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void remove(struct pci_dev *pdev);

static struct drm_driver driver = {
    /* don't use mtrr's here, the Xserver or user space app should
     * deal with them for intel hardware.

@@ -579,8 +581,10 @@ static struct drm_driver driver = {
    .load = i915_driver_load,
    .unload = i915_driver_unload,
    .firstopen = i915_driver_firstopen,
    .open = i915_driver_open,
    .lastclose = i915_driver_lastclose,
    .preclose = i915_driver_preclose,
    .postclose = i915_driver_postclose,
    .suspend = i915_suspend,
    .resume = i915_resume,
    .device_is_agp = i915_driver_device_is_agp,

@@ -599,8 +603,6 @@ static struct drm_driver driver = {
    .ioctls = i915_ioctls,
    .gem_init_object = i915_gem_init_object,
    .gem_free_object = i915_gem_free_object,
    .gem_set_domain = i915_gem_set_domain,
    .gem_flush_pwrite = i915_gem_flush_pwrite,
    .fops = {
        .owner = THIS_MODULE,
        .open = drm_open,

@@ -617,7 +619,7 @@ static struct drm_driver driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
        .probe = probe,
        .remove = __devexit_p(drm_cleanup_pci),
        .remove = remove,
    },
#if defined(I915_HAVE_FENCE) && defined(I915_TTM)
    .fence_driver = &i915_fence_driver,

@@ -635,7 +637,28 @@ static struct drm_driver driver = {

static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    return drm_get_dev(pdev, ent, &driver);
    int ret;

    /* On the 945G/GM, the chipset reports the MSI capability on the
     * integrated graphics even though the support isn't actually there
     * according to the published specs. It doesn't appear to function
     * correctly in testing on 945G.
     * This may be a side effect of MSI having been made available for PEG
     * and the registers being closely associated.
     */
    if (pdev->device != 0x2772 && pdev->device != 0x27A2)
        (void )pci_enable_msi(pdev);

    ret = drm_get_dev(pdev, ent, &driver);
    if (ret && pdev->msi_enabled)
        pci_disable_msi(pdev);
    return ret;
}
static void remove(struct pci_dev *pdev)
{
    if (pdev->msi_enabled)
        pci_disable_msi(pdev);
    drm_cleanup_pci(pdev);
}

static int __init i915_init(void)

File diff suppressed because it is too large
@@ -993,84 +993,6 @@ struct drm_mm_info_arg {
    uint64_t p_size;
};

struct drm_gem_create {
    /**
     * Requested size for the object.
     *
     * The (page-aligned) allocated size for the object will be returned.
     */
    uint64_t size;
    /**
     * Returned handle for the object.
     *
     * Object handles are nonzero.
     */
    uint32_t handle;
    uint32_t pad;
};

struct drm_gem_close {
    /** Handle of the object to be closed. */
    uint32_t handle;
    uint32_t pad;
};

struct drm_gem_pread {
    /** Handle for the object being read. */
    uint32_t handle;
    uint32_t pad;
    /** Offset into the object to read from */
    uint64_t offset;
    /** Length of data to read */
    uint64_t size;
    /** Pointer to write the data into. */
    uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
};

struct drm_gem_pwrite {
    /** Handle for the object being written to. */
    uint32_t handle;
    uint32_t pad;
    /** Offset into the object to write to */
    uint64_t offset;
    /** Length of data to write */
    uint64_t size;
    /** Pointer to read the data from. */
    uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
};

struct drm_gem_mmap {
    /** Handle for the object being mapped. */
    uint32_t handle;
    uint32_t pad;
    /** Offset in the object to map. */
    uint64_t offset;
    /**
     * Length of data to map.
     *
     * The value will be page-aligned.
     */
    uint64_t size;
    /** Returned pointer the data was mapped at */
    uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
};

struct drm_gem_flink {
    /** Handle for the object being named */
    uint32_t handle;
    /** Returned global name */
    uint32_t name;
};

struct drm_gem_open {
    /** Name of object being opened */
    uint32_t name;
    /** Returned handle for the object */
    uint32_t handle;
    /** Returned size of the object */
    uint64_t size;
};

struct drm_gem_set_domain {
    /** Handle for the object */
    uint32_t handle;

@@ -1298,6 +1220,10 @@ struct drm_mode_crtc_lut {
#define DRM_IOCTL_SET_VERSION           DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)

#define DRM_IOCTL_GEM_CLOSE             DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK             DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN              DRM_IOWR(0x0b, struct drm_gem_open)

#define DRM_IOCTL_SET_UNIQUE            DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC            DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK                 DRM_IOWR(0x12, struct drm_block)

@@ -1348,15 +1274,6 @@ struct drm_mode_crtc_lut {

#define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_GEM_CREATE            DRM_IOWR(0x09, struct drm_gem_create)
#define DRM_IOCTL_GEM_CLOSE             DRM_IOW (0x0a, struct drm_gem_close)
#define DRM_IOCTL_GEM_PREAD             DRM_IOW (0x0b, struct drm_gem_pread)
#define DRM_IOCTL_GEM_PWRITE            DRM_IOW (0x0c, struct drm_gem_pwrite)
#define DRM_IOCTL_GEM_MMAP              DRM_IOWR(0x0d, struct drm_gem_mmap)
#define DRM_IOCTL_GEM_FLINK             DRM_IOWR(0x0e, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN              DRM_IOWR(0x0f, struct drm_gem_open)
#define DRM_IOCTL_GEM_SET_DOMAIN        DRM_IOW (0xb7, struct drm_gem_set_domain)

#define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)

@@ -121,7 +121,7 @@ int i915_dma_cleanup(struct drm_device * dev)
     * may not have been called from userspace and after dev_private
     * is freed, it's too late.
     */
    if (dev->irq)
    if (dev->irq_enabled)
        drm_irq_uninstall(dev);

    if (dev_priv->ring.virtual_start) {

@@ -885,7 +885,7 @@ static int i915_getparam(struct drm_device *dev, void *data,

    switch (param->param) {
    case I915_PARAM_IRQ_ACTIVE:
        value = dev->irq ? 1 : 0;
        value = dev->irq_enabled ? 1 : 0;
        break;
    case I915_PARAM_ALLOW_BATCHBUFFER:
        value = dev_priv->allow_batchbuffer ? 1 : 0;

@@ -1053,9 +1053,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,

    memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
    I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
    DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
              dev_priv->hws_agpoffset);
    DRM_DEBUG("load hws at %p\n", dev_priv->hws_vaddr);

    return 0;
}

@@ -1089,6 +1088,11 @@ struct drm_ioctl_desc i915_ioctls[] = {
    DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
    DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
    DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
    DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
    DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
    DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

@@ -183,7 +183,12 @@ typedef struct drm_i915_sarea {
#define DRM_I915_GEM_BUSY       0x17
#define DRM_I915_GEM_THROTTLE   0x18
#define DRM_I915_GEM_ENTERVT    0x19
#define DRM_I915_GEM_LEAVEVT    0x20
#define DRM_I915_GEM_LEAVEVT    0x1a
#define DRM_I915_GEM_CREATE     0x1b
#define DRM_I915_GEM_PREAD      0x1c
#define DRM_I915_GEM_PWRITE     0x1d
#define DRM_I915_GEM_MMAP       0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f

#define DRM_IOCTL_I915_INIT             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH            DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)

@@ -211,6 +216,11 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_THROTTLE     DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT      DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT      DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE       DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD        DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE       DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)

/* Asynchronous page flipping:
 */

@@ -428,6 +438,73 @@ struct drm_i915_gem_init {
    uint64_t gtt_end;
};

struct drm_i915_gem_create {
    /**
     * Requested size for the object.
     *
     * The (page-aligned) allocated size for the object will be returned.
     */
    uint64_t size;
    /**
     * Returned handle for the object.
     *
     * Object handles are nonzero.
     */
    uint32_t handle;
    uint32_t pad;
};

struct drm_i915_gem_pread {
    /** Handle for the object being read. */
    uint32_t handle;
    uint32_t pad;
    /** Offset into the object to read from */
    uint64_t offset;
    /** Length of data to read */
    uint64_t size;
    /** Pointer to write the data into. */
    uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
};

struct drm_i915_gem_pwrite {
    /** Handle for the object being written to. */
    uint32_t handle;
    uint32_t pad;
    /** Offset into the object to write to */
    uint64_t offset;
    /** Length of data to write */
    uint64_t size;
    /** Pointer to read the data from. */
    uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
};

struct drm_i915_gem_mmap {
    /** Handle for the object being mapped. */
    uint32_t handle;
    uint32_t pad;
    /** Offset in the object to map. */
    uint64_t offset;
    /**
     * Length of data to map.
     *
     * The value will be page-aligned.
     */
    uint64_t size;
    /** Returned pointer the data was mapped at */
    uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
};

struct drm_i915_gem_set_domain {
    /** Handle for the object */
    uint32_t handle;

    /** New read domains */
    uint32_t read_domains;

    /** New write domain */
    uint32_t write_domain;
};

struct drm_i915_gem_relocation_entry {
    /**
     * Handle of the buffer being pointed to by this relocation entry.

@@ -473,20 +550,26 @@ struct drm_i915_gem_relocation_entry {
    uint32_t write_domain;
};

/**
/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */

/* 0x00000001 is DRM_GEM_DOMAIN_CPU */
#define DRM_GEM_DOMAIN_I915_RENDER      0x00000002 /* Render cache, used by 2D and 3D drawing */
#define DRM_GEM_DOMAIN_I915_SAMPLER     0x00000004 /* Sampler cache, used by texture engine */
#define DRM_GEM_DOMAIN_I915_COMMAND     0x00000008 /* Command queue, used to load batch buffers */
#define DRM_GEM_DOMAIN_I915_INSTRUCTION 0x00000010 /* Instruction cache, used by shader programs */
#define DRM_GEM_DOMAIN_I915_VERTEX      0x00000020 /* Vertex address cache */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU             0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER          0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER         0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND         0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION     0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX          0x00000020
/** @} */
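
Before touching an object through the CPU, user space moves it into the CPU domain with the set-domain ioctl so the kernel can flush or invalidate the caches listed above. A small sketch using only the struct, flags and ioctl defined in this header; the helper name is made up:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Make `handle` coherent for CPU access; set `writing` if we will store. */
static int gem_set_domain_cpu(int fd, uint32_t handle, int writing)
{
    struct drm_i915_gem_set_domain sd;

    memset(&sd, 0, sizeof(sd));
    sd.handle = handle;
    sd.read_domains = I915_GEM_DOMAIN_CPU;
    sd.write_domain = writing ? I915_GEM_DOMAIN_CPU : 0;
    return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}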

struct drm_i915_gem_exec_object {
    /**

@@ -494,11 +577,15 @@ struct drm_i915_gem_exec_object {
     * operation.
     */
    uint32_t handle;

    /** List of relocations to be performed on this buffer */

    /** Number of relocations to be performed on this buffer */
    uint32_t relocation_count;
    uint64_t relocs_ptr; /* struct drm_i915_gem_relocation_entry *relocs */

    /**
     * Pointer to array of struct drm_i915_gem_relocation_entry containing
     * the relocations to be performed in this buffer.
     */
    uint64_t relocs_ptr;

    /** Required alignment in graphics aperture */
    uint64_t alignment;

@@ -514,11 +601,13 @@ struct drm_i915_gem_execbuffer {
     * List of buffers to be validated with their relocations to be
     * performed on them.
     *
     * This is a pointer to an array of struct drm_i915_gem_validate_entry.
     *
     * These buffers must be listed in an order such that all relocations
     * a buffer is performing refer to buffers that have already appeared
     * in the validate list.
     */
    uint64_t buffers_ptr; /* struct drm_i915_gem_validate_entry *buffers */
    uint64_t buffers_ptr;
    uint32_t buffer_count;

    /** Offset in the batchbuffer to start execution from. */

@@ -535,7 +624,7 @@ struct drm_i915_gem_pin {
    /** Handle of the buffer to be pinned. */
    uint32_t handle;
    uint32_t pad;

    /** alignment required within the aperture */
    uint64_t alignment;

@@ -552,7 +641,7 @@ struct drm_i915_gem_unpin {
struct drm_i915_gem_busy {
    /** Handle of the buffer to check for busy */
    uint32_t handle;

    /** Return busy status (1 if busy, 0 if idle) */
    uint32_t busy;
};
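
The ordering rule documented in drm_i915_gem_execbuffer above means every relocation target has to appear in the buffer list before the buffer whose relocations point at it. A hedged sketch of a two-buffer submission (a vertex buffer plus a batch that references it); the handles and the patch offset are invented, and any field names not visible in this hunk are assumed to follow the full header:

#include <stdint.h>
#include <string.h>
#include "i915_drm.h"

static void example_fill_execbuffer(uint32_t vertex_handle, uint32_t batch_handle,
                                    struct drm_i915_gem_relocation_entry *reloc,
                                    struct drm_i915_gem_exec_object obj[2],
                                    struct drm_i915_gem_execbuffer *execbuf)
{
    /* One relocation: the batch needs the vertex buffer's final offset. */
    memset(reloc, 0, sizeof(*reloc));
    reloc->target_handle = vertex_handle;          /* buffer being pointed to */
    reloc->offset = 4;                             /* where in the batch to patch */
    reloc->read_domains = I915_GEM_DOMAIN_VERTEX;
    reloc->write_domain = 0;

    /* Target first (no relocations), then the batch that relocates to it. */
    memset(obj, 0, 2 * sizeof(obj[0]));
    obj[0].handle = vertex_handle;
    obj[1].handle = batch_handle;
    obj[1].relocation_count = 1;
    obj[1].relocs_ptr = (uintptr_t)reloc;

    memset(execbuf, 0, sizeof(*execbuf));
    execbuf->buffers_ptr = (uintptr_t)obj;
    execbuf->buffer_count = 2;
}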

@@ -39,7 +39,7 @@

#define DRIVER_NAME     "i915"
#define DRIVER_DESC     "Intel Graphics"
#define DRIVER_DATE     "20080312"
#define DRIVER_DATE     "20080611"

#if defined(__linux__)
#define I915_HAVE_FENCE

@@ -63,7 +63,7 @@
 */
#define DRIVER_MAJOR    1
#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
#define DRIVER_MINOR    13
#define DRIVER_MINOR    14
#else
#define DRIVER_MINOR    6
#endif

@@ -162,7 +162,7 @@ struct drm_i915_private {
    void *agp_iomap;
    unsigned int max_validate_buffers;
    struct mutex cmdbuf_mutex;
    size_t stolen_base;
    u32 stolen_base;
    struct drm_i915_validate_buffer *val_bufs;
#endif

@@ -231,8 +231,7 @@ struct drm_i915_private {
     * fire periodically while the ring is running. When it
     * fires, go retire requests.
     */
    struct timer_list retire_timer;
    struct work_struct retire_task;
    struct delayed_work retire_work;

    uint32_t next_gem_seqno;

@@ -339,6 +338,13 @@ struct drm_i915_private {
    u8 saveCR[37];
};

struct drm_i915_file_private {
    struct {
        uint32_t last_gem_seqno;
        uint32_t last_gem_throttle_seqno;
    } mm;
};

enum intel_chip_family {
    CHIP_I8XX = 0x01,
    CHIP_I9XX = 0x02,

@@ -418,8 +424,11 @@ extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *dev);
extern void i915_driver_lastclose(struct drm_device * dev);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_preclose(struct drm_device *dev,
                                 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
                                  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);

@@ -461,7 +470,6 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
extern void i915_user_irq_on(struct drm_device *dev);
extern void i915_user_irq_off(struct drm_device *dev);
extern void i915_user_interrupt_handler(struct work_struct *work);

/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,

@@ -503,6 +511,16 @@ int i915_execbuffer(struct drm_device *dev, void *data,
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,

@@ -521,20 +539,13 @@ int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_set_domain(struct drm_gem_object *obj,
                        struct drm_file *file_priv,
                        uint32_t read_domains,
                        uint32_t write_domain);
int i915_gem_flush_pwrite(struct drm_gem_object *obj,
                          uint64_t offset, uint64_t size);
void i915_gem_lastclose(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_timeout(unsigned long data);
void i915_gem_retire_handler(struct work_struct *work);
int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end);
void i915_gem_retire_work_handler(struct work_struct *work);
#endif
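
i915_gem_retire_work_handler(), declared just above, replaces the old deferrable-timer-plus-work pair with a single delayed_work item (see the retire_work member and the INIT_DELAYED_WORK call elsewhere in this change). The handler body lives in a file whose diff was suppressed above as too large; a hedged sketch of what such a delayed-work handler typically looks like:

/* Sketch only: retire completed requests, then re-arm one second out. */
static void example_retire_work_handler(struct work_struct *work)
{
    struct drm_i915_private *dev_priv =
        container_of(work, struct drm_i915_private, mm.retire_work.work);
    struct drm_device *dev = dev_priv->dev;

    mutex_lock(&dev->struct_mutex);
    i915_gem_retire_requests(dev);
    /* Keep running while there is still outstanding work on the ring. */
    if (!list_empty(&dev_priv->mm.request_list))
        schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
    mutex_unlock(&dev->struct_mutex);
}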

extern unsigned int i915_fbpercrtc;

@@ -294,10 +294,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
    if (IS_I965G(dev) || IS_G33(dev))
        dev_priv->cursor_needs_physical = false;

    if (IS_I9XX(dev)) {
    if (IS_I9XX(dev))
        pci_read_config_dword(dev->pdev, 0x5C, &dev_priv->stolen_base);
        DRM_DEBUG("stolen base %p\n", (void*)dev_priv->stolen_base);
    }

    if (IS_I9XX(dev)) {
        dev_priv->mmiobase = drm_get_resource_start(dev, 0);

@@ -329,13 +327,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
    INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
    INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
    INIT_LIST_HEAD(&dev_priv->mm.request_list);
    dev_priv->mm.retire_timer.function = i915_gem_retire_timeout;
    dev_priv->mm.retire_timer.data = (unsigned long) dev;
    init_timer_deferrable (&dev_priv->mm.retire_timer);
    INIT_WORK(&dev_priv->mm.retire_task,
              i915_gem_retire_handler);
    INIT_WORK(&dev_priv->user_interrupt_task,
              i915_user_interrupt_handler);
    INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                      i915_gem_retire_work_handler);
    dev_priv->mm.next_gem_seqno = 1;

#ifdef __linux__

@@ -457,6 +450,32 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
    master->driver_priv = NULL;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
    struct drm_i915_file_private *i915_file_priv;

    DRM_DEBUG("\n");
    i915_file_priv = (struct drm_i915_file_private *)
        drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);

    if (!i915_file_priv)
        return -ENOMEM;

    file_priv->driver_priv = i915_file_priv;

    i915_file_priv->mm.last_gem_seqno = 0;
    i915_file_priv->mm.last_gem_throttle_seqno = 0;

    return 0;
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
    struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

    drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

@@ -35,6 +35,13 @@

#define MAX_NOPID ((u32)~0)

/*
 * These are the interrupts used by the driver
 */
#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
                                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
                                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)

/**
 * i915_get_pipe - return the pipe associated with a given plane
 * @dev: DRM device

@@ -493,28 +500,13 @@ static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
    return 0;
}

void
i915_user_interrupt_handler(struct work_struct *work)
{
    struct drm_i915_private *dev_priv;
    struct drm_device *dev;

    dev_priv = container_of(work, struct drm_i915_private,
                            user_interrupt_task);
    dev = dev_priv->dev;

    mutex_lock(&dev->struct_mutex);
    i915_gem_retire_requests(dev);
    mutex_unlock(&dev->struct_mutex);
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
    struct drm_device *dev = (struct drm_device *) arg;
    struct drm_i915_master_private *master_priv;
    struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
    u32 iir;
    u32 pipea_stats = 0, pipeb_stats, tvdac;
    u32 pipea_stats = 0, pipeb_stats = 0, tvdac;
    int hotplug = 0;
    int vblank = 0;

@@ -524,22 +516,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
    else
        iir = I915_READ16(IIR);

    iir &= (dev_priv->irq_mask_reg | I915_USER_INTERRUPT);
    if (dev->pdev->msi_enabled)
        I915_WRITE(IER, 0);

#if 0
    DRM_DEBUG("flag=%08x\n", iir);
#endif
    if (iir == 0) {
#if 0
        DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
                   iir,
                   I915_READ(IMR),
                   I915_READ(IER),
                   I915_READ(PIPEASTAT),
                   I915_READ(PIPEBSTAT));
#endif
    if (!iir)
        return IRQ_NONE;
    }

    /*
     * Clear the PIPE(A|B)STAT regs before the IIR otherwise

@@ -598,10 +579,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
        DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
        i915_fence_handler(dev);
        schedule_work(&dev_priv->user_interrupt_task);
#endif
    }

    if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
                       I915_VBLANK_INTERRUPT_STATUS)) {
        vblank = 1;
        drm_handle_vblank(dev, i915_get_plane(dev, 0));
    }
    if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
                       I915_VBLANK_INTERRUPT_STATUS)) {
        vblank = 1;
        drm_handle_vblank(dev, i915_get_plane(dev, 1));
    }
    if (vblank) {
        if (dev_priv->swaps_pending > 0)
            drm_locked_tasklet(dev, i915_vblank_tasklet);

@@ -626,6 +616,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
        i915_run_hotplug_tasklet(dev, temp2);
    }

    if (dev->pdev->msi_enabled)
        I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);

    return IRQ_HANDLED;
}

@@ -697,8 +690,17 @@ int i915_wait_irq(struct drm_device * dev, int irq_nr)
    DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
              READ_BREADCRUMB(dev_priv));

    if (READ_BREADCRUMB(dev_priv) >= irq_nr)
    master_priv = dev->primary->master->driver_priv;

    if (!master_priv) {
        DRM_ERROR("no master priv?\n");
        return -EINVAL;
    }

    if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
        master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
        return 0;
    }

    i915_user_irq_on(dev);
    DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,

@@ -710,10 +712,8 @@ int i915_wait_irq(struct drm_device * dev, int irq_nr)
              READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
    }

    if (dev->primary->master) {
        master_priv = dev->primary->master->driver_priv;
        if (READ_BREADCRUMB(dev_priv) >= irq_nr)
            master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
    }

    return ret;
}

@@ -34,6 +34,7 @@
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
#include "i915_drm.h"

static void
test_bad_close(int fd)

@@ -52,7 +53,7 @@ test_bad_close(int fd)
static void
test_create_close(int fd)
{
    struct drm_gem_create create;
    struct drm_i915_gem_create create;
    struct drm_gem_close close;
    int ret;

@@ -60,7 +61,7 @@ test_create_close(int fd)

    memset(&create, 0, sizeof(create));
    create.size = 16 * 1024;
    ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
    ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
    assert(ret == 0);

    close.handle = create.handle;

@@ -70,14 +71,14 @@ test_create_close(int fd)
static void
test_create_fd_close(int fd)
{
    struct drm_gem_create create;
    struct drm_i915_gem_create create;
    int ret;

    printf("Testing closing with an object allocated.\n");

    memset(&create, 0, sizeof(create));
    create.size = 16 * 1024;
    ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
    ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
    assert(ret == 0);

    close(fd);

@@ -34,12 +34,13 @@
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
#include "i915_drm.h"

#define OBJECT_SIZE 16384

int do_read(int fd, int handle, void *buf, int offset, int size)
{
    struct drm_gem_pread read;
    struct drm_i915_gem_pread read;

    /* Ensure that we don't have any convenient data in buf in case
     * we fail.

@@ -52,12 +53,12 @@ int do_read(int fd, int handle, void *buf, int offset, int size)
    read.size = size;
    read.offset = offset;

    return ioctl(fd, DRM_IOCTL_GEM_PREAD, &read);
    return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
}

int do_write(int fd, int handle, void *buf, int offset, int size)
{
    struct drm_gem_pwrite write;
    struct drm_i915_gem_pwrite write;

    memset(&write, 0, sizeof(write));
    write.handle = handle;

@@ -65,14 +66,14 @@ int do_write(int fd, int handle, void *buf, int offset, int size)
    write.size = size;
    write.offset = offset;

    return ioctl(fd, DRM_IOCTL_GEM_PWRITE, &write);
    return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
}

int main(int argc, char **argv)
{
    int fd;
    struct drm_gem_create create;
    struct drm_gem_mmap mmap;
    struct drm_i915_gem_create create;
    struct drm_i915_gem_mmap mmap;
    struct drm_gem_close unref;
    uint8_t expected[OBJECT_SIZE];
    uint8_t buf[OBJECT_SIZE];

@@ -87,12 +88,12 @@ int main(int argc, char **argv)
    mmap.offset = 0;
    mmap.size = 4096;
    printf("Testing mmaping of bad object.\n");
    ret = ioctl(fd, DRM_IOCTL_GEM_MMAP, &mmap);
    ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
    assert(ret == -1 && errno == EINVAL);

    memset(&create, 0, sizeof(create));
    create.size = OBJECT_SIZE;
    ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
    ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
    assert(ret == 0);
    handle = create.handle;

@@ -100,7 +101,7 @@ int main(int argc, char **argv)
    mmap.handle = handle;
    mmap.offset = 0;
    mmap.size = OBJECT_SIZE;
    ret = ioctl(fd, DRM_IOCTL_GEM_MMAP, &mmap);
    ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
    assert(ret == 0);
    addr = (uint8_t *)(uintptr_t)mmap.addr_ptr;

@@ -34,12 +34,13 @@
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
#include "i915_drm.h"

#define OBJECT_SIZE 16384

int do_read(int fd, int handle, void *buf, int offset, int size)
{
    struct drm_gem_pread read;
    struct drm_i915_gem_pread read;

    /* Ensure that we don't have any convenient data in buf in case
     * we fail.

@@ -52,12 +53,12 @@ int do_read(int fd, int handle, void *buf, int offset, int size)
    read.size = size;
    read.offset = offset;

    return ioctl(fd, DRM_IOCTL_GEM_PREAD, &read);
    return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
}

int do_write(int fd, int handle, void *buf, int offset, int size)
{
    struct drm_gem_pwrite write;
    struct drm_i915_gem_pwrite write;

    memset(&write, 0, sizeof(write));
    write.handle = handle;

@@ -65,13 +66,13 @@ int do_write(int fd, int handle, void *buf, int offset, int size)
    write.size = size;
    write.offset = offset;

    return ioctl(fd, DRM_IOCTL_GEM_PWRITE, &write);
    return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
}

int main(int argc, char **argv)
{
    int fd;
    struct drm_gem_create create;
    struct drm_i915_gem_create create;
    uint8_t expected[OBJECT_SIZE];
    uint8_t buf[OBJECT_SIZE];
    int ret;

@@ -81,7 +82,7 @@ int main(int argc, char **argv)

    memset(&create, 0, sizeof(create));
    create.size = OBJECT_SIZE;
    ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
    ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
    assert(ret == 0);
    handle = create.handle;