Added rmmap vma fixup routine so that the kernel does the right thing when
removing mappings that might still exist in a forked process.
This commit is contained in:
Jeff Hartmann 2001-02-19 18:25:26 +00:00
parent 18e03c15b7
commit 08cb65f7f7
6 changed files with 96 additions and 10 deletions

View file

@ -725,6 +725,7 @@ extern void DRM(vm_close)(struct vm_area_struct *vma);
extern int DRM(mmap_dma)(struct file *filp,
struct vm_area_struct *vma);
extern int DRM(mmap)(struct file *filp, struct vm_area_struct *vma);
extern void DRM(rmmap_fixup_vmas)(drm_device_t *dev, drm_map_t *map);
/* Proc support (proc.c) */

View file

@ -82,6 +82,14 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return -EFAULT;
}
/* Only allow shared memory to be removable since we only keep enough
* book keeping information about shared memory to allow for removal
* when processes fork.
*/
if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
return -EINVAL;
}
DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
map->offset, map->size, map->type );
if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
@ -157,6 +165,8 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return 0;
}
void DRM(rmmap_fixup_vmas)(drm_device_t *dev, drm_map_t *map)
/* Remove a map private from list and deallocate resources */
int DRM(rmmap)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
@ -192,10 +202,12 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
return -EINVAL;
}
map = r_list->map;
list_del(list);
list_del(list);
up(&dev->struct_sem);
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
/* Zap any pages that are still mapped and remove no_page vm_op. */
DRM(rmmap_fixup_vmas)(dev, map);
switch (map->type) {
case _DRM_REGISTERS:

View file

@ -151,9 +151,7 @@ void DRM(vm_open)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *vma_entry;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@ -178,9 +176,7 @@ void DRM(vm_close)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *pt, *prev;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@ -335,3 +331,37 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
DRM(vm_open)(vma);
return 0;
}
/* Support for rmmap so we can safely delete mappings without forcing
* them to be unmapped (which isn't possible if the process forked)
* before we call rmmap.
*/
/* Support for rmmap so we can safely delete mappings without forcing
 * them to be unmapped (which isn't possible if the process forked)
 * before we call rmmap.
 *
 * Walks dev->vmalist and, for every VMA still backed by the map being
 * removed, flushes and zaps its pages and swaps in the generic
 * drm_vm_ops (presumably its nopage handler refuses faults on the
 * removed map -- confirm against drm_vm.h).  Takes dev->struct_sem;
 * the caller must not already hold it.
 */
void DRM(rmmap_fixup_vmas)(drm_device_t *dev, drm_map_t *map)
{
	drm_vma_entry_t *pt;

	down(&dev->struct_sem);
	for (pt = dev->vmalist; pt; pt = pt->next) {
		struct vm_area_struct *vma = pt->vma;

#if LINUX_VERSION_CODE >= 0x020300
		if (vma->vm_private_data != (void *)map)
			continue;
#else
		if (vma->vm_pte != (unsigned long)map)
			continue;
#endif
		/* Zap the mappings.  Fixes vs. original: the loop body
		 * referenced an undeclared `vma` (must come from pt->vma),
		 * and flush_cache_range()/flush_tlb_range() expect an end
		 * address, not a length (zap_page_range() does take a
		 * size, so that call is unchanged). */
		flush_cache_range(vma->vm_mm, vma->vm_start, vma->vm_end);
		zap_page_range(vma->vm_mm, vma->vm_start,
			       vma->vm_end - vma->vm_start);
		flush_tlb_range(vma->vm_mm, vma->vm_start, vma->vm_end);
		/* Replace vm_ops so later faults do not go through the
		 * removed map's nopage path. */
		vma->vm_ops = &drm_vm_ops;
	}
	up(&dev->struct_sem);
}

View file

@ -725,6 +725,7 @@ extern void DRM(vm_close)(struct vm_area_struct *vma);
extern int DRM(mmap_dma)(struct file *filp,
struct vm_area_struct *vma);
extern int DRM(mmap)(struct file *filp, struct vm_area_struct *vma);
extern void DRM(rmmap_fixup_vmas)(drm_device_t *dev, drm_map_t *map);
/* Proc support (proc.c) */

View file

@ -82,6 +82,14 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return -EFAULT;
}
/* Only allow shared memory to be removable since we only keep enough
* book keeping information about shared memory to allow for removal
* when processes fork.
*/
if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
return -EINVAL;
}
DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
map->offset, map->size, map->type );
if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
@ -157,6 +165,8 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
return 0;
}
void DRM(rmmap_fixup_vmas)(drm_device_t *dev, drm_map_t *map)
/* Remove a map private from list and deallocate resources */
int DRM(rmmap)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
@ -192,10 +202,12 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
return -EINVAL;
}
map = r_list->map;
list_del(list);
list_del(list);
up(&dev->struct_sem);
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
/* Zap any pages that are still mapped and remove no_page vm_op. */
DRM(rmmap_fixup_vmas)(dev, map);
switch (map->type) {
case _DRM_REGISTERS:

View file

@ -151,9 +151,7 @@ void DRM(vm_open)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *vma_entry;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@ -178,9 +176,7 @@ void DRM(vm_close)(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
#if DRM_DEBUG_CODE
drm_vma_entry_t *pt, *prev;
#endif
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@ -335,3 +331,37 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
DRM(vm_open)(vma);
return 0;
}
/* Support for rmmap so we can safely delete mappings without forcing
* them to be unmapped (which isn't possible if the process forked)
* before we call rmmap.
*/
/* Support for rmmap so we can safely delete mappings without forcing
 * them to be unmapped (which isn't possible if the process forked)
 * before we call rmmap.
 *
 * Walks dev->vmalist and, for every VMA still backed by the map being
 * removed, flushes and zaps its pages and swaps in the generic
 * drm_vm_ops (presumably its nopage handler refuses faults on the
 * removed map -- confirm against drm_vm.h).  Takes dev->struct_sem;
 * the caller must not already hold it.
 */
void DRM(rmmap_fixup_vmas)(drm_device_t *dev, drm_map_t *map)
{
	drm_vma_entry_t *pt;

	down(&dev->struct_sem);
	for (pt = dev->vmalist; pt; pt = pt->next) {
		struct vm_area_struct *vma = pt->vma;

#if LINUX_VERSION_CODE >= 0x020300
		if (vma->vm_private_data != (void *)map)
			continue;
#else
		if (vma->vm_pte != (unsigned long)map)
			continue;
#endif
		/* Zap the mappings.  Fixes vs. original: the loop body
		 * referenced an undeclared `vma` (must come from pt->vma),
		 * and flush_cache_range()/flush_tlb_range() expect an end
		 * address, not a length (zap_page_range() does take a
		 * size, so that call is unchanged). */
		flush_cache_range(vma->vm_mm, vma->vm_start, vma->vm_end);
		zap_page_range(vma->vm_mm, vma->vm_start,
			       vma->vm_end - vma->vm_start);
		flush_tlb_range(vma->vm_mm, vma->vm_start, vma->vm_end);
		/* Replace vm_ops so later faults do not go through the
		 * removed map's nopage path. */
		vma->vm_ops = &drm_vm_ops;
	}
	up(&dev->struct_sem);
}