i915: Remove miscellaneous uncalled gen4 code from formerly shared files.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Author: Eric Anholt, 2013-06-20 12:09:27 -07:00 (committed by Kenneth Graunke)
parent 6bdc5ecbba
commit 7f210bf535
9 changed files with 0 additions and 311 deletions


@@ -89,34 +89,12 @@ intel_batchbuffer_reset(struct intel_context *intel)
   intel->batch.needs_sol_reset = false;
}

void
intel_batchbuffer_save_state(struct intel_context *intel)
{
   intel->batch.saved.used = intel->batch.used;
   intel->batch.saved.reloc_count =
      drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
}

void
intel_batchbuffer_reset_to_saved(struct intel_context *intel)
{
   drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
   intel->batch.used = intel->batch.saved.used;

   /* Cached batch state is dead, since we just cleared some unknown part of the
    * batchbuffer.  Assume that the caller resets any other state necessary.
    */
   clear_cache(intel);
}

void
intel_batchbuffer_free(struct intel_context *intel)
{
   free(intel->batch.cpu_map);
   drm_intel_bo_unreference(intel->batch.last_bo);
   drm_intel_bo_unreference(intel->batch.bo);
   drm_intel_bo_unreference(intel->batch.workaround_bo);
   clear_cache(intel);
}
@@ -321,170 +299,6 @@ intel_batchbuffer_data(struct intel_context *intel,
   intel->batch.used += bytes >> 2;
}
void
intel_batchbuffer_cached_advance(struct intel_context *intel)
{
   struct cached_batch_item **prev = &intel->batch.cached_items, *item;
   uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
   uint32_t *start = intel->batch.map + intel->batch.emit;
   uint16_t op = *start >> 16;

   /* Look for a previously emitted packet with the same opcode. */
   while (*prev) {
      uint32_t *old;

      item = *prev;
      old = intel->batch.map + item->header;
      if (op == *old >> 16) {
         if (item->size == sz && memcmp(old, start, sz) == 0) {
            /* Identical packet already in the batch: move the item to
             * the front of the list and drop the redundant copy.
             */
            if (prev != &intel->batch.cached_items) {
               *prev = item->next;
               item->next = intel->batch.cached_items;
               intel->batch.cached_items = item;
            }

            intel->batch.used = intel->batch.emit;
            return;
         }

         goto emit;
      }
      prev = &item->next;
   }

   item = malloc(sizeof(struct cached_batch_item));
   if (item == NULL)
      return;

   item->next = intel->batch.cached_items;
   intel->batch.cached_items = item;

emit:
   /* Record this packet so a later identical one can be elided. */
   item->size = sz;
   item->header = intel->batch.emit;
}
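
For context, intel_batchbuffer_cached_advance() closed a BEGIN_BATCH/OUT_BATCH sequence the way ADVANCE_BATCH() does, but dropped the new packet when it was byte-identical to the last packet emitted with the same opcode. A minimal sketch of a call site; STATE_PACKET_OP and state_dw1 are hypothetical placeholders, not names from this driver:

/* Hypothetical caller: emit a two-dword state packet; the cached-item
 * list skips it if it exactly matches the previous packet carrying the
 * same opcode in its high 16 bits. */
BEGIN_BATCH(2);
OUT_BATCH(STATE_PACKET_OP);
OUT_BATCH(state_dw1);
intel_batchbuffer_cached_advance(intel);   /* instead of ADVANCE_BATCH() */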
/**
 * Restriction [DevSNB, DevIVB]:
 *
 * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
 * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
 * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
 * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
 * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
 * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
 * unless SW can otherwise guarantee that the pipeline from WM onwards is
 * already flushed (e.g., via a preceding MI_FLUSH).
 */
void
intel_emit_depth_stall_flushes(struct intel_context *intel)
{
   assert(intel->gen >= 6 && intel->gen <= 7);

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
   OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
   OUT_BATCH(0); /* address */
   OUT_BATCH(0); /* write data */
   ADVANCE_BATCH();

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
   OUT_BATCH(PIPE_CONTROL_DEPTH_CACHE_FLUSH);
   OUT_BATCH(0); /* address */
   OUT_BATCH(0); /* write data */
   ADVANCE_BATCH();

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
   OUT_BATCH(PIPE_CONTROL_DEPTH_STALL);
   OUT_BATCH(0); /* address */
   OUT_BATCH(0); /* write data */
   ADVANCE_BATCH();
}
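
A sketch of how a caller would satisfy the restriction quoted above before re-emitting depth/stencil state; the surrounding emit path is hypothetical, only the flush call is from this file:

/* Hypothetical state-emit path: flush the depth pipeline before any of
 * the 3DSTATE_*_BUFFER packets named in the restriction above. */
if (intel->gen >= 6)
   intel_emit_depth_stall_flushes(intel);
/* ... now safe to emit 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER, ... */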
/**
 * From the BSpec, volume 2a.03: VS Stage Input / State:
 * "[DevIVB] A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
 *  stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
 *  3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
 *  3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one PIPE_CONTROL needs
 *  to be sent before any combination of VS associated 3DSTATE."
 */
void
gen7_emit_vs_workaround_flush(struct intel_context *intel)
{
   assert(intel->gen == 7);

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
   OUT_BATCH(PIPE_CONTROL_DEPTH_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
   OUT_RELOC(intel->batch.workaround_bo,
             I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
   OUT_BATCH(0); /* write data */
   ADVANCE_BATCH();
}
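
Correspondingly, a gen7 caller would issue this flush once before any run of VS-related 3DSTATE packets; a minimal hypothetical sketch:

/* Hypothetical gen7 path: per the BSpec quote above, one PIPE_CONTROL
 * covers any following combination of VS-related 3DSTATE packets. */
if (intel->gen == 7)
   gen7_emit_vs_workaround_flush(intel);
/* ... emit 3DSTATE_CONSTANT_VS, 3DSTATE_URB_VS, etc. ... */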
/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
void
intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
{
   if (!intel->batch.need_workaround_flush)
      return;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
   OUT_BATCH(PIPE_CONTROL_CS_STALL |
             PIPE_CONTROL_STALL_AT_SCOREBOARD);
   OUT_BATCH(0); /* address */
   OUT_BATCH(0); /* write data */
   ADVANCE_BATCH();

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
   OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
   OUT_RELOC(intel->batch.workaround_bo,
             I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
   OUT_BATCH(0); /* write data */
   ADVANCE_BATCH();

   intel->batch.need_workaround_flush = false;
}
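
As the [DevSNB-C+{W/A}] note requires, a gen6 caller would run this before any depth stall flush; a hedged sketch pairing it with intel_emit_depth_stall_flushes() above:

/* Hypothetical gen6 call site: the post-sync nonzero workaround must
 * land before the depth stall flushes it enables. */
if (intel->gen == 6)
   intel_emit_post_sync_nonzero_flush(intel);
intel_emit_depth_stall_flushes(intel);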
/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.


@@ -26,8 +26,6 @@ struct intel_batchbuffer;
void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);
void intel_batchbuffer_save_state(struct intel_context *intel);
void intel_batchbuffer_reset_to_saved(struct intel_context *intel);
int _intel_batchbuffer_flush(struct intel_context *intel,
                             const char *file, int line);
@@ -55,9 +53,6 @@ bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                         uint32_t write_domain,
                                         uint32_t offset);
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
void intel_emit_depth_stall_flushes(struct intel_context *intel);
void gen7_emit_vs_workaround_flush(struct intel_context *intel);
static INLINE uint32_t float_as_int(float f)
{
@@ -144,8 +139,6 @@ intel_batchbuffer_advance(struct intel_context *intel)
#endif
}
void intel_batchbuffer_cached_advance(struct intel_context *intel);
/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS


@@ -550,62 +550,6 @@ void intel_upload_data(struct intel_context *intel,
   intel->upload.offset = base + size;
}
void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
{
   GLuint base, delta;
   char *ptr;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size <= sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      ptr = intel->upload.buffer + intel->upload.buffer_len;
      intel->upload.buffer_len += size;
   } else
      ptr = malloc(size);

   return ptr;
}
void intel_upload_unmap(struct intel_context *intel,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
   GLuint base;

   base = (intel->upload.offset + align - 1) / align * align;
   if (size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
      free((void*)ptr);
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   intel->upload.offset = base + size;
}
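
The map/unmap pair was the streaming counterpart of intel_upload_data(): map reserves space (in the small CPU-side staging buffer, or via malloc for oversized requests), the caller fills it, and unmap flushes it into the upload BO and reports where it landed. A minimal sketch; data, size, and the 64-byte alignment are hypothetical caller state:

/* Hypothetical caller: stage `size` bytes and get back a BO + offset
 * that hardware state can point at. */
drm_intel_bo *bo;
GLuint offset;
void *dst = intel_upload_map(intel, size, 64);
memcpy(dst, data, size);
intel_upload_unmap(intel, dst, size, 64, &bo, &offset);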
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,


@@ -70,13 +70,6 @@ void intel_upload_data(struct intel_context *intel,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset);
void *intel_upload_map(struct intel_context *intel,
                       GLuint size, GLuint align);
void intel_upload_unmap(struct intel_context *intel,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset);
void intel_upload_finish(struct intel_context *intel);
/* Hook the bufferobject implementation into mesa:


@@ -34,20 +34,6 @@
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
/**
 * Return pointer to current color reading region, or NULL.
 */
struct intel_region *
intel_readbuf_region(struct intel_context *intel)
{
   struct intel_renderbuffer *irb
      = intel_renderbuffer(intel->ctx.ReadBuffer->_ColorReadBuffer);

   if (irb && irb->mt)
      return irb->mt->region;
   else
      return NULL;
}
/**
 * Check if we're about to draw into the front color buffer.
 * If so, set the intel->front_buffer_dirty field to true.


@@ -36,8 +36,6 @@
struct intel_context;
struct intel_framebuffer;
extern struct intel_region *intel_readbuf_region(struct intel_context *intel);
extern void intel_check_front_buffer_rendering(struct intel_context *intel);
static inline void


@@ -121,9 +121,6 @@ struct intel_batchbuffer {
   drm_intel_bo *bo;
   /** Last BO submitted to the hardware.  Used for glFinish(). */
   drm_intel_bo *last_bo;
   /** BO for post-sync nonzero writes for gen6 workaround. */
   drm_intel_bo *workaround_bo;
   bool need_workaround_flush;

   struct cached_batch_item *cached_items;
@@ -136,11 +133,6 @@ struct intel_batchbuffer {
   uint32_t state_batch_offset;
   bool is_blit;
   bool needs_sol_reset;

   struct {
      uint16_t used;
      int reloc_count;
   } saved;
};
/**


@@ -735,33 +735,6 @@ intel_blit_framebuffer(struct gl_context *ctx,
                       mask, filter);
}
void
intel_renderbuffer_move_to_temp(struct intel_context *intel,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);

   new_mt = intel_miptree_create(intel, rb->TexImage->TexObject->Target,
                                 intel_image->base.Base.TexFormat,
                                 intel_image->base.Base.Level,
                                 intel_image->base.Base.Level,
                                 width, height, depth,
                                 true,
                                 INTEL_MIPTREE_TILING_ANY);

   intel_miptree_copy_teximage(intel, intel_image, new_mt, invalidate);
   intel_miptree_reference(&irb->mt, intel_image->mt);
   intel_renderbuffer_set_draw_offset(irb);
   intel_miptree_release(&new_mt);
}
/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.


@@ -161,10 +161,6 @@ intel_renderbuffer_get_tile_offsets(struct intel_renderbuffer *irb,
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex);
void intel_renderbuffer_move_to_temp(struct intel_context *intel,
                                     struct intel_renderbuffer *irb,
                                     bool invalidate);
#ifdef __cplusplus
}
#endif