/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_cfg.h"
#include "brw_shader.h"
#include <new>

using namespace brw;

/** @file brw_fs_schedule_instructions.cpp
 *
 * List scheduling of FS instructions.
 *
 * The basic model of the list scheduler is to take a basic block,
 * compute a DAG of the dependencies (RAW ordering with latency, WAW
 * ordering with latency, WAR ordering), and make a list of the DAG heads.
 * Heuristically pick a DAG head, then put all the children that are
 * now DAG heads into the list of things to schedule.
 *
 * The heuristic is the important part. We're trying to be cheap,
 * since actually computing the optimal scheduling is NP complete.
 * What we do is track a "current clock". When we schedule a node, we
 * update the earliest-unblocked clock time of its children, and
 * increment the clock. Then, when trying to schedule, we just pick
 * the earliest-unblocked instruction to schedule.
 *
 * Note that often there will be many things which could execute
 * immediately, and there are a range of heuristic options to choose
 * from in picking among those.
 */
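
/* A rough sketch of the per-block loop described above, for orientation only
 * (names here are illustrative; the real loop lives in the scheduler classes
 * below and additionally weighs issue times, critical-path delay and register
 * pressure when picking a candidate):
 *
 *    time = 0;
 *    available = all nodes with no unscheduled parents;
 *    while (!available.is_empty()) {
 *       n = pick a candidate from available;      // heuristic choice
 *       emit n;  time += n->issue_time;
 *       for (each child c of n) {
 *          c->unblocked_time = MAX2(c->unblocked_time, time + latency);
 *          if (--c->parent_count == 0)
 *             available.push_tail(c);
 *       }
 *    }
 */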

static bool debug = false;

class instruction_scheduler;
struct schedule_node_child;

class schedule_node : public exec_node
{
public:
   void set_latency(const struct brw_isa_info *isa);

   backend_instruction *inst;
   schedule_node_child *children;
   int children_count;
   int children_cap;
   int initial_parent_count;
   int initial_unblocked_time;
   int latency;

   /**
    * This is the sum of the instruction's latency plus the maximum delay of
    * its children, or just the issue_time if it's a leaf node.
    */
   int delay;

   /**
    * Preferred exit node among the (direct or indirect) successors of this
    * node. Among the scheduler nodes blocked by this node, this will be the
    * one that may cause earliest program termination, or NULL if none of the
    * successors is an exit node.
    */
   schedule_node *exit;

   /**
    * How many cycles this instruction takes to issue.
    *
    * Instructions in gen hardware are handled one simd4 vector at a time,
    * with 1 cycle per vector dispatched. Thus SIMD8 pixel shaders take 2
    * cycles to dispatch and SIMD16 (compressed) instructions take 4.
    */
   int issue_time;

   /* Temporary data used during the scheduling process. */
   struct {
      int parent_count;
      int unblocked_time;

      /**
       * Which iteration of pushing groups of children onto the candidates list
       * this node was a part of.
       */
      unsigned cand_generation;
   } tmp;
};

struct schedule_node_child {
   schedule_node *n;
   int effective_latency;
};

static inline void
reset_node_tmp(schedule_node *n)
{
   n->tmp.parent_count = n->initial_parent_count;
   n->tmp.unblocked_time = n->initial_unblocked_time;
   n->tmp.cand_generation = 0;
}

/**
 * Lower bound of the scheduling time after which one of the instructions
 * blocked by this node may lead to program termination.
 *
 * exit_unblocked_time() determines a strict partial ordering relation '«' on
 * the set of scheduler nodes as follows:
 *
 *   n « m <-> exit_unblocked_time(n) < exit_unblocked_time(m)
 *
 * which can be used to heuristically order nodes according to how early they
 * can unblock an exit node and lead to program termination.
 */
static inline int
exit_tmp_unblocked_time(const schedule_node *n)
{
   return n->exit ? n->exit->tmp.unblocked_time : INT_MAX;
}

static inline int
exit_initial_unblocked_time(const schedule_node *n)
{
   return n->exit ? n->exit->initial_unblocked_time : INT_MAX;
}
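
/* For intuition (values are illustrative, not measured): if node A blocks an
 * exit node that could be unblocked at cycle 40 and node B blocks one that
 * could be unblocked at cycle 120, then exit_unblocked_time(A) <
 * exit_unblocked_time(B), i.e. A « B, and a heuristic may prefer A when
 * trying to reach a HALT early. Nodes with no exit node among their
 * successors compare as INT_MAX and are never preferred on this basis.
 */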

void
schedule_node::set_latency(const struct brw_isa_info *isa)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MAD:
      /* 2 cycles
       * (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       * (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 18 cycles on IVB, 16 on HSW
       * (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null g4<4,5,1>F { align16 WE_normal 1Q };
       *
       * 20 cycles on IVB, 18 on HSW
       * (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = 18;
      break;

   case BRW_OPCODE_LRP:
      /* 2 cycles
       * (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       * (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 16 cycles on IVB, 14 on HSW
       * (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
       *
       * 16 cycles
       * (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null g4<4,4,1>F { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = 14;
      break;

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* 2 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
       *
       * 18 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F null { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * Same for exp2, log2, rsq, sqrt, sin, cos.
       */
      latency = 16;
      break;

   case SHADER_OPCODE_POW:
      /* 2 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
       *
       * 26 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F g2.1<0,1,0>F { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       */
      latency = 24;
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_LZ:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXL_LZ:
      /* 18 cycles:
       * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
       * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       *
       * 697 +/-49 cycles (min 610, n=26):
       * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
       * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * So the latency on our first texture load of the batchbuffer takes
       * ~700 cycles, since the caches are cold at that point.
       *
       * 840 +/- 92 cycles (min 720, n=25):
       * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
       * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * On the second load, it takes just an extra ~140 cycles, and after
       * accounting for the 14 cycles of the MOV's latency, that makes ~130.
       *
       * 683 +/- 49 cycles (min = 602, n=47):
       * mov(8) g115<1>F 0F { align1 WE_normal 1Q };
       * mov(8) g114<1>F 0F { align1 WE_normal 1Q };
       * send(8) g4<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * send(8) g50<1>UW g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * The unit appears to be pipelined, since this matches up with the
       * cache-cold case, despite there being two loads here. If you replace
       * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
       *
       * So, take some number between the cache-hot 140 cycles and the
       * cache-cold 700 cycles. No particular tuning was done on this.
       *
       * I haven't done significant testing of the non-TEX opcodes. TXL at
       * least looked about the same as TEX.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_TXS:
      /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
       * cycles (n=15):
       * mov(8) g114<1>UD 0D { align1 WE_normal 1Q };
       * send(8) g6<1>UW g114<8,8,1>F
       *   sampler (10, 0, 10, 1) mlen 1 rlen 4 { align1 WE_normal 1Q };
       * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1Q };
       *
       *
       * Two loads was 535 +/- 30 cycles (n=19):
       * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
       * send(16) g6<1>UW g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
       * mov(16) g114<1>UD 0D { align1 WE_normal 1H };
       * mov(16) g6<1>F g6<8,8,1>D { align1 WE_normal 1H };
       * send(16) g8<1>UW g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8 { align1 WE_normal 1H };
       * mov(16) g8<1>F g8<8,8,1>D { align1 WE_normal 1H };
       * add(16) g6<1>F g6<8,8,1>F g8<8,8,1>F { align1 WE_normal 1H };
       *
       * Since the only caches that should matter are just the
       * instruction/state cache containing the surface state, assume that we
       * always have hot caches.
       */
      latency = 100;
      break;

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      /* testing using varying-index pull constants:
       *
       * 16 cycles:
       * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
       * send(8) g4<1>F g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
       *
       * ~480 cycles:
       * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
       * send(8) g4<1>F g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * ~620 cycles:
       * mov(8) g4<1>D g2.1<0,1,0>F { align1 WE_normal 1Q };
       * send(8) g4<1>F g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       * send(8) g4<1>F g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       *
       * So, if it's cache-hot, it's about 140. If it's cache cold, it's
       * about 460. We expect to mostly be cache hot, so pick something more
       * in that direction.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_SEND:
      switch (inst->sfid) {
      case BRW_SFID_SAMPLER: {
         unsigned msg_type = (inst->desc >> 12) & 0x1f;
         switch (msg_type) {
         case GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO:
         case GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO:
            /* See also SHADER_OPCODE_TXS */
            latency = 100;
            break;

         default:
            /* See also SHADER_OPCODE_TEX */
            latency = 200;
            break;
         }
         break;
      }

      case GFX6_SFID_DATAPORT_CONSTANT_CACHE:
         /* See FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD */
         latency = 200;
         break;

      case GFX6_SFID_DATAPORT_RENDER_CACHE:
         switch (brw_fb_desc_msg_type(isa->devinfo, inst->desc)) {
         case GFX7_DATAPORT_RC_TYPED_SURFACE_WRITE:
         case GFX7_DATAPORT_RC_TYPED_SURFACE_READ:
            /* See also SHADER_OPCODE_TYPED_SURFACE_READ */
            latency = 600;
            break;

         case GFX7_DATAPORT_RC_TYPED_ATOMIC_OP:
            /* See also SHADER_OPCODE_TYPED_ATOMIC */
            latency = 14000;
            break;

         case GFX6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE:
            /* completely fabricated number */
            latency = 600;
            break;

         default:
            unreachable("Unknown render cache message");
         }
         break;

      case GFX7_SFID_DATAPORT_DATA_CACHE:
         switch ((inst->desc >> 14) & 0x1f) {
         case BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ:
         case GFX7_DATAPORT_DC_UNALIGNED_OWORD_BLOCK_READ:
         case GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE:
            /* We have no data for this but assume it's a little faster than
             * untyped surface read/write.
             */
            latency = 200;
            break;

         case GFX7_DATAPORT_DC_DWORD_SCATTERED_READ:
         case GFX6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE:
         case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ:
         case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE:
            /* We have no data for this but assume it's roughly the same as
             * untyped surface read/write.
             */
            latency = 300;
            break;

         case GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ:
         case GFX7_DATAPORT_DC_UNTYPED_SURFACE_WRITE:
            /* Test code:
             * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
             * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
             * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
             * send(8) g4<1>UD g112<8,8,1>UD
             *   data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
             * .
             * . [repeats 8 times]
             * .
             * mov(8) g112<1>UD 0x00000000UD { align1 WE_all 1Q };
             * mov(1) g112.7<1>UD g1.7<0,1,0>UD { align1 WE_all };
             * mov(8) g113<1>UD 0x00000000UD { align1 WE_normal 1Q };
             * send(8) g4<1>UD g112<8,8,1>UD
             *   data (38, 6, 5) mlen 2 rlen 1 { align1 WE_normal 1Q };
             *
             * Running it 100 times as fragment shader on a 128x128 quad
             * gives an average latency of 583 cycles per surface read,
             * standard deviation 0.9%.
             */
            latency = 600;
            break;

         case GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP:
            /* Test code:
             * mov(8) g112<1>ud 0x00000000ud { align1 WE_all 1Q };
             * mov(1) g112.7<1>ud g1.7<0,1,0>ud { align1 WE_all };
             * mov(8) g113<1>ud 0x00000000ud { align1 WE_normal 1Q };
             * send(8) g4<1>ud g112<8,8,1>ud
             *   data (38, 5, 6) mlen 2 rlen 1 { align1 WE_normal 1Q };
             *
             * Running it 100 times as fragment shader on a 128x128 quad
             * gives an average latency of 13867 cycles per atomic op,
             * standard deviation 3%. Note that this is a rather
             * pessimistic estimate, the actual latency in cases with few
             * collisions between threads and favorable pipelining has been
             * seen to be reduced by a factor of 100.
             */
            latency = 14000;
            break;

         default:
            unreachable("Unknown data cache message");
         }
         break;

      case HSW_SFID_DATAPORT_DATA_CACHE_1:
         switch (brw_dp_desc_msg_type(isa->devinfo, inst->desc)) {
         case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ:
         case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE:
         case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ:
         case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE:
         case GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE:
         case GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ:
         case GFX8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE:
         case GFX9_DATAPORT_DC_PORT1_A64_SCATTERED_READ:
         case GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_READ:
         case GFX9_DATAPORT_DC_PORT1_A64_OWORD_BLOCK_WRITE:
            /* See also GFX7_DATAPORT_DC_UNTYPED_SURFACE_READ */
            latency = 300;
            break;

         case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP:
         case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2:
         case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2:
         case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP:
         case GFX9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP:
         case GFX8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP:
         case GFX9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP:
         case GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_INT_OP:
         case GFX12_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_HALF_FLOAT_OP:
            /* See also GFX7_DATAPORT_DC_UNTYPED_ATOMIC_OP */
            latency = 14000;
            break;

         default:
            unreachable("Unknown data cache message");
         }
         break;

      case GFX7_SFID_PIXEL_INTERPOLATOR:
         latency = 50; /* TODO */
         break;

      case GFX12_SFID_UGM:
      case GFX12_SFID_TGM:
      case GFX12_SFID_SLM:
         switch (lsc_msg_desc_opcode(isa->devinfo, inst->desc)) {
         case LSC_OP_LOAD:
         case LSC_OP_STORE:
         case LSC_OP_LOAD_CMASK:
         case LSC_OP_STORE_CMASK:
            latency = 300;
            break;
         case LSC_OP_FENCE:
         case LSC_OP_ATOMIC_INC:
         case LSC_OP_ATOMIC_DEC:
         case LSC_OP_ATOMIC_LOAD:
         case LSC_OP_ATOMIC_STORE:
         case LSC_OP_ATOMIC_ADD:
         case LSC_OP_ATOMIC_SUB:
         case LSC_OP_ATOMIC_MIN:
         case LSC_OP_ATOMIC_MAX:
         case LSC_OP_ATOMIC_UMIN:
         case LSC_OP_ATOMIC_UMAX:
         case LSC_OP_ATOMIC_CMPXCHG:
         case LSC_OP_ATOMIC_FADD:
         case LSC_OP_ATOMIC_FSUB:
         case LSC_OP_ATOMIC_FMIN:
         case LSC_OP_ATOMIC_FMAX:
         case LSC_OP_ATOMIC_FCMPXCHG:
         case LSC_OP_ATOMIC_AND:
         case LSC_OP_ATOMIC_OR:
         case LSC_OP_ATOMIC_XOR:
            latency = 1400;
            break;
         default:
            unreachable("unsupported new data port message instruction");
         }
         break;

      case GEN_RT_SFID_BINDLESS_THREAD_DISPATCH:
      case GEN_RT_SFID_RAY_TRACE_ACCELERATOR:
         /* TODO.
          *
          * We'll assume for the moment that this is pretty quick as it
          * doesn't actually return any data.
          */
         latency = 200;
         break;

      case BRW_SFID_URB:
         latency = 200;
         break;

      default:
         unreachable("Unknown SFID");
      }
      break;

   case BRW_OPCODE_DPAS:
      switch (inst->rcount) {
      case 1:
         latency = 21;
         break;
      case 2:
         latency = 22;
         break;
      case 8:
      default:
         latency = 32;
         break;
      }
      break;

   default:
      /* 2 cycles:
       * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
       *
       * 16 cycles:
       * mul(8) g4<1>F g2<0,1,0>F 0.5F { align1 WE_normal 1Q };
       * mov(8) null g4<8,8,1>F { align1 WE_normal 1Q };
       */
      latency = 14;
      break;
   }
}

class instruction_scheduler {
public:
   instruction_scheduler(void *mem_ctx, const backend_shader *s, int grf_count,
                         int grf_write_scale, bool post_reg_alloc):
      bs(s)
   {
      this->mem_ctx = mem_ctx;
      this->lin_ctx = linear_context(this->mem_ctx);
      this->grf_count = grf_count;
      this->post_reg_alloc = post_reg_alloc;

      this->last_grf_write = linear_zalloc_array(lin_ctx, schedule_node *, grf_count * grf_write_scale);

      this->nodes_len = s->cfg->last_block()->end_ip + 1;
      this->nodes = linear_zalloc_array(lin_ctx, schedule_node, this->nodes_len);

      const struct brw_isa_info *isa = &bs->compiler->isa;

      schedule_node *n = nodes;
      foreach_block_and_inst(block, backend_instruction, inst, s->cfg) {
         n->inst = inst;

         if (!post_reg_alloc)
            n->latency = 1;
         else
            n->set_latency(isa);

         n++;
      }
      assert(n == nodes + nodes_len);

      current.block = NULL;
      current.start = NULL;
      current.end = NULL;
      current.len = 0;
      current.time = 0;
      current.cand_generation = 0;
      current.available.make_empty();
   }

   void add_barrier_deps(schedule_node *n);
   void add_cross_lane_deps(schedule_node *n);
   void add_dep(schedule_node *before, schedule_node *after, int latency);
   void add_dep(schedule_node *before, schedule_node *after);

   void set_current_block(bblock_t *block);
   void compute_delays();
   void compute_exits();

   void schedule(schedule_node *chosen);
   void update_children(schedule_node *chosen);

   void *mem_ctx;
   linear_ctx *lin_ctx;

   schedule_node *nodes;
   int nodes_len;

   /* Current block being processed. */
   struct {
      bblock_t *block;

      /* Range of nodes in the block. End will point to first node
       * address after the block, i.e. the range is [start, end).
       */
      schedule_node *start;
      schedule_node *end;
      int len;

      int scheduled;

      unsigned cand_generation;
      int time;
      exec_list available;
   } current;

   bool post_reg_alloc;
   int grf_count;
   const backend_shader *bs;

   /**
    * Last instruction to have written the grf (or a channel in the grf, for the
    * scalar backend)
    */
   schedule_node **last_grf_write;
};

class fs_instruction_scheduler : public instruction_scheduler
{
public:
   fs_instruction_scheduler(void *mem_ctx, const fs_visitor *v, int grf_count, int hw_reg_count,
                            int block_count, bool post_reg_alloc);
   void calculate_deps();
   bool is_compressed(const fs_inst *inst);
   schedule_node *choose_instruction_to_schedule();
   int calculate_issue_time(backend_instruction *inst);

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
   void clear_last_grf_write();

   void schedule_instructions();
   void run(instruction_scheduler_mode mode);

   const fs_visitor *v;
   unsigned hw_reg_count;
   int reg_pressure;
   instruction_scheduler_mode mode;

   /*
    * The register pressure at the beginning of each basic block.
    */

   int *reg_pressure_in;

   /*
    * The virtual GRF's whose range overlaps the beginning of each basic block.
    */

   BITSET_WORD **livein;

   /*
    * The virtual GRF's whose range overlaps the end of each basic block.
    */

   BITSET_WORD **liveout;

   /*
    * The hardware GRF's whose range overlaps the end of each basic block.
    */

   BITSET_WORD **hw_liveout;

   /*
    * Whether we've scheduled a write for this virtual GRF yet.
    */

   bool *written;

   /*
    * How many reads we haven't scheduled for this virtual GRF yet.
    */

   int *reads_remaining;

   /*
    * How many reads we haven't scheduled for this hardware GRF yet.
    */

   int *hw_reads_remaining;

};

fs_instruction_scheduler::fs_instruction_scheduler(void *mem_ctx, const fs_visitor *v,
                                                   int grf_count, int hw_reg_count,
                                                   int block_count, bool post_reg_alloc)
   : instruction_scheduler(mem_ctx, v, grf_count, /* grf_write_scale */ 16,
                           post_reg_alloc),
     v(v)
{
   this->hw_reg_count = hw_reg_count;
   this->mode = SCHEDULE_NONE;
   this->reg_pressure = 0;

   if (!post_reg_alloc) {
      this->reg_pressure_in = linear_zalloc_array(lin_ctx, int, block_count);

      this->livein = linear_alloc_array(lin_ctx, BITSET_WORD *, block_count);
      for (int i = 0; i < block_count; i++)
         this->livein[i] = linear_zalloc_array(lin_ctx, BITSET_WORD,
                                               BITSET_WORDS(grf_count));

      this->liveout = linear_alloc_array(lin_ctx, BITSET_WORD *, block_count);
      for (int i = 0; i < block_count; i++)
         this->liveout[i] = linear_zalloc_array(lin_ctx, BITSET_WORD,
                                                BITSET_WORDS(grf_count));

      this->hw_liveout = linear_alloc_array(lin_ctx, BITSET_WORD *, block_count);
      for (int i = 0; i < block_count; i++)
         this->hw_liveout[i] = linear_zalloc_array(lin_ctx, BITSET_WORD,
                                                   BITSET_WORDS(hw_reg_count));

      setup_liveness(v->cfg);

      this->written = linear_alloc_array(lin_ctx, bool, grf_count);

      this->reads_remaining = linear_alloc_array(lin_ctx, int, grf_count);

      this->hw_reads_remaining = linear_alloc_array(lin_ctx, int, hw_reg_count);
   } else {
      this->reg_pressure_in = NULL;
      this->livein = NULL;
      this->liveout = NULL;
      this->hw_liveout = NULL;
      this->written = NULL;
      this->reads_remaining = NULL;
      this->hw_reads_remaining = NULL;
   }

   foreach_block(block, v->cfg) {
      set_current_block(block);

      for (schedule_node *n = current.start; n < current.end; n++)
         n->issue_time = calculate_issue_time(n->inst);

      calculate_deps();
      compute_delays();
      compute_exits();
   }
}
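
/* The constructor above does the per-block analysis up front: it maps each
 * instruction to a schedule_node, assigns issue times, builds the dependency
 * DAG and computes delays and exit nodes for every block. The scheduling
 * passes themselves (run()/schedule_instructions(), declared above) are then
 * expected to reuse this data, with only the per-pass state in
 * schedule_node::tmp being reset via reset_node_tmp().
 */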

static bool
is_src_duplicate(fs_inst *inst, int src)
{
   for (int i = 0; i < src; i++)
      if (inst->src[i].equals(inst->src[src]))
         return true;

   return false;
}

void
fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
{
   assert(reads_remaining);

   fs_inst *inst = (fs_inst *)be;

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]++;
      } else if (inst->src[i].file == FIXED_GRF) {
         if (inst->src[i].nr >= hw_reg_count)
            continue;

         for (unsigned j = 0; j < regs_read(inst, i); j++)
            hw_reads_remaining[inst->src[i].nr + j]++;
      }
   }
}

void
fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
{
   const fs_live_variables &live = v->live_analysis.require();

   /* First, compute liveness on a per-GRF level using the in/out sets from
    * liveness calculation.
    */
   for (int block = 0; block < cfg->num_blocks; block++) {
      for (int i = 0; i < live.num_vars; i++) {
         if (BITSET_TEST(live.block_data[block].livein, i)) {
            int vgrf = live.vgrf_from_var[i];
            if (!BITSET_TEST(livein[block], vgrf)) {
               reg_pressure_in[block] += v->alloc.sizes[vgrf];
               BITSET_SET(livein[block], vgrf);
            }
         }

         if (BITSET_TEST(live.block_data[block].liveout, i))
            BITSET_SET(liveout[block], live.vgrf_from_var[i]);
      }
   }

   /* Now, extend the live in/live out sets for when a range crosses a block
    * boundary, which matches what our register allocator/interference code
    * does to account for force_writemask_all and incompatible exec_mask's.
    */
   for (int block = 0; block < cfg->num_blocks - 1; block++) {
      for (int i = 0; i < grf_count; i++) {
         if (live.vgrf_start[i] <= cfg->blocks[block]->end_ip &&
             live.vgrf_end[i] >= cfg->blocks[block + 1]->start_ip) {
            if (!BITSET_TEST(livein[block + 1], i)) {
               reg_pressure_in[block + 1] += v->alloc.sizes[i];
               BITSET_SET(livein[block + 1], i);
            }

            BITSET_SET(liveout[block], i);
         }
      }
   }

   int payload_last_use_ip[hw_reg_count];
   v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);

   for (unsigned i = 0; i < hw_reg_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      for (int block = 0; block < cfg->num_blocks; block++) {
         if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
            reg_pressure_in[block]++;

         if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
            BITSET_SET(hw_liveout[block], i);
      }
   }
}

void
fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
   assert(reads_remaining);

   fs_inst *inst = (fs_inst *)be;

   if (inst->dst.file == VGRF) {
      written[inst->dst.nr] = true;
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]--;
      } else if (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++)
            hw_reads_remaining[inst->src[i].nr + off]--;
      }
   }
}

int
fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;
   int benefit = 0;
   const int block_idx = current.block->num;

   if (inst->dst.file == VGRF) {
      if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
          !written[inst->dst.nr])
         benefit -= v->alloc.sizes[inst->dst.nr];
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF &&
          !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
          reads_remaining[inst->src[i].nr] == 1)
         benefit += v->alloc.sizes[inst->src[i].nr];

      if (inst->src[i].file == FIXED_GRF &&
          inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++) {
            int reg = inst->src[i].nr + off;
            if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
                hw_reads_remaining[reg] == 1) {
               benefit++;
            }
         }
      }
   }

   return benefit;
}

void
instruction_scheduler::set_current_block(bblock_t *block)
{
   current.block = block;
   current.start = nodes + block->start_ip;
   current.len = block->end_ip - block->start_ip + 1;
   current.end = current.start + current.len;
   current.time = 0;
   current.scheduled = 0;
   current.cand_generation = 1;
}

/** Computation of the delay member of each node. */
void
instruction_scheduler::compute_delays()
{
   for (schedule_node *n = current.end - 1; n >= current.start; n--) {
      if (!n->children_count) {
         n->delay = n->issue_time;
      } else {
         for (int i = 0; i < n->children_count; i++) {
            assert(n->children[i].n->delay);
            n->delay = MAX2(n->delay, n->latency + n->children[i].n->delay);
         }
      }
   }
}
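
/* Worked example with illustrative numbers: for a dependency chain
 * SEND (latency 200) -> MOV (latency 14) -> MUL (leaf, issue_time 2),
 * the bottom-up walk above gives delay(MUL) = 2, delay(MOV) = 14 + 2 = 16
 * and delay(SEND) = 200 + 16 = 216, so the SEND carries the longest
 * remaining critical path and tends to be preferred by the scheduling
 * heuristics.
 */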

void
instruction_scheduler::compute_exits()
{
   /* Calculate a lower bound of the scheduling time of each node in the
    * graph. This is analogous to the node's critical path but calculated
    * from the top instead of from the bottom of the block.
    */
   for (schedule_node *n = current.start; n < current.end; n++) {
      for (int i = 0; i < n->children_count; i++) {
         schedule_node_child *child = &n->children[i];
         child->n->initial_unblocked_time =
            MAX2(child->n->initial_unblocked_time,
                 n->initial_unblocked_time + n->issue_time + child->effective_latency);
      }
   }

   /* Calculate the exit of each node by induction based on the exit nodes of
    * its children. The preferred exit of a node is the one among the exit
    * nodes of its children which can be unblocked first according to the
    * optimistic unblocked time estimate calculated above.
    */
   for (schedule_node *n = current.end - 1; n >= current.start; n--) {
      n->exit = (n->inst->opcode == BRW_OPCODE_HALT ? n : NULL);

      for (int i = 0; i < n->children_count; i++) {
         if (exit_initial_unblocked_time(n->children[i].n) < exit_initial_unblocked_time(n))
            n->exit = n->children[i].n->exit;
      }
   }
}

/**
 * Add a dependency between two instruction nodes.
 *
 * The @after node will be scheduled after @before. We will try to
 * schedule it @latency cycles after @before, but no guarantees there.
 */
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
                               int latency)
{
   if (!before || !after)
      return;

   assert(before != after);

   for (int i = 0; i < before->children_count; i++) {
      schedule_node_child *child = &before->children[i];
      if (child->n == after) {
         child->effective_latency = MAX2(child->effective_latency, latency);
         return;
      }
   }

   if (before->children_cap <= before->children_count) {
      if (before->children_cap < 16)
         before->children_cap = 16;
      else
         before->children_cap *= 2;

      before->children = reralloc(mem_ctx, before->children,
                                  schedule_node_child,
                                  before->children_cap);
   }

   schedule_node_child *child = &before->children[before->children_count];
   child->n = after;
   child->effective_latency = latency;
   before->children_count++;
   after->initial_parent_count++;
}

void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
{
   if (!before)
      return;

   add_dep(before, after, before->latency);
}

static bool
is_scheduling_barrier(const backend_instruction *inst)
{
   return inst->opcode == SHADER_OPCODE_HALT_TARGET ||
          inst->is_control_flow() ||
          inst->has_side_effects();
}

static bool
has_cross_lane_access(const fs_inst *inst)
{
   /* FINISHME:
    *
    * This function is likely incomplete in terms of identifying cross lane
    * accesses.
    */
   if (inst->opcode == SHADER_OPCODE_BROADCAST ||
       inst->opcode == SHADER_OPCODE_READ_SR_REG ||
       inst->opcode == SHADER_OPCODE_CLUSTER_BROADCAST ||
       inst->opcode == SHADER_OPCODE_SHUFFLE ||
       inst->opcode == FS_OPCODE_LOAD_LIVE_CHANNELS ||
       inst->opcode == SHADER_OPCODE_LOAD_LIVE_CHANNELS ||
       inst->opcode == SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL ||
       inst->opcode == SHADER_OPCODE_FIND_LIVE_CHANNEL)
      return true;

   for (unsigned s = 0; s < inst->sources; s++) {
      if (inst->src[s].file == VGRF) {
         if (inst->src[s].stride == 0)
            return true;
      }
   }

   return false;
}

/**
 * Sometimes we really want this node to execute after everything that
 * was before it and before everything that followed it. This adds
 * the deps to do so.
 */
void
instruction_scheduler::add_barrier_deps(schedule_node *n)
{
   for (schedule_node *prev = n - 1; prev >= current.start; prev--) {
      add_dep(prev, n, 0);
      if (is_scheduling_barrier(prev->inst))
         break;
   }

   for (schedule_node *next = n + 1; next < current.end; next++) {
      add_dep(n, next, 0);
      if (is_scheduling_barrier(next->inst))
         break;
   }
}

/**
 * Because some instructions like HALT can disable lanes, scheduling prior to
 * a cross lane access should not be allowed, otherwise we could end up with
 * later instructions accessing uninitialized data.
 */
void
instruction_scheduler::add_cross_lane_deps(schedule_node *n)
{
   for (schedule_node *prev = n - 1; prev >= current.start; prev--) {
      if (has_cross_lane_access((fs_inst*)prev->inst))
         add_dep(prev, n, 0);
   }
}

/* instruction scheduling needs to be aware of when an MRF write
 * actually writes 2 MRFs.
 */
bool
fs_instruction_scheduler::is_compressed(const fs_inst *inst)
{
   return inst->exec_size == 16;
}

/* Clears last_grf_write to be ready to start calculating deps for a block
 * again.
 *
 * Since pre-ra grf_count scales with instructions, and instructions scale with
 * BBs, we don't want to memset all of last_grf_write per block or you'll end up
 * O(n^2) with number of blocks. For shaders using softfp64, we get a *lot* of
 * blocks.
 *
 * We don't bother being careful for post-ra, since then grf_count doesn't scale
 * with instructions.
 */
void
fs_instruction_scheduler::clear_last_grf_write()
{
   if (!post_reg_alloc) {
      for (schedule_node *n = current.start; n < current.end; n++) {
         fs_inst *inst = (fs_inst *)n->inst;

         if (inst->dst.file == VGRF) {
            /* Don't bother being careful with regs_written(), quicker to just clear 2 cachelines. */
            memset(&last_grf_write[inst->dst.nr * 16], 0, sizeof(*last_grf_write) * 16);
         }
      }
   } else {
      memset(last_grf_write, 0, sizeof(*last_grf_write) * grf_count * 16);
   }
}

void
fs_instruction_scheduler::calculate_deps()
{
   /* Pre-register-allocation, this tracks the last write per VGRF offset.
    * After register allocation, reg_offsets are gone and we track individual
    * GRF registers.
    */
   schedule_node *last_conditional_mod[8] = {};
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately. We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   /* top-to-bottom dependencies: RAW and WAW. */
   for (schedule_node *n = current.start; n < current.end; n++) {
      fs_inst *inst = (fs_inst *)n->inst;

      if (is_scheduling_barrier(inst))
         add_barrier_deps(n);

      if (inst->opcode == BRW_OPCODE_HALT ||
          inst->opcode == SHADER_OPCODE_HALT_TARGET)
         add_cross_lane_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(last_grf_write[inst->src[i].nr + r], n);
            } else {
               for (unsigned r = 0; r < regs_read(inst, i); r++) {
                  add_dep(last_grf_write[inst->src[i].nr * 16 +
                                         inst->src[i].offset / REG_SIZE + r], n);
               }
            }
         } else if (inst->src[i].file == FIXED_GRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(last_grf_write[inst->src[i].nr + r], n);
            } else {
               add_dep(last_fixed_grf_write, n);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
            add_barrier_deps(n);
         }
      }

      if (const unsigned mask = inst->flags_read(v->devinfo)) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               add_dep(last_conditional_mod[i], n);
         }
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == VGRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               add_dep(last_grf_write[inst->dst.nr + r], n);
               last_grf_write[inst->dst.nr + r] = n;
            }
         } else {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               add_dep(last_grf_write[inst->dst.nr * 16 +
                                      inst->dst.offset / REG_SIZE + r], n);
               last_grf_write[inst->dst.nr * 16 +
                              inst->dst.offset / REG_SIZE + r] = n;
            }
         }
      } else if (inst->dst.file == FIXED_GRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               add_dep(last_grf_write[inst->dst.nr + r], n);
               last_grf_write[inst->dst.nr + r] = n;
            }
         } else {
            add_dep(last_fixed_grf_write, n);
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }
intel/fs: sel.cond writes the flags on Gfx4 and Gfx5
On Gfx4 and Gfx5, sel.l (for min) and sel.ge (for max) are implemented
using a separate cmpn and sel instruction. This lowering occurs in
fs_visitor::lower_minmax which is called very, very late... a long, long
time after the first calls to opt_cmod_propagation. As a result,
conditional modifiers can be incorrectly propagated across sel.cond on
those platforms.
No tests were affected by this change, and I find that quite shocking.
After just changing flags_written(), all of the atan tests started
failing on ILK. That required the change in cmod_propagation (and the
addition of the prop_across_into_sel_gfx5 unit test).
Shader-db results for ILK and GM45 are below. I looked at a couple
before and after shaders... and every case that I looked at had
experienced incorrect cmod propagation. This affected a LOT of apps!
Euro Truck Simulator 2, The Talos Principle, Serious Sam 3, Sanctum 2,
Gang Beasts, and on and on... :(
I discovered this bug while working on a couple new optimization
passes. One of the passes attempts to remove condition modifiers that
are never used. The pass made no progress except on ILK and GM45.
After investigating a couple of the affected shaders, I noticed that
the code in those shaders looked wrong... investigation led to this
cause.
v2: Trivial changes in the unit tests.
v3: Fix typo in comment in unit tests. Noticed by Jason and Priit.
v4: Tweak handling of BRW_OPCODE_SEL special case. Suggested by Jason.
Fixes: df1aec763eb ("i965/fs: Define methods to calculate the flag subset read or written by an fs_inst.")
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Tested-by: Dave Airlie <airlied@redhat.com>
Iron Lake
total instructions in shared programs: 8180493 -> 8181781 (0.02%)
instructions in affected programs: 541796 -> 543084 (0.24%)
helped: 28
HURT: 1158
helped stats (abs) min: 1 max: 1 x̄: 1.00 x̃: 1
helped stats (rel) min: 0.35% max: 0.86% x̄: 0.53% x̃: 0.50%
HURT stats (abs) min: 1 max: 3 x̄: 1.14 x̃: 1
HURT stats (rel) min: 0.12% max: 4.00% x̄: 0.37% x̃: 0.23%
95% mean confidence interval for instructions value: 1.06 1.11
95% mean confidence interval for instructions %-change: 0.31% 0.38%
Instructions are HURT.
total cycles in shared programs: 239420470 -> 239421690 (<.01%)
cycles in affected programs: 2925992 -> 2927212 (0.04%)
helped: 49
HURT: 157
helped stats (abs) min: 2 max: 284 x̄: 62.69 x̃: 70
helped stats (rel) min: 0.04% max: 6.20% x̄: 1.68% x̃: 1.96%
HURT stats (abs) min: 2 max: 48 x̄: 27.34 x̃: 24
HURT stats (rel) min: 0.02% max: 2.91% x̄: 0.31% x̃: 0.20%
95% mean confidence interval for cycles value: -0.80 12.64
95% mean confidence interval for cycles %-change: -0.31% <.01%
Inconclusive result (value mean confidence interval includes 0).
GM45
total instructions in shared programs: 4985517 -> 4986207 (0.01%)
instructions in affected programs: 306935 -> 307625 (0.22%)
helped: 14
HURT: 625
helped stats (abs) min: 1 max: 1 x̄: 1.00 x̃: 1
helped stats (rel) min: 0.35% max: 0.82% x̄: 0.52% x̃: 0.49%
HURT stats (abs) min: 1 max: 3 x̄: 1.13 x̃: 1
HURT stats (rel) min: 0.12% max: 3.90% x̄: 0.34% x̃: 0.22%
95% mean confidence interval for instructions value: 1.04 1.12
95% mean confidence interval for instructions %-change: 0.29% 0.36%
Instructions are HURT.
total cycles in shared programs: 153827268 -> 153828052 (<.01%)
cycles in affected programs: 1669290 -> 1670074 (0.05%)
helped: 24
HURT: 84
helped stats (abs) min: 2 max: 232 x̄: 64.33 x̃: 67
helped stats (rel) min: 0.04% max: 4.62% x̄: 1.60% x̃: 1.94%
HURT stats (abs) min: 2 max: 48 x̄: 27.71 x̃: 24
HURT stats (rel) min: 0.02% max: 2.66% x̄: 0.34% x̃: 0.14%
95% mean confidence interval for cycles value: -1.94 16.46
95% mean confidence interval for cycles %-change: -0.29% 0.11%
Inconclusive result (value mean confidence interval includes 0).
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/12191>
2021-08-02 21:33:17 -07:00
|
|
|
if (const unsigned mask = inst->flags_written(v->devinfo)) {
|
2016-05-18 22:13:52 -07:00
|
|
|
assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
|
|
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
|
|
|
|
|
if (mask & (1 << i)) {
|
|
|
|
|
add_dep(last_conditional_mod[i], n, 0);
|
|
|
|
|
last_conditional_mod[i] = n;
|
|
|
|
|
}
|
|
|
|
|
}
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|
2014-04-04 16:51:59 +03:00
|
|
|
|
2015-04-17 12:15:58 -07:00
|
|
|
if (inst->writes_accumulator_implicitly(v->devinfo) &&
|
2014-05-07 09:58:43 +02:00
|
|
|
!inst->dst.is_accumulator()) {
|
|
|
|
|
add_dep(last_accumulator_write, n);
|
|
|
|
|
last_accumulator_write = n;
|
2014-04-04 16:51:59 +03:00
|
|
|
}
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|
|
|
|
|
|
2023-06-13 14:18:28 -07:00
|
|
|
clear_last_grf_write();
|
|
|
|
|
|
2011-01-18 17:16:49 -08:00
|
|
|
/* bottom-to-top dependencies: WAR */
|
2012-12-06 10:36:11 -08:00
|
|
|
memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
|
2014-04-04 16:51:59 +03:00
|
|
|
last_accumulator_write = NULL;
|
2011-05-23 09:12:07 -07:00
|
|
|
last_fixed_grf_write = NULL;
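   /* Walk the block bottom-to-top so that last_grf_write[], last_fixed_grf_write
    * and last_accumulator_write track the nearest *later* writer of each
    * location; every read then adds a zero-latency WAR edge so that writer
    * cannot be scheduled ahead of the read.
    */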
|
2011-01-18 17:16:49 -08:00
|
|
|
|
2023-10-20 00:09:37 -07:00
|
|
|
for (schedule_node *n = current.end - 1; n >= current.start; n--) {
|
2013-04-29 14:05:33 -07:00
|
|
|
fs_inst *inst = (fs_inst *)n->inst;
|
2011-01-18 17:16:49 -08:00
|
|
|
|
|
|
|
|
/* write-after-read deps. */
|
2014-03-17 10:39:43 -07:00
|
|
|
for (int i = 0; i < inst->sources; i++) {
|
2015-10-26 17:09:25 -07:00
|
|
|
if (inst->src[i].file == VGRF) {
|
2012-12-03 17:58:03 -08:00
|
|
|
if (post_reg_alloc) {
|
2016-09-07 16:59:35 -07:00
|
|
|
for (unsigned r = 0; r < regs_read(inst, i); r++)
|
2015-10-26 04:35:14 -07:00
|
|
|
add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
|
2012-12-03 17:58:03 -08:00
|
|
|
} else {
|
2016-09-07 16:59:35 -07:00
|
|
|
for (unsigned r = 0; r < regs_read(inst, i); r++) {
|
2016-09-01 12:42:20 -07:00
|
|
|
add_dep(n, last_grf_write[inst->src[i].nr * 16 +
|
|
|
|
|
inst->src[i].offset / REG_SIZE + r], 0);
|
2013-10-08 22:54:46 -07:00
|
|
|
}
|
2012-12-03 17:58:03 -08:00
|
|
|
}
|
2015-10-26 17:52:57 -07:00
|
|
|
} else if (inst->src[i].file == FIXED_GRF) {
|
2014-11-12 10:17:36 -08:00
|
|
|
if (post_reg_alloc) {
|
2016-09-07 16:59:35 -07:00
|
|
|
for (unsigned r = 0; r < regs_read(inst, i); r++)
|
2015-10-24 15:29:03 -07:00
|
|
|
add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
|
2012-12-03 17:58:03 -08:00
|
|
|
} else {
|
2015-06-07 00:37:27 -04:00
|
|
|
add_dep(n, last_fixed_grf_write, 0);
|
2012-12-03 17:58:03 -08:00
|
|
|
}
|
2014-05-07 09:58:43 +02:00
|
|
|
} else if (inst->src[i].is_accumulator()) {
|
2015-06-07 00:37:27 -04:00
|
|
|
add_dep(n, last_accumulator_write, 0);
|
2021-04-22 09:21:59 -05:00
|
|
|
} else if (inst->src[i].file == ARF && !inst->src[i].is_null()) {
|
2014-11-12 10:17:36 -08:00
|
|
|
add_barrier_deps(n);
|
|
|
|
|
}
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|
|
|
|
|
|
2016-05-18 22:13:52 -07:00
|
|
|
if (const unsigned mask = inst->flags_read(v->devinfo)) {
|
|
|
|
|
assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
|
|
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
|
|
|
|
|
if (mask & (1 << i))
|
|
|
|
|
add_dep(n, last_conditional_mod[i]);
|
|
|
|
|
}
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|
|
|
|
|
|
2014-04-04 16:51:59 +03:00
|
|
|
if (inst->reads_accumulator_implicitly()) {
|
2014-05-07 09:58:43 +02:00
|
|
|
add_dep(n, last_accumulator_write);
|
2014-04-04 16:51:59 +03:00
|
|
|
}
|
|
|
|
|
|
2011-01-18 17:16:49 -08:00
|
|
|
/* Update the things this instruction wrote, so earlier reads
|
|
|
|
|
* can mark this as a WAR dependency.
|
|
|
|
|
*/
|
2015-10-26 17:09:25 -07:00
|
|
|
if (inst->dst.file == VGRF) {
|
2012-12-03 17:58:03 -08:00
|
|
|
if (post_reg_alloc) {
|
2016-09-07 16:59:35 -07:00
|
|
|
for (unsigned r = 0; r < regs_written(inst); r++)
|
2015-10-26 04:35:14 -07:00
|
|
|
last_grf_write[inst->dst.nr + r] = n;
|
2012-12-03 17:58:03 -08:00
|
|
|
} else {
|
2016-09-07 16:59:35 -07:00
|
|
|
for (unsigned r = 0; r < regs_written(inst); r++) {
|
2016-09-01 12:42:20 -07:00
|
|
|
last_grf_write[inst->dst.nr * 16 +
|
|
|
|
|
inst->dst.offset / REG_SIZE + r] = n;
|
2013-10-08 22:54:46 -07:00
|
|
|
}
|
2012-12-03 17:58:03 -08:00
|
|
|
}
|
2015-10-26 17:52:57 -07:00
|
|
|
} else if (inst->dst.file == FIXED_GRF) {
|
2012-12-03 17:58:03 -08:00
|
|
|
if (post_reg_alloc) {
|
2016-09-07 16:59:35 -07:00
|
|
|
for (unsigned r = 0; r < regs_written(inst); r++)
|
2015-10-24 15:29:03 -07:00
|
|
|
last_grf_write[inst->dst.nr + r] = n;
|
2012-12-03 17:58:03 -08:00
|
|
|
} else {
|
|
|
|
|
last_fixed_grf_write = n;
|
|
|
|
|
}
|
2014-05-07 09:58:43 +02:00
|
|
|
} else if (inst->dst.is_accumulator()) {
|
2014-04-04 16:51:59 +03:00
|
|
|
last_accumulator_write = n;
|
2016-02-14 23:21:03 -08:00
|
|
|
} else if (inst->dst.file == ARF && !inst->dst.is_null()) {
|
2014-11-12 10:17:36 -08:00
|
|
|
add_barrier_deps(n);
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|
|
|
|
|
|
2021-08-02 21:33:17 -07:00
|
|
|
if (const unsigned mask = inst->flags_written(v->devinfo)) {
|
2016-05-18 22:13:52 -07:00
|
|
|
assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));
|
|
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
|
|
|
|
|
if (mask & (1 << i))
|
|
|
|
|
last_conditional_mod[i] = n;
|
|
|
|
|
}
|
2012-06-18 14:50:04 -07:00
|
|
|
}
|
2014-04-04 16:51:59 +03:00
|
|
|
|
2015-04-17 12:15:58 -07:00
|
|
|
if (inst->writes_accumulator_implicitly(v->devinfo)) {
|
2014-05-07 09:58:43 +02:00
|
|
|
last_accumulator_write = n;
|
2014-04-04 16:51:59 +03:00
|
|
|
}
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|
2023-06-13 14:18:28 -07:00
|
|
|
|
|
|
|
|
clear_last_grf_write();
|
2011-01-18 17:16:49 -08:00
|
|
|
}
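/* Pick the next node from the ready list.  In SCHEDULE_PRE and SCHEDULE_POST
 * mode this favours whichever node unblocks an early program exit soonest,
 * breaking ties by the earliest unblocked time.  The pressure-aware pre-RA
 * modes instead prefer, in order: a definite register-pressure win, the most
 * recently unblocked candidate (SCHEDULE_PRE_LIFO only), the largest delay to
 * the end of the program, early-exit unblocking, and finally original program
 * order.
 */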
|
|
|
|
|
|
2013-04-29 16:45:10 -07:00
|
|
|
schedule_node *
|
2013-04-29 14:05:33 -07:00
|
|
|
fs_instruction_scheduler::choose_instruction_to_schedule()
|
2013-04-29 16:45:10 -07:00
|
|
|
{
|
|
|
|
|
schedule_node *chosen = NULL;
|
|
|
|
|
|
2013-11-19 13:07:12 -08:00
|
|
|
if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
|
2013-04-29 16:45:10 -07:00
|
|
|
int chosen_time = 0;
|
|
|
|
|
|
2016-08-12 16:13:16 -07:00
|
|
|
/* Of the instructions ready to execute or the closest to being ready,
|
|
|
|
|
* choose the one most likely to unblock an early program exit, or
|
|
|
|
|
* otherwise the oldest one.
|
2013-04-29 16:45:10 -07:00
|
|
|
*/
|
2023-10-20 00:39:04 -07:00
|
|
|
foreach_in_list(schedule_node, n, &current.available) {
|
2016-08-12 16:13:16 -07:00
|
|
|
if (!chosen ||
|
2023-10-20 10:11:11 -07:00
|
|
|
exit_tmp_unblocked_time(n) < exit_tmp_unblocked_time(chosen) ||
|
|
|
|
|
(exit_tmp_unblocked_time(n) == exit_tmp_unblocked_time(chosen) &&
|
|
|
|
|
n->tmp.unblocked_time < chosen_time)) {
|
2013-04-29 16:45:10 -07:00
|
|
|
chosen = n;
|
2023-10-20 10:11:11 -07:00
|
|
|
chosen_time = n->tmp.unblocked_time;
|
2013-04-29 16:45:10 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
2021-01-25 18:43:06 +01:00
|
|
|
int chosen_register_pressure_benefit = 0;
|
|
|
|
|
|
2013-04-29 16:45:10 -07:00
|
|
|
/* Before register allocation, we don't care about the latencies of
|
|
|
|
|
* instructions. All we care about is reducing live intervals of
|
2013-11-12 15:33:27 -08:00
|
|
|
* variables so that we can avoid register spilling, or get SIMD16
|
2013-04-29 16:45:10 -07:00
|
|
|
* shaders which naturally do a better job of hiding instruction
|
|
|
|
|
* latency.
|
|
|
|
|
*/
|
2023-10-20 00:39:04 -07:00
|
|
|
foreach_in_list(schedule_node, n, &current.available) {
|
2013-10-28 15:17:07 -07:00
|
|
|
if (!chosen) {
|
|
|
|
|
chosen = n;
|
2021-01-25 18:43:06 +01:00
|
|
|
chosen_register_pressure_benefit =
|
|
|
|
|
get_register_pressure_benefit(chosen->inst);
|
2013-10-28 15:17:07 -07:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-14 11:38:09 -07:00
|
|
|
/* Most important: If we can definitely reduce register pressure, do
|
|
|
|
|
* so immediately.
|
|
|
|
|
*/
|
|
|
|
|
int register_pressure_benefit = get_register_pressure_benefit(n->inst);
|
|
|
|
|
|
|
|
|
|
if (register_pressure_benefit > 0 &&
|
|
|
|
|
register_pressure_benefit > chosen_register_pressure_benefit) {
|
|
|
|
|
chosen = n;
|
2021-01-25 18:43:06 +01:00
|
|
|
chosen_register_pressure_benefit = register_pressure_benefit;
|
2013-10-14 11:38:09 -07:00
|
|
|
continue;
|
|
|
|
|
} else if (chosen_register_pressure_benefit > 0 &&
|
|
|
|
|
(register_pressure_benefit <
|
|
|
|
|
chosen_register_pressure_benefit)) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2013-11-06 17:38:23 -08:00
|
|
|
if (mode == SCHEDULE_PRE_LIFO) {
|
|
|
|
|
/* Prefer instructions that recently became available for
|
|
|
|
|
* scheduling. These are the things that are most likely to
|
|
|
|
|
* (eventually) make a variable dead and reduce register pressure.
|
|
|
|
|
* Typical register pressure estimates don't work for us because
|
|
|
|
|
* most of our pressure comes from texturing, where no single
|
|
|
|
|
* instruction to schedule will make a vec4 value dead.
|
2013-10-28 15:17:07 -07:00
|
|
|
*/
|
2023-10-20 10:11:11 -07:00
|
|
|
if (n->tmp.cand_generation > chosen->tmp.cand_generation) {
|
2013-10-28 15:17:07 -07:00
|
|
|
chosen = n;
|
2021-01-25 18:43:06 +01:00
|
|
|
chosen_register_pressure_benefit = register_pressure_benefit;
|
2013-10-28 15:17:07 -07:00
|
|
|
continue;
|
2023-10-20 10:11:11 -07:00
|
|
|
} else if (n->tmp.cand_generation < chosen->tmp.cand_generation) {
|
2013-10-28 15:17:07 -07:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* For instructions pushed on the cands list at the same time, prefer
|
|
|
|
|
* the one with the highest delay to the end of the program. This is
|
|
|
|
|
* most likely to have its values able to be consumed first (such as
|
|
|
|
|
* for a large tree of lowered ubo loads, which appear reversed in
|
|
|
|
|
* the instruction stream with respect to when they can be consumed).
|
|
|
|
|
*/
|
|
|
|
|
if (n->delay > chosen->delay) {
|
|
|
|
|
chosen = n;
|
2021-01-25 18:43:06 +01:00
|
|
|
chosen_register_pressure_benefit = register_pressure_benefit;
|
2013-10-28 15:17:07 -07:00
|
|
|
continue;
|
|
|
|
|
} else if (n->delay < chosen->delay) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-12 16:13:16 -07:00
|
|
|
/* Prefer the node most likely to unblock an early program exit.
|
|
|
|
|
*/
|
2023-10-20 10:11:11 -07:00
|
|
|
if (exit_tmp_unblocked_time(n) < exit_tmp_unblocked_time(chosen)) {
|
2016-08-12 16:13:16 -07:00
|
|
|
chosen = n;
|
2021-01-25 18:43:06 +01:00
|
|
|
chosen_register_pressure_benefit = register_pressure_benefit;
|
2016-08-12 16:13:16 -07:00
|
|
|
continue;
|
2023-10-20 10:11:11 -07:00
|
|
|
} else if (exit_tmp_unblocked_time(n) > exit_tmp_unblocked_time(chosen)) {
|
2016-08-12 16:13:16 -07:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-28 15:17:07 -07:00
|
|
|
/* If all other metrics are equal, we prefer the first instruction in
|
|
|
|
|
* the list (i.e., original program order).
|
|
|
|
|
*/
|
2013-04-29 16:45:10 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return chosen;
|
|
|
|
|
}
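/* Estimate how many scheduler clock ticks an instruction occupies the issue
 * path: 4 for a compressed instruction, 2 otherwise, plus a penalty equal to
 * the destination size in registers when a bank conflict is expected and
 * grf_used is nonzero (so effectively only after register allocation).
 */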
|
|
|
|
|
|
|
|
|
|
int
|
2023-10-20 01:18:24 -07:00
|
|
|
fs_instruction_scheduler::calculate_issue_time(backend_instruction *inst0)
|
2013-04-29 16:45:10 -07:00
|
|
|
{
|
2022-06-29 14:13:31 -07:00
|
|
|
const struct brw_isa_info *isa = &v->compiler->isa;
|
2020-04-02 16:20:34 -07:00
|
|
|
const fs_inst *inst = static_cast<fs_inst *>(inst0);
|
2022-06-29 14:13:31 -07:00
|
|
|
const unsigned overhead = v->grf_used && has_bank_conflict(isa, inst) ?
|
2020-04-02 16:20:34 -07:00
|
|
|
DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE) : 0;
|
|
|
|
|
if (is_compressed(inst))
|
2017-12-06 11:42:54 -08:00
|
|
|
return 4 + overhead;
|
2013-04-29 16:45:10 -07:00
|
|
|
else
|
2017-12-06 11:42:54 -08:00
|
|
|
return 2 + overhead;
|
2013-04-29 16:45:10 -07:00
|
|
|
}
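/* Commit a chosen node: unlink it from the ready list, append its instruction
 * back onto the block, and advance the scheduler clock past the point where
 * the instruction can issue.
 */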
|
|
|
|
|
|
2011-01-18 17:16:49 -08:00
|
|
|
void
|
2023-10-20 00:58:25 -07:00
|
|
|
instruction_scheduler::schedule(schedule_node *chosen)
|
|
|
|
|
{
|
|
|
|
|
assert(current.scheduled < current.len);
|
|
|
|
|
current.scheduled++;
|
|
|
|
|
|
|
|
|
|
assert(chosen);
|
|
|
|
|
chosen->remove();
|
|
|
|
|
current.block->instructions.push_tail(chosen->inst);
|
|
|
|
|
|
|
|
|
|
/* If we expected a delay for scheduling, then bump the clock to reflect
|
|
|
|
|
* that. In reality, the hardware will switch to another hyperthread
|
|
|
|
|
* and may not return to dispatching our thread for a while even after
|
|
|
|
|
* we're unblocked. After this, we have the time when the chosen
|
|
|
|
|
* instruction will start executing.
|
|
|
|
|
*/
|
2023-10-20 10:11:11 -07:00
|
|
|
current.time = MAX2(current.time, chosen->tmp.unblocked_time);
|
2023-10-20 00:58:25 -07:00
|
|
|
|
|
|
|
|
/* Update the clock for how soon an instruction could start after the
|
|
|
|
|
* chosen one.
|
|
|
|
|
*/
|
2023-10-16 23:25:00 -07:00
|
|
|
current.time += chosen->issue_time;
|
2023-10-20 00:58:25 -07:00
|
|
|
|
|
|
|
|
if (debug) {
|
|
|
|
|
fprintf(stderr, "clock %4d, scheduled: ", current.time);
|
|
|
|
|
bs->dump_instruction(chosen->inst);
|
|
|
|
|
}
|
|
|
|
|
}
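/* Propagate the effects of scheduling a node to its DAG children. */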
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
instruction_scheduler::update_children(schedule_node *chosen)
|
2011-01-18 17:16:49 -08:00
|
|
|
{
|
2023-10-20 00:58:25 -07:00
|
|
|
/* Now that we've scheduled a new instruction, some of its
|
|
|
|
|
* children can be promoted to the list of instructions ready to
|
|
|
|
|
* be scheduled. Update the children's unblocked time for this
|
|
|
|
|
* DAG edge as we do so.
|
|
|
|
|
*/
|
2023-10-15 23:38:56 -07:00
|
|
|
for (int i = chosen->children_count - 1; i >= 0; i--) {
|
|
|
|
|
schedule_node_child *child = &chosen->children[i];
|
2023-10-20 00:58:25 -07:00
|
|
|
|
2023-10-20 10:11:11 -07:00
|
|
|
child->n->tmp.unblocked_time = MAX2(child->n->tmp.unblocked_time,
|
|
|
|
|
current.time + child->effective_latency);
|
2023-10-20 00:58:25 -07:00
|
|
|
|
|
|
|
|
if (debug) {
|
2023-10-20 10:11:11 -07:00
|
|
|
fprintf(stderr, "\tchild %d, %d parents: ", i, child->n->tmp.parent_count);
|
2023-10-15 23:38:56 -07:00
|
|
|
bs->dump_instruction(child->n->inst);
|
2023-10-20 00:58:25 -07:00
|
|
|
}
|
|
|
|
|
|
2023-10-20 10:11:11 -07:00
|
|
|
child->n->tmp.cand_generation = current.cand_generation;
|
|
|
|
|
child->n->tmp.parent_count--;
|
|
|
|
|
if (child->n->tmp.parent_count == 0) {
|
2023-10-20 00:58:25 -07:00
|
|
|
if (debug) {
|
|
|
|
|
fprintf(stderr, "\t\tnow available\n");
|
|
|
|
|
}
|
2023-10-15 23:38:56 -07:00
|
|
|
current.available.push_head(child->n);
|
2023-10-20 00:58:25 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
current.cand_generation++;
|
|
|
|
|
}
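/* Schedule a single basic block: seed the ready list with the DAG heads,
 * empty the block's instruction list, then repeatedly choose a node, re-emit
 * it, and promote its newly unblocked children.  Pre-RA, the running register
 * pressure estimate is updated as each instruction is emitted.
 */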
|
2016-04-28 15:19:28 -07:00
|
|
|
|
2023-10-20 00:58:25 -07:00
|
|
|
void
|
2023-10-20 01:18:24 -07:00
|
|
|
fs_instruction_scheduler::schedule_instructions()
|
2023-10-20 00:58:25 -07:00
|
|
|
{
|
2015-06-09 10:26:53 -07:00
|
|
|
if (!post_reg_alloc)
|
2023-10-20 00:09:37 -07:00
|
|
|
reg_pressure = reg_pressure_in[current.block->num];
|
|
|
|
|
|
2023-10-20 00:58:25 -07:00
|
|
|
assert(current.available.is_empty());
|
2023-10-20 00:09:37 -07:00
|
|
|
for (schedule_node *n = current.start; n < current.end; n++) {
|
2023-10-20 10:11:11 -07:00
|
|
|
reset_node_tmp(n);
|
|
|
|
|
|
|
|
|
|
/* Add DAG heads to the list of available instructions. */
|
|
|
|
|
if (n->tmp.parent_count == 0)
|
2023-10-20 00:39:04 -07:00
|
|
|
current.available.push_tail(n);
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|
|
|
|
|
|
2023-10-20 12:16:18 -07:00
|
|
|
current.block->instructions.make_empty();
|
|
|
|
|
|
2023-10-20 00:39:04 -07:00
|
|
|
while (!current.available.is_empty()) {
|
2013-04-29 16:45:10 -07:00
|
|
|
schedule_node *chosen = choose_instruction_to_schedule();
|
2023-10-20 00:58:25 -07:00
|
|
|
schedule(chosen);
|
2015-06-09 10:26:53 -07:00
|
|
|
|
|
|
|
|
if (!post_reg_alloc) {
|
|
|
|
|
reg_pressure -= get_register_pressure_benefit(chosen->inst);
|
|
|
|
|
update_register_pressure(chosen->inst);
|
2023-10-20 00:58:25 -07:00
|
|
|
if (debug)
|
2015-06-09 10:26:53 -07:00
|
|
|
fprintf(stderr, "(register pressure %d)\n", reg_pressure);
|
2012-12-04 13:52:19 -08:00
|
|
|
}
|
|
|
|
|
|
2023-10-20 00:58:25 -07:00
|
|
|
update_children(chosen);
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|
|
|
|
|
}
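/* Top-level driver: optionally dump the incoming instructions, clear the
 * pre-RA read/write bookkeeping, then schedule each basic block of the CFG
 * in turn.
 */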
|
|
|
|
|
|
|
|
|
|
void
|
2023-10-20 10:32:54 -07:00
|
|
|
fs_instruction_scheduler::run(instruction_scheduler_mode mode)
|
2011-01-18 17:16:49 -08:00
|
|
|
{
|
2023-10-20 10:32:54 -07:00
|
|
|
this->mode = mode;
|
|
|
|
|
|
2015-06-09 10:26:53 -07:00
|
|
|
if (debug && !post_reg_alloc) {
|
2013-12-22 23:29:31 -08:00
|
|
|
fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
|
|
|
|
|
post_reg_alloc);
|
2015-06-09 10:26:53 -07:00
|
|
|
bs->dump_instructions();
|
2012-12-04 13:52:19 -08:00
|
|
|
}
|
|
|
|
|
|
2023-10-20 02:15:59 -07:00
|
|
|
if (!post_reg_alloc) {
|
|
|
|
|
memset(reads_remaining, 0, grf_count * sizeof(*reads_remaining));
|
|
|
|
|
memset(hw_reads_remaining, 0, hw_reg_count * sizeof(*hw_reads_remaining));
|
2023-06-13 12:50:02 -07:00
|
|
|
memset(written, 0, grf_count * sizeof(*written));
|
|
|
|
|
}
|
|
|
|
|
|
2023-10-20 01:18:24 -07:00
|
|
|
foreach_block(block, v->cfg) {
|
2023-10-20 00:09:37 -07:00
|
|
|
set_current_block(block);
|
2011-01-18 17:16:49 -08:00
|
|
|
|
2023-10-20 09:45:14 -07:00
|
|
|
if (!post_reg_alloc) {
|
|
|
|
|
for (schedule_node *n = current.start; n < current.end; n++)
|
2023-10-20 02:15:59 -07:00
|
|
|
count_reads_remaining(n->inst);
|
|
|
|
|
}
|
2023-10-16 23:25:00 -07:00
|
|
|
|
2023-10-20 00:09:37 -07:00
|
|
|
schedule_instructions();
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|
|
|
|
|
|
2015-06-09 10:26:53 -07:00
|
|
|
if (debug && !post_reg_alloc) {
|
2013-12-22 23:29:31 -08:00
|
|
|
fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
|
|
|
|
|
post_reg_alloc);
|
2015-05-20 09:44:01 -07:00
|
|
|
bs->dump_instructions();
|
2013-04-29 14:05:33 -07:00
|
|
|
}
|
|
|
|
|
}
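/* Allocate a pre-register-allocation scheduler on the given ralloc context;
 * schedule_instructions_pre_ra() below runs it in the requested pre-RA mode.
 */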
|
|
|
|
|
|
2023-10-20 10:32:54 -07:00
|
|
|
fs_instruction_scheduler *
|
|
|
|
|
fs_visitor::prepare_scheduler(void *mem_ctx)
|
|
|
|
|
{
|
|
|
|
|
const int grf_count = alloc.count;
|
|
|
|
|
|
|
|
|
|
fs_instruction_scheduler *empty = rzalloc(mem_ctx, fs_instruction_scheduler);
|
|
|
|
|
return new (empty) fs_instruction_scheduler(mem_ctx, this, grf_count, first_non_payload_grf,
|
|
|
|
|
cfg->num_blocks, /* post_reg_alloc */ false);
|
|
|
|
|
}
|
|
|
|
|
|
2013-04-29 14:05:33 -07:00
|
|
|
void
|
2023-10-20 10:32:54 -07:00
|
|
|
fs_visitor::schedule_instructions_pre_ra(fs_instruction_scheduler *sched,
|
|
|
|
|
instruction_scheduler_mode mode)
|
2013-04-29 14:05:33 -07:00
|
|
|
{
|
2023-08-14 19:19:45 -07:00
|
|
|
if (mode == SCHEDULE_NONE)
|
|
|
|
|
return;
|
|
|
|
|
|
2023-10-20 10:32:54 -07:00
|
|
|
sched->run(mode);
|
|
|
|
|
|
|
|
|
|
invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
|
|
|
|
|
}
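/* Post-RA scheduling: build a temporary scheduler sized for the hardware
 * registers actually in use and run it in the latency-oriented SCHEDULE_POST
 * mode.
 */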
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
fs_visitor::schedule_instructions_post_ra()
|
|
|
|
|
{
|
|
|
|
|
const bool post_reg_alloc = true;
|
|
|
|
|
const int grf_count = reg_unit(devinfo) * grf_used;
|
2013-04-29 14:05:33 -07:00
|
|
|
|
2023-10-20 02:31:20 -07:00
|
|
|
void *mem_ctx = ralloc_context(NULL);
|
|
|
|
|
|
|
|
|
|
fs_instruction_scheduler sched(mem_ctx, this, grf_count, first_non_payload_grf,
|
2023-10-20 10:32:54 -07:00
|
|
|
cfg->num_blocks, post_reg_alloc);
|
|
|
|
|
sched.run(SCHEDULE_POST);
|
2013-04-29 14:05:33 -07:00
|
|
|
|
2023-10-20 02:31:20 -07:00
|
|
|
ralloc_free(mem_ctx);
|
|
|
|
|
|
2016-03-13 19:26:37 -07:00
|
|
|
invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
|
2011-01-18 17:16:49 -08:00
|
|
|
}
|