//*****************************************************************************
// (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved.
//
// This file contains confidential and proprietary information
// of Xilinx, Inc. and is protected under U.S. and
// international copyright and other intellectual property
// laws.
//
// DISCLAIMER
// This disclaimer is not a license and does not grant any
// rights to the materials distributed herewith. Except as
// otherwise provided in a valid license issued to you by
// Xilinx, and to the maximum extent permitted by applicable
// law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
// WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
// AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
// BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
// INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
// (2) Xilinx shall not be liable (whether in contract or tort,
// including negligence, or under any other theory of
// liability) for any loss or damage of any kind or nature
// related to, arising under or in connection with these
// materials, including for any direct, or any indirect,
// special, incidental, or consequential loss or damage
// (including loss of data, profits, goodwill, or any type of
// loss or damage suffered as a result of any action brought
// by a third party) even if such damage or loss was
// reasonably foreseeable or Xilinx had been advised of the
// possibility of the same.
//
// CRITICAL APPLICATIONS
// Xilinx products are not designed or intended to be fail-
// safe, or for use in any application requiring fail-safe
// performance, such as life-support or safety devices or
// systems, Class III medical devices, nuclear facilities,
// applications related to the deployment of airbags, or any
// other applications that could lead to death, personal
// injury, or severe property or environmental damage
// (individually and collectively, "Critical
// Applications"). Customer assumes the sole risk and
// liability of any use of Xilinx products in Critical
// Applications, subject only to applicable laws and
// regulations governing limitations on product liability.
//
// THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
// PART OF THIS FILE AT ALL TIMES.
//
//*****************************************************************************
//   ____  ____
//  /   /\/   /
// /___/  \  /    Vendor             : Xilinx
// \   \   \/     Version            : %version
//  \   \         Application        : MIG
//  /   /         Filename           : bank_state.v
// /___/   /\     Date Last Modified : $date$
// \   \  /  \    Date Created       : Tue Jun 30 2009
//  \___\/\___\
//
//Design Name       : DDR3 SDRAM
//*****************************************************************************
// Primary bank state machine. All bank specific timing is generated here.
//
// Conceptually, when a bank machine is assigned a request, conflicts are
// checked. If there is a conflict, then the new request is added
// to the queue for that rank-bank.
//
// Eventually, that request will find itself at the head of the queue for
// its rank-bank. Forthwith, the bank machine will begin arbitration to send an
// activate command to the DRAM. Once arbitration is successful and the
// activate is sent, the row state machine waits out the RCD delay. The RAS
// counter is also started when the activate is sent.
//
// Upon completion of the RCD delay, the bank state machine begins
// arbitration for sending out the column command. Once the column
// command has been sent, the bank state machine waits the RTP latency, and,
// if the command is a write, the RAS counter is loaded with the WR latency.
//
// When the RTP counter reaches zero, the precharge wait state is entered.
// Once the RAS timer reaches zero, arbitration to send a precharge command
// begins.
//
// Upon successful transmission of the precharge command, the bank state
// machine waits the precharge period and then rejoins the idle list.
//
// For an open rank-bank hit, a bank machine passes management of the rank-bank
// to a bank machine that is managing the subsequent request to the same page.
// A bank machine can be either the "passer" or the "passee" in this handoff.
// Two conditions have to hold before an open bank can be passed: a spatial
// condition, i.e., the same rank-bank and row address, and a temporal
// condition, i.e., the passer has completed its work with the bank but has
// not yet issued a precharge.
//
// The spatial condition is signalled by pass_open_bank_ns. The temporal
// condition is met when the column command is issued, or when the
// bank_wait_in_progress signal is true. bank_wait_in_progress is true when
// the RTP timer is not zero, or when the RAS/WR timer is not zero and the
// state machine is waiting to send out a precharge command.
//
// On an open bank pass, the passer transitions from the temporal condition
// noted above, performs the end of request processing, and eventually lands
// in the act_wait_r state.
//
// On an open bank pass, the passee lands in the col_wait_r state and waits
// for its chance to send out a column command.
//
// Since there is a single data bus shared by all columns in all ranks, there
// is a single column machine. The column machine is primarily in charge of
// managing the timing on the DQ data bus. It reserves states for data
// transfer, driver turnaround states, and preambles. It also has the ability
// to add additional programmable delay for read to write changeovers. This
// read to write delay is generated in the column machine, which inhibits
// writes via the inhbt_wr signal.
//
// There is a rank machine for every rank. The rank machines are responsible
// for enforcing rank specific timing such as FAW and WTR. RRD is guaranteed
// in the bank machine since it is closely coupled to the operation of the
// bank machine and is timing critical.
//
// Since a bank machine can be working on a request for any rank, all rank
// machine inhibits are input to all bank machines. Based on the rank of the
// current request, each bank machine selects the rank information
// corresponding to the rank of its current request.
//
// Since driver turnaround states and WTR delays are so severe with DDR3, the
// memory interface has the ability to promote requests that use the same
// driver as the most recent request. There is logic in this block that
// detects when the driver for its request is the same as the driver for
// the most recent request. In such a case, this block will send out a special
// "same" request early enough to eliminate dead states when there is no
// driver changeover.
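//
// Illustrative lifecycle sketch (added commentary, not from the original
// source): following the description above, a request that is not passed an
// open bank moves roughly through
//   act_wait_r -> (activate sent, RCD and RAS timers run) -> col_wait_r
//   -> (column command sent, RTP/WTP timers run) -> pre_wait_r
//   -> (precharge sent, RP timer runs) -> precharge_bm_end -> idle.
// An open-bank pass short-circuits this flow: the passee enters col_wait_r
// directly, while the passer skips the precharge and returns to act_wait_r.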
   parameter ADDR_CMD_MODE            = "1T",
   parameter BM_CNT_WIDTH             = 2,
   parameter BURST_MODE               = "8",
   parameter DATA_BUF_ADDR_WIDTH      = 8,
   parameter DRAM_TYPE                = "DDR3",
   parameter ECC                      = "OFF",
   parameter nBANK_MACHS              = 4,
   parameter nCK_PER_CLK              = 2,
   parameter nOP_WAIT                 = 0,
   parameter nRAS_CLKS                = 10,
   parameter nWTP_CLKS                = 5,
   parameter ORDERING                 = "NORM",
   parameter RANK_WIDTH               = 4,
   parameter RAS_TIMER_WIDTH          = 5,
   parameter STARVE_LIMIT             = 2
  )
  (
   // Outputs
   start_rcd, act_wait_r, rd_half_rmw, ras_timer_ns, end_rtp,
   bank_wait_in_progress, start_pre_wait, op_exit_req, pre_wait_r,
   allow_auto_pre, precharge_bm_end, demand_act_priority, rts_row,
   act_this_rank_r, demand_priority, col_rdy_wr, rts_col, wr_this_rank_r,
   rd_this_rank_r, rts_pre, rtc,
   // Inputs
   clk, rst, bm_end, pass_open_bank_r, sending_row, sending_pre,
   rcv_open_bank, sending_col, rd_wr_r, req_wr_r, rd_data_addr,
   req_data_buf_addr_r, phy_rddata_valid, rd_rmw, ras_timer_ns_in,
   rb_hit_busies_r, idle_r, passing_open_bank, low_idle_cnt_r, op_exit_grant,
   tail_r, auto_pre_r, pass_open_bank_ns, req_rank_r, req_rank_r_in,
   start_rcd_in, inhbt_act_faw_r, wait_for_maint_r, head_r, sent_row,
   demand_act_priority_in, order_q_zero, sent_col, q_has_rd,
   q_has_priority, req_priority_r, idle_ns, demand_priority_in, inhbt_rd,
   inhbt_wr, dq_busy_data, rnk_config_strobe, rnk_config_valid_r, rnk_config,
   rnk_config_kill_rts_col, phy_mc_cmd_full, phy_mc_ctl_full, phy_mc_data_full
   );
  function integer clogb2 (input integer size); // ceiling logb2
    begin
      size = size - 1;
      for (clogb2=1; size>1; clogb2=clogb2+1)
        size = size >> 1;
    end
  endfunction // clogb2
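  // Usage note (added): clogb2 returns the ceiling of log2(size), i.e. the
  // number of bits needed to index size states. For example, clogb2(8) = 3,
  // clogb2(9) = 4, and clogb2(1) returns 1.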
  // Activate wait state machine.
  always @(posedge clk) bm_end_r1 <= #TCQ bm_end;
  input pass_open_bank_r;
  wire start_rcd_lcl = act_wait_r_lcl && sending_row;
  output wire start_rcd;
  assign start_rcd = start_rcd_lcl;
  wire act_wait_ns = rst ||
                     ((act_wait_r_lcl && ~start_rcd_lcl && ~rcv_open_bank) ||
                      bm_end_r1 || (pass_open_bank_r && bm_end));
  always @(posedge clk) act_wait_r_lcl <= #TCQ act_wait_ns;
  output wire act_wait_r;
  assign act_wait_r = act_wait_r_lcl;
  // When CWL is even, CAS commands are issued on slot 0 and RAS commands are
  // issued on slot 1. This implies that the RCD can never expire in the same
  // cycle as the RAS (otherwise the CAS for a given transaction would precede
  // the RAS). Similarly, this can also cause premature expiration for longer
  // RCD. An offset must be added to RCD before translating it to the FPGA clock
  // domain. In this mode, CAS are on the first DRAM clock cycle corresponding to
  // a given FPGA cycle. In 2:1 mode add 2 to generate this offset aligned to
  // the FPGA cycle. Likewise, add 4 to generate an aligned offset in 4:1 mode.
  //
  // When CWL is odd, RAS commands are issued on slot 0 and CAS commands are
  // issued on slot 1. There is a natural 1 cycle separation between RAS and CAS
  // in the DRAM clock domain so the RCD can expire in the same FPGA cycle as the
  // RAS command. In 2:1 mode, there are only 2 slots so direct translation
  // correctly places the CAS with respect to the corresponding RAS. In 4:1 mode,
  // there are two slots after CAS, so 2 is added to shift the timer into the
  // next FPGA cycle for cases that can't expire in the current cycle.
  //
  // In 2T mode, the offset from ROW to COL commands is fixed at 2. In 2:1 mode,
  // it is sufficient to translate to the half-rate domain and add the remainder.
  // In 4:1 mode, we must translate to the quarter-rate domain and add an
  // additional fabric cycle only if the remainder exceeds the fixed offset of 2.
  localparam nRCD_CLKS =
      ADDR_CMD_MODE == "2T" ?
        (nRCD/2) + (nRCD%2) :
      // (nCK_PER_CLK == 4)
      ADDR_CMD_MODE == "2T" ?
        (nRCD/4) + (nRCD%4 > 2 ? 1 : 0) :
        (nRCD-2 ? (nRCD-2)/4 + 1 : 1) :
  localparam nRCD_CLKS_M2 = (nRCD_CLKS-2 < 0) ? 0 : nRCD_CLKS-2;
  localparam RCD_TIMER_WIDTH = clogb2(nRCD_CLKS_M2+1);
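  // Illustrative arithmetic (added; assumes nRCD = 5 with nCK_PER_CLK = 2 and
  // ADDR_CMD_MODE = "2T"): nRCD_CLKS = 5/2 + 5%2 = 3, so nRCD_CLKS_M2 = 1 and
  // RCD_TIMER_WIDTH = clogb2(2) = 1.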
  reg [RCD_TIMER_WIDTH-1:0] rcd_timer_r = {RCD_TIMER_WIDTH{1'b0}};
  reg rcd_active_r = 1'b0;
  if (nRCD_CLKS <= 2) begin : rcd_timer_leq_2
    always @(/*AS*/start_rcd_lcl) end_rcd = start_rcd_lcl;
  end
  else if (nRCD_CLKS > 2) begin : rcd_timer_gt_2
    reg [RCD_TIMER_WIDTH-1:0] rcd_timer_ns;
    always @(/*AS*/rcd_timer_r or rst or start_rcd_lcl) begin
      if (rst) rcd_timer_ns = ZERO[RCD_TIMER_WIDTH-1:0];
      else begin
        rcd_timer_ns = rcd_timer_r;
        if (start_rcd_lcl) rcd_timer_ns = nRCD_CLKS_M2[RCD_TIMER_WIDTH-1:0];
        else if (|rcd_timer_r) rcd_timer_ns =
                                 rcd_timer_r - ONE[RCD_TIMER_WIDTH-1:0];
      end
    end
    always @(posedge clk) rcd_timer_r <= #TCQ rcd_timer_ns;
    wire end_rcd_ns = (rcd_timer_ns == ONE[RCD_TIMER_WIDTH-1:0]);
    always @(posedge clk) end_rcd = end_rcd_ns;
    wire rcd_active_ns = |rcd_timer_ns;
    always @(posedge clk) rcd_active_r <= #TCQ rcd_active_ns;
  end
  // Figure out if the read that's completing is for an RMW for
  // this bank machine. Delay by a state if CWL != 8 since the
  // data is not ready in the RMW buffer for the early write
  // data fetch that happens with ECC and CWL != 8.
  // Create a state bit indicating we're waiting for the read
  // half of the RMW to complete.
  input [DATA_BUF_ADDR_WIDTH-1:0] rd_data_addr;
  input [DATA_BUF_ADDR_WIDTH-1:0] req_data_buf_addr_r;
  input phy_rddata_valid;
  reg rmw_rd_done = 1'b0;
  reg rd_half_rmw_lcl = 1'b0;
  output wire rd_half_rmw;
  assign rd_half_rmw = rd_half_rmw_lcl;
  reg rmw_wait_r = 1'b0;
  if (ECC != "OFF") begin : rmw_on
    // Delay phy_rddata_valid and rd_rmw by one cycle to align them
    // to req_data_buf_addr_r so that rmw_wait_r clears properly.
    reg phy_rddata_valid_r;
    reg rd_rmw_r;
    always @(posedge clk) begin
      phy_rddata_valid_r <= #TCQ phy_rddata_valid;
      rd_rmw_r <= #TCQ rd_rmw;
    end
    wire my_rmw_rd_ns = phy_rddata_valid_r && rd_rmw_r &&
                        (rd_data_addr == req_data_buf_addr_r);
    if (CWL == 8)
      always @(my_rmw_rd_ns) rmw_rd_done = my_rmw_rd_ns;
    else
      always @(posedge clk) rmw_rd_done = #TCQ my_rmw_rd_ns;
    always @(/*AS*/rd_wr_r or req_wr_r) rd_half_rmw_lcl = req_wr_r && rd_wr_r;
    wire rmw_wait_ns = ~rst &&
         ((rmw_wait_r && ~rmw_rd_done) || (rd_half_rmw_lcl && sending_col));
    always @(posedge clk) rmw_wait_r <= #TCQ rmw_wait_ns;
  end
  // Column wait state machine.
  wire col_wait_ns = ~rst && ((col_wait_r && ~sending_col) || end_rcd ||
                              rcv_open_bank || (rmw_rd_done && rmw_wait_r));
  always @(posedge clk) col_wait_r <= #TCQ col_wait_ns;
  // Set up various RAS timer parameters, wires, etc.
  output reg [RAS_TIMER_WIDTH-1:0] ras_timer_ns;
  reg [RAS_TIMER_WIDTH-1:0] ras_timer_r;
  input [(2*(RAS_TIMER_WIDTH*nBANK_MACHS))-1:0] ras_timer_ns_in;
  input [(nBANK_MACHS*2)-1:0] rb_hit_busies_r;
  // On a bank pass, select the RAS timer from the passing bank machine.
  reg [RAS_TIMER_WIDTH-1:0] passed_ras_timer;
  integer i;
  always @(/*AS*/ras_timer_ns_in or rb_hit_busies_r) begin
    passed_ras_timer = {RAS_TIMER_WIDTH{1'b0}};
    for (i=ID+1; i<(ID+nBANK_MACHS); i=i+1)
      if (rb_hit_busies_r[i])
        passed_ras_timer = ras_timer_ns_in[i*RAS_TIMER_WIDTH+:RAS_TIMER_WIDTH];
  end
  // RAS and (reused for) WTP timer. When an open bank is passed, this
  // timer is passed to the new owner. The existing RAS prevents
  // an activate from occurring too early.
  wire start_wtp_timer = sending_col && ~rd_wr_r;

  always @(/*AS*/bm_end_r1 or ras_timer_r or rst or start_rcd_lcl
           or start_wtp_timer) begin
    if (bm_end_r1 || rst) ras_timer_ns = ZERO[RAS_TIMER_WIDTH-1:0];
    else begin
      ras_timer_ns = ras_timer_r;
      if (start_rcd_lcl) ras_timer_ns =
        nRAS_CLKS[RAS_TIMER_WIDTH-1:0] - TWO[RAS_TIMER_WIDTH-1:0];
      if (start_wtp_timer) ras_timer_ns =
        // As the timer is being reused, it is essential to compare
        // before the new value is loaded.
        (ras_timer_r <= (nWTP_CLKS-2)) ? nWTP_CLKS[RAS_TIMER_WIDTH-1:0] -
                                         TWO[RAS_TIMER_WIDTH-1:0]
                                       : ras_timer_r - ONE[RAS_TIMER_WIDTH-1:0];
      if (|ras_timer_r && ~start_wtp_timer) ras_timer_ns =
        ras_timer_r - ONE[RAS_TIMER_WIDTH-1:0];
    end
  end

  wire [RAS_TIMER_WIDTH-1:0] ras_timer_passed_ns = rcv_open_bank
                                                     ? passed_ras_timer
                                                     : ras_timer_ns;
  always @(posedge clk) ras_timer_r <= #TCQ ras_timer_passed_ns;

  wire ras_timer_zero_ns = (ras_timer_ns == ZERO[RAS_TIMER_WIDTH-1:0]);
  reg ras_timer_zero_r;
  always @(posedge clk) ras_timer_zero_r <= #TCQ ras_timer_zero_ns;
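  // Illustrative behavior (added; assumes the default nRAS_CLKS = 10 and
  // nWTP_CLKS = 5): an activate loads the shared timer with 10 - 2 = 8, and a
  // later write reloads it with 5 - 2 = 3 only when the remaining count is
  // already at or below 3, so the longer of the tRAS and write-to-precharge
  // requirements always determines when the precharge may be requested.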
  // RTP timer. Unless in 2T mode, add one for 2:1 mode. This accounts for the
  // loss of one DRAM CK due to the column command to row command fixed offset.
  // In 2T mode, add the remainder. In 4:1 mode, the fixed offset is -2. Add 2
  // unless in 2T mode, in which case we add 1 if the remainder exceeds the
  // fixed offset.
  localparam nRTP_CLKS = (nCK_PER_CLK == 1)
                           ? nRTP :
                         (nCK_PER_CLK == 2)
                           ? (nRTP/2) + ((ADDR_CMD_MODE == "2T") ? nRTP%2 : 1) :
                             (nRTP/4) + ((ADDR_CMD_MODE == "2T")
                                           ? (nRTP%4 > 2 ? 2 : 1) : 2);
  localparam nRTP_CLKS_M1 = ((nRTP_CLKS-1) <= 0) ? 0 : nRTP_CLKS-1;
  localparam RTP_TIMER_WIDTH = clogb2(nRTP_CLKS_M1 + 1);
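  // Illustrative arithmetic (added; assumes nRTP = 4 with nCK_PER_CLK = 2 and
  // ADDR_CMD_MODE = "1T"): nRTP_CLKS = 4/2 + 1 = 3, nRTP_CLKS_M1 = 2 and
  // RTP_TIMER_WIDTH = clogb2(3) = 2.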
  reg [RTP_TIMER_WIDTH-1:0] rtp_timer_ns;
  reg [RTP_TIMER_WIDTH-1:0] rtp_timer_r;
  wire sending_col_not_rmw_rd = sending_col && ~rd_half_rmw_lcl;
  always @(/*AS*/pass_open_bank_r or rst or rtp_timer_r
           or sending_col_not_rmw_rd) begin
    rtp_timer_ns = rtp_timer_r;
    if (rst || pass_open_bank_r)
      rtp_timer_ns = ZERO[RTP_TIMER_WIDTH-1:0];
    else begin
      if (sending_col_not_rmw_rd)
        rtp_timer_ns = nRTP_CLKS_M1[RTP_TIMER_WIDTH-1:0];
      if (|rtp_timer_r) rtp_timer_ns = rtp_timer_r - ONE[RTP_TIMER_WIDTH-1:0];
    end
  end
  always @(posedge clk) rtp_timer_r <= #TCQ rtp_timer_ns;
  wire end_rtp_lcl = ~pass_open_bank_r &&
                     ((rtp_timer_r == ONE[RTP_TIMER_WIDTH-1:0]) ||
                      ((nRTP_CLKS_M1 == 0) && sending_col_not_rmw_rd));
  assign end_rtp = end_rtp_lcl;
  // Optionally implement open page mode timer.
  localparam OP_WIDTH = clogb2(nOP_WAIT + 1);
  output wire bank_wait_in_progress;
  output wire start_pre_wait;
  input passing_open_bank;
  input low_idle_cnt_r;
  output wire op_exit_req;
  output reg pre_wait_r;
  if (nOP_WAIT == 0) begin : op_mode_disabled
    assign bank_wait_in_progress = sending_col_not_rmw_rd || |rtp_timer_r ||
                                   (pre_wait_r && ~ras_timer_zero_r);
    assign start_pre_wait = end_rtp_lcl;
    assign op_exit_req = 1'b0;
  end
  else begin : op_mode_enabled
    assign bank_wait_in_progress = sending_col || |rtp_timer_r ||
                                   (pre_wait_r && ~ras_timer_zero_r) ||
                                   op_wait_r;
    wire op_active = ~rst && ~passing_open_bank && ((end_rtp_lcl && tail_r) ||
                                                    op_wait_r);
    wire op_wait_ns = ~op_exit_grant && op_active;
    always @(posedge clk) op_wait_r <= #TCQ op_wait_ns;
    assign start_pre_wait = op_exit_grant ||
                            (end_rtp_lcl && ~tail_r && ~passing_open_bank);
    assign op_exit_req = (low_idle_cnt_r && op_active);

    reg [OP_WIDTH-1:0] op_cnt_r;
    wire [OP_WIDTH-1:0] op_cnt_ns =
         (passing_open_bank || op_exit_grant || rst)
           ? {OP_WIDTH{1'b0}}
           : end_rtp_lcl
               ? nOP_WAIT[OP_WIDTH-1:0]
               : |op_cnt_r
                   ? op_cnt_r - ONE[OP_WIDTH-1:0]
                   : op_cnt_r;
    always @(posedge clk) op_cnt_r <= #TCQ op_cnt_ns;
    assign op_exit_req = (low_idle_cnt_r && op_active) ||
                         (op_wait_r && ~|op_cnt_r);
  end
  output allow_auto_pre;
  wire allow_auto_pre = act_wait_r_lcl || rcd_active_r ||
                        (col_wait_r && ~sending_col);
  // Precharge wait state machine.
  input pass_open_bank_ns;
  wire pre_wait_ns = ~rst && (~pass_open_bank_ns &&
                              (start_pre_wait || (pre_wait_r && ~start_pre)));
  always @(posedge clk) pre_wait_r <= #TCQ pre_wait_ns;
  wire pre_request = pre_wait_r && ras_timer_zero_r && ~auto_pre_r;
  localparam nRP_CLKS = (nCK_PER_CLK == 1) ? nRP :
                        (nCK_PER_CLK == 2) ? ((nRP/2) + (nRP%2)) :
                        /*(nCK_PER_CLK == 4)*/ ((nRP/4) + ((nRP%4) ? 1 : 0));

  // Subtract two because there are a minimum of two fabric states from
  // end of RP timer until earliest possible arb to send act.
  localparam nRP_CLKS_M2 = (nRP_CLKS-2 < 0) ? 0 : nRP_CLKS-2;
  localparam RP_TIMER_WIDTH = clogb2(nRP_CLKS_M2 + 1);
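  // Illustrative arithmetic (added; assumes nRP = 6 with nCK_PER_CLK = 2):
  // nRP_CLKS = 6/2 + 6%2 = 3, so nRP_CLKS_M2 = 1 and
  // RP_TIMER_WIDTH = clogb2(2) = 1.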
  if ((nCK_PER_CLK == 4) && (ADDR_CMD_MODE != "2T")) begin
    assign start_pre = pre_wait_r && ras_timer_zero_r &&
                       (sending_pre || auto_pre_r);
    assign rts_pre = ~sending_pre && pre_request;
  end
  else begin
    assign start_pre = pre_wait_r && ras_timer_zero_r &&
                       (sending_row || auto_pre_r);
    assign rts_pre = 1'b0;
  end
  reg [RP_TIMER_WIDTH-1:0] rp_timer_r = ZERO[RP_TIMER_WIDTH-1:0];

  if (nRP_CLKS_M2 > ZERO) begin : rp_timer
    reg [RP_TIMER_WIDTH-1:0] rp_timer_ns;
    always @(/*AS*/rp_timer_r or rst or start_pre)
      if (rst) rp_timer_ns = ZERO[RP_TIMER_WIDTH-1:0];
      else begin
        rp_timer_ns = rp_timer_r;
        if (start_pre) rp_timer_ns = nRP_CLKS_M2[RP_TIMER_WIDTH-1:0];
        else if (|rp_timer_r) rp_timer_ns =
                                rp_timer_r - ONE[RP_TIMER_WIDTH-1:0];
      end
    always @(posedge clk) rp_timer_r <= #TCQ rp_timer_ns;
  end // block: rp_timer
  output wire precharge_bm_end;
  assign precharge_bm_end = (rp_timer_r == ONE[RP_TIMER_WIDTH-1:0]) ||
                            (start_pre && (nRP_CLKS_M2 == ZERO));
  // Compute RRD related activate inhibit.
  // Compare this bank machine's rank with others, then
  // select the result based on grant. An alternative is to
  // select the just issued rank with the grant and simply
  // compare against this bank machine's rank. However, this
  // serializes the selection of the rank and the compare processes.
  // As implemented below, the compare occurs first, then the
  // selection based on grant. This is faster.
  input [RANK_WIDTH-1:0] req_rank_r;
  input [(RANK_WIDTH*nBANK_MACHS*2)-1:0] req_rank_r_in;

  input [(nBANK_MACHS*2)-1:0] start_rcd_in;
  always @(/*AS*/req_rank_r or req_rank_r_in or start_rcd_in) begin
    inhbt_act_rrd = 1'b0;
    for (j=(ID+1); j<(ID+nBANK_MACHS); j=j+1)
      inhbt_act_rrd = inhbt_act_rrd || start_rcd_in[j];
  end

  always @(/*AS*/req_rank_r or req_rank_r_in or start_rcd_in) begin
    inhbt_act_rrd = 1'b0;
    for (j=(ID+1); j<(ID+nBANK_MACHS); j=j+1)
      inhbt_act_rrd = inhbt_act_rrd ||
                      (start_rcd_in[j] &&
                       (req_rank_r_in[(j*RANK_WIDTH)+:RANK_WIDTH] ==
                        req_rank_r));
  end
  // Extract the activate command inhibit for the rank associated
  // with this request. FAW and RRD are computed separately so that
  // gate level timing can be carefully managed.
  input [RANKS-1:0] inhbt_act_faw_r;
  wire my_inhbt_act_faw = inhbt_act_faw_r[req_rank_r];

  input wait_for_maint_r;
  wire act_req = ~idle_r && head_r && act_wait_r && ras_timer_zero_r &&
                 ~wait_for_maint_r;
  // Implement simple starvation avoidance for act requests. Precharge
  // requests don't need this because they are never gated off by
  // timing events such as inhbt_act_rrd. Priority request timeout
  // is fixed at a single trip around the round robin arbiter.
  wire rts_act_denied = act_req && sent_row && ~sending_row;

  reg [BM_CNT_WIDTH-1:0] act_starve_limit_cntr_ns;
  reg [BM_CNT_WIDTH-1:0] act_starve_limit_cntr_r;
  if (BM_CNT_WIDTH > 1) // Number of Bank Machs > 2
    begin : BM_MORE_THAN_2
      always @(/*AS*/act_req or act_starve_limit_cntr_r or rts_act_denied)
        begin
          act_starve_limit_cntr_ns = act_starve_limit_cntr_r;
          if (~act_req)
            act_starve_limit_cntr_ns = {BM_CNT_WIDTH{1'b0}};
          if (rts_act_denied && &act_starve_limit_cntr_r)
            act_starve_limit_cntr_ns = act_starve_limit_cntr_r +
                                       {{BM_CNT_WIDTH-1{1'b0}}, 1'b1};
        end
    end
  else // Number of Bank Machs == 2
    always @(/*AS*/act_req or act_starve_limit_cntr_r or rts_act_denied)
      begin
        act_starve_limit_cntr_ns = act_starve_limit_cntr_r;
        if (~act_req)
          act_starve_limit_cntr_ns = {BM_CNT_WIDTH{1'b0}};
        if (rts_act_denied && &act_starve_limit_cntr_r)
          act_starve_limit_cntr_ns = act_starve_limit_cntr_r + 1'b1;
      end
  always @(posedge clk) act_starve_limit_cntr_r <=
                          #TCQ act_starve_limit_cntr_ns;
  reg demand_act_priority_r;
  wire demand_act_priority_ns = act_req &&
    (demand_act_priority_r || (rts_act_denied && &act_starve_limit_cntr_r));
  always @(posedge clk) demand_act_priority_r <= #TCQ demand_act_priority_ns;

  cover_demand_act_priority:
    cover property (@(posedge clk) (~rst && demand_act_priority_r));

  output wire demand_act_priority;
  assign demand_act_priority = demand_act_priority_r && ~sending_row;
  // Compute act_demanded from other demand_act_priorities.
  input [(nBANK_MACHS*2)-1:0] demand_act_priority_in;
  reg act_demanded = 1'b0;
  if (nBANK_MACHS > 1) begin : compute_act_demanded

  wire row_demand_ok = demand_act_priority_r || ~act_demanded;
  // Generate the Request To Send row arbitration signal.
  if ((nCK_PER_CLK == 4) && (ADDR_CMD_MODE != "2T"))
    assign rts_row = ~sending_row && row_demand_ok &&
                     (act_req && ~my_inhbt_act_faw && ~inhbt_act_rrd);
  else
    assign rts_row = ~sending_row && row_demand_ok &&
                     ((act_req && ~my_inhbt_act_faw && ~inhbt_act_rrd) ||
                      pre_request);

  four_activate_window_wait:
    cover property (@(posedge clk)
      (~rst && ~sending_row && act_req && my_inhbt_act_faw));
  cover property (@(posedge clk)
      (~rst && ~sending_row && act_req && inhbt_act_rrd));
  // Provide rank machines early knowledge that this bank machine is
  // going to send an activate to the rank. In this way, the rank
  // machines just need to use the sending_row wire to figure out if
  // they need to keep track of the activate.
  output reg [RANKS-1:0] act_this_rank_r;
  reg [RANKS-1:0] act_this_rank_ns;
  always @(/*AS*/act_wait_r or req_rank_r) begin
    act_this_rank_ns = {RANKS{1'b0}};
    for (i = 0; i < RANKS; i = i + 1)
      act_this_rank_ns[i] = act_wait_r && (i[RANK_WIDTH-1:0] == req_rank_r);
  end
  always @(posedge clk) act_this_rank_r <= #TCQ act_this_rank_ns;
  // Generate request to send column command signal.
  wire req_bank_rdy_ns = order_q_zero && col_wait_r;
  always @(posedge clk) req_bank_rdy_r <= #TCQ req_bank_rdy_ns;
  // Determine if we have been denied a column command request.
  wire rts_col_denied = req_bank_rdy_r && sent_col && ~sending_col;
  // Implement a starvation limit counter. Count the number of times a
  // request to send a column command has been denied.
  localparam STARVE_LIMIT_CNT = STARVE_LIMIT * nBANK_MACHS;
  localparam STARVE_LIMIT_WIDTH = clogb2(STARVE_LIMIT_CNT);
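  // Illustrative arithmetic (added; assumes the default STARVE_LIMIT = 2 and
  // nBANK_MACHS = 4): STARVE_LIMIT_CNT = 2 * 4 = 8 denials and
  // STARVE_LIMIT_WIDTH = clogb2(8) = 3 bits.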
  reg [STARVE_LIMIT_WIDTH-1:0] starve_limit_cntr_r;
  reg [STARVE_LIMIT_WIDTH-1:0] starve_limit_cntr_ns;
  always @(/*AS*/col_wait_r or rts_col_denied or starve_limit_cntr_r)
    if (~col_wait_r)
      starve_limit_cntr_ns = {STARVE_LIMIT_WIDTH{1'b0}};
    else if (rts_col_denied && (starve_limit_cntr_r != STARVE_LIMIT_CNT-1))
      starve_limit_cntr_ns = starve_limit_cntr_r +
                             {{STARVE_LIMIT_WIDTH-1{1'b0}}, 1'b1};
    else starve_limit_cntr_ns = starve_limit_cntr_r;
  always @(posedge clk) starve_limit_cntr_r <= #TCQ starve_limit_cntr_ns;
  input q_has_priority;

  // Decide if this bank machine should demand priority. Priority is demanded
  // when the starvation limit counter is reached, or when a priority bit is
  // set in the request.
  wire starved = ((starve_limit_cntr_r == (STARVE_LIMIT_CNT-1)) &&
                  rts_col_denied);

  input req_priority_r;
  reg demand_priority_r;
  wire demand_priority_ns = ~idle_ns && col_wait_ns &&
                            (demand_priority_r ||
                             (order_q_zero &&
                              (req_priority_r || q_has_priority)) ||
                             (starved && (q_has_rd || ~req_wr_r)));
  always @(posedge clk) demand_priority_r <= #TCQ demand_priority_ns;
  wire rdy_for_priority = ~rst && ~demand_priority_r && ~idle_ns &&
                          col_wait_ns;

  req_triggers_demand_priority:
    cover property (@(posedge clk)
      (rdy_for_priority && req_priority_r && ~q_has_priority && ~starved));
  q_priority_triggers_demand_priority:
    cover property (@(posedge clk)
      (rdy_for_priority && ~req_priority_r && q_has_priority && ~starved));
  wire not_req_or_q_rdy_for_priority =
         rdy_for_priority && ~req_priority_r && ~q_has_priority;
  starved_req_triggers_demand_priority:
    cover property (@(posedge clk)
      (not_req_or_q_rdy_for_priority && starved && ~q_has_rd && ~req_wr_r));
  starved_q_triggers_demand_priority:
    cover property (@(posedge clk)
      (not_req_or_q_rdy_for_priority && starved && q_has_rd && req_wr_r));
  // Compute demanded from other demand_priorities.
  input [(nBANK_MACHS*2)-1:0] demand_priority_in;

  if (nBANK_MACHS > 1) begin : compute_demanded
  // In order to make sure that there is no starvation amongst a possibly
  // unlimited stream of priority requests, add a second stage to the demand
  // priority signal. If there are no other requests demanding priority, then
  // go ahead and assert demand_priority. If any other requests are asserting
  // demand_priority, hold off asserting demand_priority until these clear,
  // then assert demand priority. It's possible to get multiple requests
  // asserting demand priority simultaneously, but that's OK. Those requests
  // will be serviced, demanded will fall, and another group of requests will
  // be allowed to assert demand_priority.
  reg demanded_prior_r;
  wire demanded_prior_ns = demanded &&
                           (demanded_prior_r || ~demand_priority_r);
  always @(posedge clk) demanded_prior_r <= #TCQ demanded_prior_ns;

  output wire demand_priority;
  assign demand_priority = demand_priority_r && ~demanded_prior_r &&
  demand_priority_gated:
    cover property (@(posedge clk) (demand_priority_r && ~demand_priority));

  if (nBANK_MACHS > 1) multiple_demand_priority:
    cover property (@(posedge clk)
  wire demand_ok = demand_priority_r || ~demanded;

  // Figure out if the request in this bank machine matches the current rank
  // configuration.
  input rnk_config_strobe;
  input rnk_config_kill_rts_col;
  input rnk_config_valid_r;
  input [RANK_WIDTH-1:0] rnk_config;

  wire rnk_config_match = rnk_config_valid_r && (rnk_config == req_rank_r);
  assign rtc = ~rnk_config_match && ~rnk_config_kill_rts_col &&
               order_q_zero && col_wait_r && demand_ok;
  // Using rank state provided by the rank machines, figure out if
  // a read request should wait for WTR or RTW.
  input [RANKS-1:0] inhbt_rd;
  wire my_inhbt_rd = inhbt_rd[req_rank_r];
  input [RANKS-1:0] inhbt_wr;
  wire my_inhbt_wr = inhbt_wr[req_rank_r];
  wire allow_rw = ~rd_wr_r ? ~my_inhbt_wr : ~my_inhbt_rd;
  // DQ bus timing constraints.

  // Column command is ready to arbitrate, except for data bus restrictions.
  wire col_rdy = (col_wait_r || ((nRCD_CLKS <= 1) && end_rcd) ||
                  (rcv_open_bank && nCK_PER_CLK == 2 &&
                   DRAM_TYPE == "DDR2" && BURST_MODE == "4") ||
                  (rcv_open_bank && nCK_PER_CLK == 4 && BURST_MODE == "8")) &&
                 order_q_zero;

  // Column command is ready to arbitrate for sending a write. Used
  // to generate early wr_data_addr for ECC mode.
  output wire col_rdy_wr;
  assign col_rdy_wr = col_rdy && ~rd_wr_r;
  // Figure out if we're ready to send a column command based on all timing
  // constraints; the checks below only matter if timing is an issue.
  wire col_cmd_rts = col_rdy && ~dq_busy_data && allow_rw && rnk_config_match;
  col_wait_for_order_q: cover property
    (@(posedge clk)
     (~rst && col_wait_r && ~order_q_zero && ~dq_busy_data && allow_rw));
  col_wait_for_dq_busy: cover property
    (@(posedge clk)
     (~rst && col_wait_r && order_q_zero && dq_busy_data && allow_rw));
  col_wait_for_allow_rw: cover property
    (@(posedge clk)
     (~rst && col_wait_r && order_q_zero && ~dq_busy_data && ~allow_rw));
  // Implement flow control for the command and control FIFOs and for the data
  // FIFO during writes.
  input phy_mc_ctl_full;
  input phy_mc_cmd_full;
  input phy_mc_data_full;

  // Register ctl_full and cmd_full.
  reg phy_mc_ctl_full_r = 1'b0;
  reg phy_mc_cmd_full_r = 1'b0;
  always @(posedge clk)
    if (rst) begin
      phy_mc_ctl_full_r <= #TCQ 1'b0;
      phy_mc_cmd_full_r <= #TCQ 1'b0;
    end
    else begin
      phy_mc_ctl_full_r <= #TCQ phy_mc_ctl_full;
      phy_mc_cmd_full_r <= #TCQ phy_mc_cmd_full;
    end
  // Register output data pre-fifo almost full condition and fold in WR status.
  reg ofs_rdy_r = 1'b0;
  always @(posedge clk)
    if (rst)
      ofs_rdy_r <= #TCQ 1'b0;
    else
      ofs_rdy_r <= #TCQ ~phy_mc_cmd_full_r && ~phy_mc_ctl_full_r &&
                        ~(phy_mc_data_full && ~rd_wr_r);
  // Disable the priority feature for one state after a config to ensure
  // forward progress on the just installed io config.
  reg override_demand_r;
  wire override_demand_ns = rnk_config_strobe || rnk_config_kill_rts_col;
  always @(posedge clk) override_demand_r <= override_demand_ns;
  assign rts_col = ~sending_col && (demand_ok || override_demand_r) &&
                   col_cmd_rts && ofs_rdy_r;
  // As in act_this_rank, wr/rd_this_rank informs rank machines
  // that this bank machine is doing a write/read. Removes logic
  // after the grant.
  reg [RANKS-1:0] wr_this_rank_ns;
  reg [RANKS-1:0] rd_this_rank_ns;
  always @(/*AS*/rd_wr_r or req_rank_r) begin
    wr_this_rank_ns = {RANKS{1'b0}};
    rd_this_rank_ns = {RANKS{1'b0}};
    for (i=0; i<RANKS; i=i+1) begin
      wr_this_rank_ns[i] = ~rd_wr_r && (i[RANK_WIDTH-1:0] == req_rank_r);
      rd_this_rank_ns[i] = rd_wr_r && (i[RANK_WIDTH-1:0] == req_rank_r);
    end
  end
  output reg [RANKS-1:0] wr_this_rank_r;
  always @(posedge clk) wr_this_rank_r <= #TCQ wr_this_rank_ns;
  output reg [RANKS-1:0] rd_this_rank_r;
  always @(posedge clk) rd_this_rank_r <= #TCQ rd_this_rank_ns;
endmodule // bank_state