AMC13
Firmwares for the different applications of the AMC13 uTCA board made at Boston University
mig_7series_v1_9_rank_common.v
//*****************************************************************************
// (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved.
//
// This file contains confidential and proprietary information
// of Xilinx, Inc. and is protected under U.S. and
// international copyright and other intellectual property
// laws.
//
// DISCLAIMER
// This disclaimer is not a license and does not grant any
// rights to the materials distributed herewith. Except as
// otherwise provided in a valid license issued to you by
// Xilinx, and to the maximum extent permitted by applicable
// law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
// WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
// AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
// BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
// INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
// (2) Xilinx shall not be liable (whether in contract or tort,
// including negligence, or under any other theory of
// liability) for any loss or damage of any kind or nature
// related to, arising under or in connection with these
// materials, including for any direct, or any indirect,
// special, incidental, or consequential loss or damage
// (including loss of data, profits, goodwill, or any type of
// loss or damage suffered as a result of any action brought
// by a third party) even if such damage or loss was
// reasonably foreseeable or Xilinx had been advised of the
// possibility of the same.
//
// CRITICAL APPLICATIONS
// Xilinx products are not designed or intended to be fail-
// safe, or for use in any application requiring fail-safe
// performance, such as life-support or safety devices or
// systems, Class III medical devices, nuclear facilities,
// applications related to the deployment of airbags, or any
// other applications that could lead to death, personal
// injury, or severe property or environmental damage
// (individually and collectively, "Critical
// Applications"). Customer assumes the sole risk and
// liability of any use of Xilinx products in Critical
// Applications, subject only to applicable laws and
// regulations governing limitations on product liability.
//
// THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
// PART OF THIS FILE AT ALL TIMES.
//
//*****************************************************************************
//   ____  ____
//  /   /\/   /
// /___/  \  /    Vendor             : Xilinx
// \   \   \/     Version            : %version
//  \   \         Application        : MIG
//  /   /         Filename           : rank_common.v
// /___/   /\     Date Last Modified : $date$
// \   \  /  \    Date Created       : Tue Jun 30 2009
//  \___\/\___\
//
// Device        : 7-Series
// Design Name   : DDR3 SDRAM
// Purpose       :
// Reference     :
// Revision History :
//*****************************************************************************

// Block for logic common to all rank machines. Contains
// a clock prescaler, and arbiters for refresh and periodic
// read functions.

`timescale 1 ps / 1 ps

module mig_7series_v1_9_rank_common #
  (
   parameter TCQ = 100,
   parameter DRAM_TYPE = "DDR3",
   parameter MAINT_PRESCALER_DIV = 40,
   parameter nBANK_MACHS = 4,
   parameter nCKESR = 4,
   parameter nCK_PER_CLK = 2,
   parameter PERIODIC_RD_TIMER_DIV = 20,
   parameter RANK_WIDTH = 2,
   parameter RANKS = 4,
   parameter REFRESH_TIMER_DIV = 39,
   parameter ZQ_TIMER_DIV = 640000
  )
  (/*AUTOARG*/
   // Outputs
   maint_prescaler_tick_r, refresh_tick, maint_zq_r, maint_sre_r, maint_srx_r,
   maint_req_r, maint_rank_r, clear_periodic_rd_request, periodic_rd_r,
   periodic_rd_rank_r, app_ref_ack, app_zq_ack, app_sr_active, maint_ref_zq_wip,
   // Inputs
   clk, rst, init_calib_complete, app_ref_req, app_zq_req, app_sr_req,
   insert_maint_r1, refresh_request, maint_wip_r, slot_0_present, slot_1_present,
   periodic_rd_request, periodic_rd_ack_r
   );

  function integer clogb2 (input integer size); // ceiling logb2
    begin
      size = size - 1;
      for (clogb2=1; size>1; clogb2=clogb2+1)
        size = size >> 1;
    end
  endfunction // clogb2
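  // Worked example (illustrative, not in the original source): clogb2 returns
  // the bit width needed to count `size` distinct values, e.g. clogb2(41) = 6,
  // so a 6-bit down-counter can hold MAINT_PRESCALER_DIV = 40 below.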

  input clk;
  input rst;

// Maintenance and periodic read prescaler. Nominally 200 nS.
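// Arithmetic sketch (the 5 ns fabric clock is an assumption for illustration):
// MAINT_PRESCALER_DIV = 40 clocks x 5 ns = 200 ns per maint_prescaler_tick_r;
// the refresh and ZQ dividers below count in these ticks.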
  localparam ONE = 1;
  localparam MAINT_PRESCALER_WIDTH = clogb2(MAINT_PRESCALER_DIV + 1);
  input init_calib_complete;
  reg maint_prescaler_tick_r_lcl;
  generate
    begin : maint_prescaler
      reg [MAINT_PRESCALER_WIDTH-1:0] maint_prescaler_r;
      reg [MAINT_PRESCALER_WIDTH-1:0] maint_prescaler_ns;
      wire maint_prescaler_tick_ns =
        (maint_prescaler_r == ONE[MAINT_PRESCALER_WIDTH-1:0]);
      always @(/*AS*/init_calib_complete or maint_prescaler_r
               or maint_prescaler_tick_ns) begin
        maint_prescaler_ns = maint_prescaler_r;
        if (~init_calib_complete || maint_prescaler_tick_ns)
          maint_prescaler_ns = MAINT_PRESCALER_DIV[MAINT_PRESCALER_WIDTH-1:0];
        else if (|maint_prescaler_r)
          maint_prescaler_ns = maint_prescaler_r - ONE[MAINT_PRESCALER_WIDTH-1:0];
      end
      always @(posedge clk) maint_prescaler_r <= #TCQ maint_prescaler_ns;

      always @(posedge clk) maint_prescaler_tick_r_lcl <=
                              #TCQ maint_prescaler_tick_ns;
    end
  endgenerate
  output wire maint_prescaler_tick_r;
  assign maint_prescaler_tick_r = maint_prescaler_tick_r_lcl;

// Refresh timebase. Nominally 7800 nS.
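// Timebase check (illustrative, assuming the 200 ns prescaler tick above):
// REFRESH_TIMER_DIV = 39 ticks x 200 ns = 7800 ns, i.e. the standard DDR3
// tREFI of 7.8 us at normal operating temperature.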
  localparam REFRESH_TIMER_WIDTH = clogb2(REFRESH_TIMER_DIV + /*idle*/ 1);
  wire refresh_tick_lcl;
  generate
    begin : refresh_timer
      reg [REFRESH_TIMER_WIDTH-1:0] refresh_timer_r;
      reg [REFRESH_TIMER_WIDTH-1:0] refresh_timer_ns;
      always @(/*AS*/init_calib_complete or maint_prescaler_tick_r_lcl
               or refresh_tick_lcl or refresh_timer_r) begin
        refresh_timer_ns = refresh_timer_r;
        if (~init_calib_complete || refresh_tick_lcl)
          refresh_timer_ns = REFRESH_TIMER_DIV[REFRESH_TIMER_WIDTH-1:0];
        else if (|refresh_timer_r && maint_prescaler_tick_r_lcl)
          refresh_timer_ns =
            refresh_timer_r - ONE[REFRESH_TIMER_WIDTH-1:0];
      end
      always @(posedge clk) refresh_timer_r <= #TCQ refresh_timer_ns;
      assign refresh_tick_lcl = (refresh_timer_r ==
                 ONE[REFRESH_TIMER_WIDTH-1:0]) && maint_prescaler_tick_r_lcl;
    end
  endgenerate
  output wire refresh_tick;
  assign refresh_tick = refresh_tick_lcl;

// ZQ timebase. Nominally 128 mS
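// Interval check (illustrative, assuming the 200 ns prescaler tick above):
// ZQ_TIMER_DIV = 640000 ticks x 200 ns = 128 ms between periodic ZQ
// calibration requests.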
  localparam ZQ_TIMER_WIDTH = clogb2(ZQ_TIMER_DIV + 1);
  input app_zq_req;
  input insert_maint_r1;
  reg maint_zq_r_lcl;
  reg zq_request = 1'b0;
  generate
    if (DRAM_TYPE == "DDR3") begin : zq_cntrl
      reg zq_tick = 1'b0;
      if (ZQ_TIMER_DIV != 0) begin : zq_timer
        reg [ZQ_TIMER_WIDTH-1:0] zq_timer_r;
        reg [ZQ_TIMER_WIDTH-1:0] zq_timer_ns;
        always @(/*AS*/init_calib_complete or maint_prescaler_tick_r_lcl
                 or zq_tick or zq_timer_r) begin
          zq_timer_ns = zq_timer_r;
          if (~init_calib_complete || zq_tick)
            zq_timer_ns = ZQ_TIMER_DIV[ZQ_TIMER_WIDTH-1:0];
          else if (|zq_timer_r && maint_prescaler_tick_r_lcl)
            zq_timer_ns = zq_timer_r - ONE[ZQ_TIMER_WIDTH-1:0];
        end
        always @(posedge clk) zq_timer_r <= #TCQ zq_timer_ns;
        always @(/*AS*/maint_prescaler_tick_r_lcl or zq_timer_r)
          zq_tick = (zq_timer_r ==
                     ONE[ZQ_TIMER_WIDTH-1:0] && maint_prescaler_tick_r_lcl);
      end // zq_timer

// ZQ request. Set request with timer tick, and when exiting PHY init. Never
// request if ZQ_TIMER_DIV == 0.
      begin : zq_request_logic
        wire zq_clears_zq_request = insert_maint_r1 && maint_zq_r_lcl;
        reg zq_request_r;
        wire zq_request_ns = ~rst && (DRAM_TYPE == "DDR3") &&
                             ((~init_calib_complete && (ZQ_TIMER_DIV != 0)) ||
                              (zq_request_r && ~zq_clears_zq_request) ||
                              zq_tick ||
                              (app_zq_req && init_calib_complete));
        always @(posedge clk) zq_request_r <= #TCQ zq_request_ns;
        always @(/*AS*/init_calib_complete or zq_request_r)
          zq_request = init_calib_complete && zq_request_r;
      end // zq_request_logic
    end
  endgenerate

  // Self-refresh control
  localparam nCKESR_CLKS = (nCKESR / nCK_PER_CLK) + (nCKESR % nCK_PER_CLK ? 1 : 0);
  localparam CKESR_TIMER_WIDTH = clogb2(nCKESR_CLKS + 1);
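  // Ceiling-division example (illustrative, not in the original): with
  // nCKESR = 4 memory clocks and nCK_PER_CLK = 2, nCKESR_CLKS = 4/2 + 0 = 2
  // fabric clocks of minimum self-refresh residency; nCKESR = 5 rounds up to 3.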
  input app_sr_req;
  reg maint_sre_r_lcl;
  reg maint_srx_r_lcl;
  reg sre_request = 1'b0;
  wire inhbt_srx;

  generate begin : sr_cntrl

    // SRE request. Set request with user request.
    begin : sre_request_logic

      reg sre_request_r;
      wire sre_clears_sre_request = insert_maint_r1 && maint_sre_r_lcl;

      wire sre_request_ns = ~rst && ((sre_request_r && ~sre_clears_sre_request)
                            || (app_sr_req && init_calib_complete && ~maint_sre_r_lcl));

      always @(posedge clk) sre_request_r <= #TCQ sre_request_ns;

      always @(init_calib_complete or sre_request_r)
        sre_request = init_calib_complete && sre_request_r;

    end // sre_request_logic

    // CKESR timer: Self-Refresh must be maintained for a minimum of tCKESR
    begin : ckesr_timer

      reg [CKESR_TIMER_WIDTH-1:0] ckesr_timer_r = {CKESR_TIMER_WIDTH{1'b0}};
      reg [CKESR_TIMER_WIDTH-1:0] ckesr_timer_ns = {CKESR_TIMER_WIDTH{1'b0}};

      always @(insert_maint_r1 or ckesr_timer_r or maint_sre_r_lcl) begin

        ckesr_timer_ns = ckesr_timer_r;

        if (insert_maint_r1 && maint_sre_r_lcl)
          ckesr_timer_ns = nCKESR_CLKS[CKESR_TIMER_WIDTH-1:0];
        else if (|ckesr_timer_r)
          ckesr_timer_ns = ckesr_timer_r - ONE[CKESR_TIMER_WIDTH-1:0];

      end

      always @(posedge clk) ckesr_timer_r <= #TCQ ckesr_timer_ns;

      assign inhbt_srx = |ckesr_timer_r;

    end // ckesr_timer

  end

  endgenerate
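  // Descriptive note (not in the original): inhbt_srx stays high while the
  // tCKESR counter is nonzero, blocking the self-refresh-exit term in
  // maint_srx_ns below until the minimum residency has elapsed.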

// DRAM maintenance operations (refresh and ZQ calibration) and self-refresh
// have their own channel in the queue. There is also a single, very simple
// bank machine dedicated to these operations. It's assumed that the
// maintenance operations can be completed quickly enough
// to avoid any queuing.
//
// ZQ, refresh and self-refresh requests share a channel into the controller.
// Self-refresh is appended to the uppermost bit of the request bus and ZQ is
// appended just below that.
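// Concretely (restating the code below):
//   maint_request = {sre_request, zq_request, refresh_request[RANKS-1:0]}
// so bit RANKS+1 requests self-refresh entry, bit RANKS requests ZQ
// calibration, and bits RANKS-1:0 carry the per-rank refresh requests.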
267 
268  input[RANKS-1:0] refresh_request;
269  input maint_wip_r;
270  reg maint_req_r_lcl;
271  reg [RANK_WIDTH-1:0] maint_rank_r_lcl;
272  input [7:0] slot_0_present;
273  input [7:0] slot_1_present;
274 
  generate
    begin : maintenance_request

// Maintenance request pipeline.
      reg upd_last_master_r;
      reg new_maint_rank_r;
      wire maint_busy = upd_last_master_r || new_maint_rank_r ||
                        maint_req_r_lcl || maint_wip_r;
      wire [RANKS+1:0] maint_request = {sre_request, zq_request, refresh_request[RANKS-1:0]};
      wire upd_last_master_ns = |maint_request && ~maint_busy;
      always @(posedge clk) upd_last_master_r <= #TCQ upd_last_master_ns;
      always @(posedge clk) new_maint_rank_r <= #TCQ upd_last_master_r;
      always @(posedge clk) maint_req_r_lcl <= #TCQ new_maint_rank_r;

// Arbitrate maintenance requests.
      wire [RANKS+1:0] maint_grant_ns;
      wire [RANKS+1:0] maint_grant_r;
      mig_7series_v1_9_round_robin_arb #
        (.WIDTH (RANKS+2))
        maint_arb0
          (.grant_ns (maint_grant_ns),
           .grant_r (maint_grant_r),
           .upd_last_master (upd_last_master_r),
           .current_master (maint_grant_r),
           .req (maint_request),
           .disable_grant (1'b0),
           /*AUTOINST*/
           // Inputs
           .clk (clk),
           .rst (rst));
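      // Descriptive note (inferred from the arbiter's hookup, not stated in
      // the original): feeding grant_r back as current_master rotates priority
      // past the last-granted requester on each upd_last_master_r pulse,
      // giving round-robin fairness among refresh, ZQ and SRE requests.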

// Look at arbitration results. Decide if ZQ, refresh or self-refresh.
// If refresh, select the maintenance rank from the winning rank controller.
// If ZQ or self-refresh, generate a sequence of rank numbers corresponding to
// the populated slots. maint_rank_r is not used for comparisons in the queue
// for ZQ or self-refresh requests. The bank machine will enable CS for a
// number of states equal to the number of occupied slots. This will produce
// a command to every occupied slot, but not in any particular order.
      wire [7:0] present = slot_0_present | slot_1_present;
      integer i;
      reg [RANK_WIDTH-1:0] maint_rank_ns;
      wire maint_zq_ns = ~rst && (upd_last_master_r
                                    ? maint_grant_r[RANKS]
                                    : maint_zq_r_lcl);
      wire maint_srx_ns = ~rst && (maint_sre_r_lcl
                                     ? ~app_sr_req & ~inhbt_srx
                                     : maint_srx_r_lcl && upd_last_master_r
                                         ? maint_grant_r[RANKS+1]
                                         : maint_srx_r_lcl);
      wire maint_sre_ns = ~rst && (upd_last_master_r
                                     ? maint_grant_r[RANKS+1]
                                     : maint_sre_r_lcl && ~maint_srx_ns);
      always @(/*AS*/maint_grant_r or maint_rank_r_lcl or maint_zq_ns
               or maint_sre_ns or maint_srx_ns or present or rst
               or upd_last_master_r) begin
        if (rst) maint_rank_ns = {RANK_WIDTH{1'b0}};
        else begin
          maint_rank_ns = maint_rank_r_lcl;
          if (maint_zq_ns || maint_sre_ns || maint_srx_ns) begin
            maint_rank_ns = maint_rank_r_lcl + ONE[RANK_WIDTH-1:0];
            for (i=0; i<8; i=i+1)
              if (~present[maint_rank_ns])
                maint_rank_ns = maint_rank_ns + ONE[RANK_WIDTH-1:0];
          end
          else
            if (upd_last_master_r)
              for (i=0; i<RANKS; i=i+1)
                if (maint_grant_r[i]) maint_rank_ns = i[RANK_WIDTH-1:0];
        end
      end
      always @(posedge clk) maint_rank_r_lcl <= #TCQ maint_rank_ns;
      always @(posedge clk) maint_zq_r_lcl <= #TCQ maint_zq_ns;
      always @(posedge clk) maint_sre_r_lcl <= #TCQ maint_sre_ns;
      always @(posedge clk) maint_srx_r_lcl <= #TCQ maint_srx_ns;

    end // block: maintenance_request
  endgenerate
  output wire maint_zq_r;
  assign maint_zq_r = maint_zq_r_lcl;
  output wire maint_sre_r;
  assign maint_sre_r = maint_sre_r_lcl;
  output wire maint_srx_r;
  assign maint_srx_r = maint_srx_r_lcl;
  output wire maint_req_r;
  assign maint_req_r = maint_req_r_lcl;
  output wire [RANK_WIDTH-1:0] maint_rank_r;
  assign maint_rank_r = maint_rank_r_lcl;

// Indicate whether self-refresh is active or not.

  output app_sr_active;
  reg app_sr_active_r;

  wire app_sr_active_ns =
    insert_maint_r1 ? maint_sre_r && ~maint_srx_r : app_sr_active_r;

  always @(posedge clk) app_sr_active_r <= #TCQ app_sr_active_ns;

  assign app_sr_active = app_sr_active_r;

// Acknowledge user REF and ZQ Requests

  input app_ref_req;
  output app_ref_ack;
  wire app_ref_ack_ns;
  wire app_ref_ns;
  reg app_ref_ack_r = 1'b0;
  reg app_ref_r = 1'b0;

  assign app_ref_ns = init_calib_complete && (app_ref_req || app_ref_r && |refresh_request);
  assign app_ref_ack_ns = app_ref_r && ~|refresh_request;

  always @(posedge clk) app_ref_r <= #TCQ app_ref_ns;
  always @(posedge clk) app_ref_ack_r <= #TCQ app_ref_ack_ns;

  assign app_ref_ack = app_ref_ack_r;

  output app_zq_ack;
  wire app_zq_ack_ns;
  wire app_zq_ns;
  reg app_zq_ack_r = 1'b0;
  reg app_zq_r = 1'b0;

  assign app_zq_ns = init_calib_complete && (app_zq_req || app_zq_r && zq_request);
  assign app_zq_ack_ns = app_zq_r && ~zq_request;

  always @(posedge clk) app_zq_r <= #TCQ app_zq_ns;
  always @(posedge clk) app_zq_ack_r <= #TCQ app_zq_ack_ns;

  assign app_zq_ack = app_zq_ack_r;

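  // Handshake note (descriptive, not in the original): app_ref_r and app_zq_r
  // latch a user request while the corresponding internal request is still
  // pending; the ack registers go high only once that internal request has
  // cleared, i.e. after the maintenance command has been accepted.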
// Periodic reads to maintain PHY alignment.
// Demand insertion of a periodic read as soon as possible. Since the
// single rank/bank compare mechanism must be used, periodic reads must be
// forced in at the expense of not accepting a normal request.

  input [RANKS-1:0] periodic_rd_request;
  reg periodic_rd_r_lcl;
  reg [RANK_WIDTH-1:0] periodic_rd_rank_r_lcl;
  input periodic_rd_ack_r;
  output wire [RANKS-1:0] clear_periodic_rd_request;
  output wire periodic_rd_r;
  output wire [RANK_WIDTH-1:0] periodic_rd_rank_r;

  generate
    // This is not needed in 7-Series and should remain disabled
    if (PERIODIC_RD_TIMER_DIV != 0) begin : periodic_read_request

// Maintenance request pipeline.
      reg periodic_rd_r_cnt;
      wire int_periodic_rd_ack_r = (periodic_rd_ack_r && periodic_rd_r_cnt);
      reg upd_last_master_r;
      wire periodic_rd_busy = upd_last_master_r || periodic_rd_r_lcl;
      wire upd_last_master_ns =
        init_calib_complete && (|periodic_rd_request && ~periodic_rd_busy);
      always @(posedge clk) upd_last_master_r <= #TCQ upd_last_master_ns;
      wire periodic_rd_ns = init_calib_complete &&
        (upd_last_master_r || (periodic_rd_r_lcl && ~int_periodic_rd_ack_r));
      always @(posedge clk) periodic_rd_r_lcl <= #TCQ periodic_rd_ns;

      always @(posedge clk) begin
        if (rst) periodic_rd_r_cnt <= #TCQ 1'b0;
        else if (periodic_rd_r_lcl && periodic_rd_ack_r)
          periodic_rd_r_cnt <= ~periodic_rd_r_cnt;
      end

// Arbitrate periodic read requests.
      wire [RANKS-1:0] periodic_rd_grant_ns;
      reg [RANKS-1:0] periodic_rd_grant_r;
      mig_7series_v1_9_round_robin_arb #
        (.WIDTH (RANKS))
        periodic_rd_arb0
          (.grant_ns (periodic_rd_grant_ns[RANKS-1:0]),
           .grant_r (),
           .upd_last_master (upd_last_master_r),
           .current_master (periodic_rd_grant_r[RANKS-1:0]),
           .req (periodic_rd_request[RANKS-1:0]),
           .disable_grant (1'b0),
           /*AUTOINST*/
           // Inputs
           .clk (clk),
           .rst (rst));

      always @(posedge clk) periodic_rd_grant_r = upd_last_master_ns
                                                    ? periodic_rd_grant_ns
                                                    : periodic_rd_grant_r;
// Encode and set periodic read rank into periodic_rd_rank_r.
      integer i;
      reg [RANK_WIDTH-1:0] periodic_rd_rank_ns;
      always @(/*AS*/periodic_rd_grant_r or periodic_rd_rank_r_lcl
               or upd_last_master_r) begin
        periodic_rd_rank_ns = periodic_rd_rank_r_lcl;
        if (upd_last_master_r)
          for (i=0; i<RANKS; i=i+1)
            if (periodic_rd_grant_r[i])
              periodic_rd_rank_ns = i[RANK_WIDTH-1:0];
      end
      always @(posedge clk) periodic_rd_rank_r_lcl <=
                              #TCQ periodic_rd_rank_ns;

// Once the request is dropped in the queue, it might be a while before it
// emerges. Can't clear the request based on seeing the read issued.
// Need to clear the request as soon as it's made it into the queue.
      assign clear_periodic_rd_request =
               periodic_rd_grant_r & {RANKS{periodic_rd_ack_r}};

      assign periodic_rd_r = periodic_rd_r_lcl;
      assign periodic_rd_rank_r = periodic_rd_rank_r_lcl;

    end else begin

      // Disable periodic reads
      assign clear_periodic_rd_request = {RANKS{1'b0}};
      assign periodic_rd_r = 1'b0;
      assign periodic_rd_rank_r = {RANK_WIDTH{1'b0}};

    end // block: periodic_read_request
  endgenerate

// Indicate that a refresh is in progress. The PHY will use this to schedule
// tap adjustments during idle bus time.

  reg maint_ref_zq_wip_r = 1'b0;
  output maint_ref_zq_wip;

  always @(posedge clk)
    if (rst)
      maint_ref_zq_wip_r <= #TCQ 1'b0;
    else if ((zq_request || |refresh_request) && insert_maint_r1)
      maint_ref_zq_wip_r <= #TCQ 1'b1;
    else if (~maint_wip_r)
      maint_ref_zq_wip_r <= #TCQ 1'b0;

  assign maint_ref_zq_wip = maint_ref_zq_wip_r;

endmodule
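
Example instantiation (a sketch for orientation only, not taken from the repository; the surrounding controller signal names are illustrative, and the parameter overrides simply restate defaults from the header above):

  mig_7series_v1_9_rank_common #
    (.nCK_PER_CLK          (2),
     .RANK_WIDTH           (2),
     .RANKS                (4),
     .MAINT_PRESCALER_DIV  (40),      // 200 ns tick, assuming a 5 ns clk
     .REFRESH_TIMER_DIV    (39),      // 39 ticks x 200 ns = 7800 ns (tREFI)
     .ZQ_TIMER_DIV         (640000))  // 640000 ticks x 200 ns = 128 ms
    rank_common0
      (// Outputs
       .maint_prescaler_tick_r    (maint_prescaler_tick_r),
       .refresh_tick              (refresh_tick),
       .maint_zq_r                (maint_zq_r),
       .maint_sre_r               (maint_sre_r),
       .maint_srx_r               (maint_srx_r),
       .maint_req_r               (maint_req_r),
       .maint_rank_r              (maint_rank_r),
       .clear_periodic_rd_request (clear_periodic_rd_request),
       .periodic_rd_r             (periodic_rd_r),
       .periodic_rd_rank_r        (periodic_rd_rank_r),
       .app_ref_ack               (app_ref_ack),
       .app_zq_ack                (app_zq_ack),
       .app_sr_active             (app_sr_active),
       .maint_ref_zq_wip          (maint_ref_zq_wip),
       // Inputs
       .clk                       (clk),
       .rst                       (rst),
       .init_calib_complete       (init_calib_complete),
       .app_ref_req               (app_ref_req),
       .app_zq_req                (app_zq_req),
       .app_sr_req                (app_sr_req),
       .insert_maint_r1           (insert_maint_r1),
       .refresh_request           (refresh_request),
       .maint_wip_r               (maint_wip_r),
       .slot_0_present            (slot_0_present),
       .slot_1_present            (slot_1_present),
       .periodic_rd_request       (periodic_rd_request),
       .periodic_rd_ack_r         (periodic_rd_ack_r));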