AMC13
Firmware for the different applications of the AMC13 uTCA board made at Boston University
Main Page
Design Unit List
Files
File List
All
Classes
Variables
src
common
DDR
ddr3_1_9_a
phy
mig_7series_v1_9_ddr_phy_wrcal.v
1
//*****************************************************************************
2
// (c) Copyright 2009 - 2013 Xilinx, Inc. All rights reserved.
3
//
4
// This file contains confidential and proprietary information
5
// of Xilinx, Inc. and is protected under U.S. and
6
// international copyright and other intellectual property
7
// laws.
8
//
9
// DISCLAIMER
10
// This disclaimer is not a license and does not grant any
11
// rights to the materials distributed herewith. Except as
12
// otherwise provided in a valid license issued to you by
13
// Xilinx, and to the maximum extent permitted by applicable
14
// law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
15
// WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
16
// AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
17
// BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
18
// INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
19
// (2) Xilinx shall not be liable (whether in contract or tort,
20
// including negligence, or under any other theory of
21
// liability) for any loss or damage of any kind or nature
22
// related to, arising under or in connection with these
23
// materials, including for any direct, or any indirect,
24
// special, incidental, or consequential loss or damage
25
// (including loss of data, profits, goodwill, or any type of
26
// loss or damage suffered as a result of any action brought
27
// by a third party) even if such damage or loss was
28
// reasonably foreseeable or Xilinx had been advised of the
29
// possibility of the same.
30
//
31
// CRITICAL APPLICATIONS
32
// Xilinx products are not designed or intended to be fail-
33
// safe, or for use in any application requiring fail-safe
34
// performance, such as life-support or safety devices or
35
// systems, Class III medical devices, nuclear facilities,
36
// applications related to the deployment of airbags, or any
37
// other applications that could lead to death, personal
38
// injury, or severe property or environmental damage
39
// (individually and collectively, "Critical
40
// Applications"). Customer assumes the sole risk and
41
// liability of any use of Xilinx products in Critical
42
// Applications, subject only to applicable laws and
43
// regulations governing limitations on product liability.
44
//
45
// THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
46
// PART OF THIS FILE AT ALL TIMES.
47
//
48
//*****************************************************************************
49
// ____ ____
50
// / /\/ /
51
// /___/ \ / Vendor: Xilinx
52
// \ \ \/ Version:
53
// \ \ Application: MIG
54
// / / Filename: ddr_phy_wrcal.v
55
// /___/ /\ Date Last Modified: $Date: 2011/06/02 08:35:09 $
56
// \ \ / \ Date Created:
57
// \___\/\___\
58
//
59
//Device: 7 Series
60
//Design Name: DDR3 SDRAM
61
//Purpose:
62
// Write calibration logic to align DQS to correct CK edge
63
//Reference:
64
//Revision History:
65
//*****************************************************************************
66
67
/******************************************************************************
68
**$Id: ddr_phy_wrcal.v,v 1.1 2011/06/02 08:35:09 mishra Exp $
69
**$Date: 2011/06/02 08:35:09 $
70
**$Author:
71
**$Revision:
72
**$Source:
73
*******************************************************************************/
74
75
`timescale
1ps/1ps
76
77
module
mig_7series_v1_9_ddr_phy_wrcal
#
78
(
79
parameter
TCQ
=
100
,
// clk->out delay (sim only)
80
parameter
nCK_PER_CLK
=
2
,
// # of memory clocks per CLK
81
parameter
CLK_PERIOD
=
2500
,
82
parameter
DQ_WIDTH
=
64
,
// # of DQ (data)
83
parameter
DQS_CNT_WIDTH
=
3
,
// = ceil(log2(DQS_WIDTH))
84
parameter
DQS_WIDTH
=
8
,
// # of DQS (strobe)
85
parameter
DRAM_WIDTH
=
8
,
// # of DQ per DQS
86
parameter
PRE_REV3ES
=
"OFF"
,
// Delay O/Ps using Phaser_Out fine dly
87
parameter
SIM_CAL_OPTION
=
"NONE"
// Skip various calibration steps
88
)
89
(
90
input
clk
,
91
input
rst
,
92
// Calibration status, control signals
93
input
wrcal_start
,
94
input
wrcal_rd_wait
,
95
input
wrcal_sanity_chk
,
96
input
dqsfound_retry_done
,
97
input
phy_rddata_en
,
98
output
dqsfound_retry
,
99
output
wrcal_read_req
,
100
output
reg
wrcal_act_req
,
101
output
reg
wrcal_done
,
102
output
reg
wrcal_pat_err
,
103
output
reg
wrcal_prech_req
,
104
output
reg
temp_wrcal_done
,
105
output
reg
wrcal_sanity_chk_done
,
106
input
prech_done
,
107
// Captured data in resync clock domain
108
input
[
2
*
nCK_PER_CLK
*
DQ_WIDTH
-
1
:
0
]
rd_data
,
109
// Write level values of Phaser_Out coarse and fine
110
// delay taps required to load Phaser_Out register
111
input
[
3
*
DQS_WIDTH
-
1
:
0
]
wl_po_coarse_cnt
,
112
input
[
6
*
DQS_WIDTH
-
1
:
0
]
wl_po_fine_cnt
,
113
input
wrlvl_byte_done
,
114
output
reg
wrlvl_byte_redo
,
115
output
reg
early1_data
,
116
output
reg
early2_data
,
117
// DQ IDELAY
118
output
reg
idelay_ld
,
119
output
reg
wrcal_pat_resume
,
// to phy_init for write
120
output
reg
[
DQS_CNT_WIDTH
:
0
]
po_stg2_wrcal_cnt
,
121
output
phy_if_reset
,
122
123
// Debug Port
124
output
[
6
*
DQS_WIDTH
-
1
:
0
]
dbg_final_po_fine_tap_cnt
,
125
output
[
3
*
DQS_WIDTH
-
1
:
0
]
dbg_final_po_coarse_tap_cnt
,
126
output
[
99
:
0
]
dbg_phy_wrcal
127
);
128
129
// Length of calibration sequence (in # of words)
130
//localparam CAL_PAT_LEN = 8;
131
132
// Read data shift register length
133
localparam
RD_SHIFT_LEN
=
1
;
//(nCK_PER_CLK == 4) ? 1 : 2;
134
135
// # of reads for reliable read capture
136
localparam
NUM_READS
=
2
;
137
138
// # of cycles to wait after changing RDEN count value
139
localparam
RDEN_WAIT_CNT
=
12
;
140
141
localparam
COARSE_CNT
= (
CLK_PERIOD
/
nCK_PER_CLK
<=
2500
) ?
3
:
6
;
142
localparam
FINE_CNT
= (
CLK_PERIOD
/
nCK_PER_CLK
<=
2500
) ?
22
:
44
;
143
144
145
localparam
CAL2_IDLE
=
4'h0
;
146
localparam
CAL2_READ_WAIT
=
4'h1
;
147
localparam
CAL2_NEXT_DQS
=
4'h2
;
148
localparam
CAL2_WRLVL_WAIT
=
4'h3
;
149
localparam
CAL2_IFIFO_RESET
=
4'h4
;
150
localparam
CAL2_DQ_IDEL_DEC
=
4'h5
;
151
localparam
CAL2_DONE
=
4'h6
;
152
localparam
CAL2_SANITY_WAIT
=
4'h7
;
153
localparam
CAL2_ERR
=
4'h8
;
154
155
integer
i
,
j
,
k
,
l
,
m
,
p
,
q
,
d
;
156
157
reg
[
2
:
0
]
po_coarse_tap_cnt
[
0
:
DQS_WIDTH
-
1
];
158
reg
[
3
*
DQS_WIDTH
-
1
:
0
]
po_coarse_tap_cnt_w
;
159
reg
[
5
:
0
]
po_fine_tap_cnt
[
0
:
DQS_WIDTH
-
1
];
160
reg
[
6
*
DQS_WIDTH
-
1
:
0
]
po_fine_tap_cnt_w
;
161
(*
keep
=
"true"
,
max_fanout
=
10
*)
reg
[
DQS_CNT_WIDTH
:
0
]
wrcal_dqs_cnt_r
/* synthesis syn_maxfan = 10 **/
;
162
reg
[
4
:
0
]
not_empty_wait_cnt
;
163
reg
[
3
:
0
]
tap_inc_wait_cnt
;
164
reg
cal2_done_r
;
165
reg
cal2_done_r1
;
166
reg
cal2_prech_req_r
;
167
reg
[
3
:
0
]
cal2_state_r
;
168
reg
[
3
:
0
]
cal2_state_r1
;
169
reg
[
2
:
0
]
wl_po_coarse_cnt_w
[
0
:
DQS_WIDTH
-
1
];
170
reg
[
5
:
0
]
wl_po_fine_cnt_w
[
0
:
DQS_WIDTH
-
1
];
171
reg
cal2_if_reset
;
172
reg
wrcal_pat_resume_r
;
173
reg
wrcal_pat_resume_r1
;
174
reg
wrcal_pat_resume_r2
;
175
reg
wrcal_pat_resume_r3
;
176
reg
[
DRAM_WIDTH
-
1
:
0
]
mux_rd_fall0_r
;
177
reg
[
DRAM_WIDTH
-
1
:
0
]
mux_rd_fall1_r
;
178
reg
[
DRAM_WIDTH
-
1
:
0
]
mux_rd_rise0_r
;
179
reg
[
DRAM_WIDTH
-
1
:
0
]
mux_rd_rise1_r
;
180
reg
[
DRAM_WIDTH
-
1
:
0
]
mux_rd_fall2_r
;
181
reg
[
DRAM_WIDTH
-
1
:
0
]
mux_rd_fall3_r
;
182
reg
[
DRAM_WIDTH
-
1
:
0
]
mux_rd_rise2_r
;
183
reg
[
DRAM_WIDTH
-
1
:
0
]
mux_rd_rise3_r
;
184
reg
pat_data_match_r
;
185
reg
pat1_data_match_r
;
186
reg
pat1_data_match_r1
;
187
reg
pat2_data_match_r
;
188
reg
pat_data_match_valid_r
;
189
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat_fall0
[
3
:
0
];
190
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat_fall1
[
3
:
0
];
191
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat_fall2
[
3
:
0
];
192
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat_fall3
[
3
:
0
];
193
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat1_fall0
[
3
:
0
];
194
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat1_fall1
[
3
:
0
];
195
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat2_fall0
[
3
:
0
];
196
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat2_fall1
[
3
:
0
];
197
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early_fall0
[
3
:
0
];
198
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early_fall1
[
3
:
0
];
199
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early_fall2
[
3
:
0
];
200
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early_fall3
[
3
:
0
];
201
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early1_fall0
[
3
:
0
];
202
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early1_fall1
[
3
:
0
];
203
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early2_fall0
[
3
:
0
];
204
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early2_fall1
[
3
:
0
];
205
reg
[
DRAM_WIDTH
-
1
:
0
]
pat_match_fall0_r
;
206
reg
pat_match_fall0_and_r
;
207
reg
[
DRAM_WIDTH
-
1
:
0
]
pat_match_fall1_r
;
208
reg
pat_match_fall1_and_r
;
209
reg
[
DRAM_WIDTH
-
1
:
0
]
pat_match_fall2_r
;
210
reg
pat_match_fall2_and_r
;
211
reg
[
DRAM_WIDTH
-
1
:
0
]
pat_match_fall3_r
;
212
reg
pat_match_fall3_and_r
;
213
reg
[
DRAM_WIDTH
-
1
:
0
]
pat_match_rise0_r
;
214
reg
pat_match_rise0_and_r
;
215
reg
[
DRAM_WIDTH
-
1
:
0
]
pat_match_rise1_r
;
216
reg
pat_match_rise1_and_r
;
217
reg
[
DRAM_WIDTH
-
1
:
0
]
pat_match_rise2_r
;
218
reg
pat_match_rise2_and_r
;
219
reg
[
DRAM_WIDTH
-
1
:
0
]
pat_match_rise3_r
;
220
reg
pat_match_rise3_and_r
;
221
reg
[
DRAM_WIDTH
-
1
:
0
]
pat1_match_rise0_r
;
222
reg
[
DRAM_WIDTH
-
1
:
0
]
pat1_match_rise1_r
;
223
reg
[
DRAM_WIDTH
-
1
:
0
]
pat1_match_fall0_r
;
224
reg
[
DRAM_WIDTH
-
1
:
0
]
pat1_match_fall1_r
;
225
reg
[
DRAM_WIDTH
-
1
:
0
]
pat2_match_rise0_r
;
226
reg
[
DRAM_WIDTH
-
1
:
0
]
pat2_match_rise1_r
;
227
reg
[
DRAM_WIDTH
-
1
:
0
]
pat2_match_fall0_r
;
228
reg
[
DRAM_WIDTH
-
1
:
0
]
pat2_match_fall1_r
;
229
reg
pat1_match_rise0_and_r
;
230
reg
pat1_match_rise1_and_r
;
231
reg
pat1_match_fall0_and_r
;
232
reg
pat1_match_fall1_and_r
;
233
reg
pat2_match_rise0_and_r
;
234
reg
pat2_match_rise1_and_r
;
235
reg
pat2_match_fall0_and_r
;
236
reg
pat2_match_fall1_and_r
;
237
reg
early1_data_match_r
;
238
reg
early1_data_match_r1
;
239
reg
[
DRAM_WIDTH
-
1
:
0
]
early1_match_fall0_r
;
240
reg
early1_match_fall0_and_r
;
241
reg
[
DRAM_WIDTH
-
1
:
0
]
early1_match_fall1_r
;
242
reg
early1_match_fall1_and_r
;
243
reg
[
DRAM_WIDTH
-
1
:
0
]
early1_match_fall2_r
;
244
reg
early1_match_fall2_and_r
;
245
reg
[
DRAM_WIDTH
-
1
:
0
]
early1_match_fall3_r
;
246
reg
early1_match_fall3_and_r
;
247
reg
[
DRAM_WIDTH
-
1
:
0
]
early1_match_rise0_r
;
248
reg
early1_match_rise0_and_r
;
249
reg
[
DRAM_WIDTH
-
1
:
0
]
early1_match_rise1_r
;
250
reg
early1_match_rise1_and_r
;
251
reg
[
DRAM_WIDTH
-
1
:
0
]
early1_match_rise2_r
;
252
reg
early1_match_rise2_and_r
;
253
reg
[
DRAM_WIDTH
-
1
:
0
]
early1_match_rise3_r
;
254
reg
early1_match_rise3_and_r
;
255
reg
early2_data_match_r
;
256
reg
[
DRAM_WIDTH
-
1
:
0
]
early2_match_fall0_r
;
257
reg
early2_match_fall0_and_r
;
258
reg
[
DRAM_WIDTH
-
1
:
0
]
early2_match_fall1_r
;
259
reg
early2_match_fall1_and_r
;
260
reg
[
DRAM_WIDTH
-
1
:
0
]
early2_match_fall2_r
;
261
reg
early2_match_fall2_and_r
;
262
reg
[
DRAM_WIDTH
-
1
:
0
]
early2_match_fall3_r
;
263
reg
early2_match_fall3_and_r
;
264
reg
[
DRAM_WIDTH
-
1
:
0
]
early2_match_rise0_r
;
265
reg
early2_match_rise0_and_r
;
266
reg
[
DRAM_WIDTH
-
1
:
0
]
early2_match_rise1_r
;
267
reg
early2_match_rise1_and_r
;
268
reg
[
DRAM_WIDTH
-
1
:
0
]
early2_match_rise2_r
;
269
reg
early2_match_rise2_and_r
;
270
reg
[
DRAM_WIDTH
-
1
:
0
]
early2_match_rise3_r
;
271
reg
early2_match_rise3_and_r
;
272
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat_rise0
[
3
:
0
];
273
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat_rise1
[
3
:
0
];
274
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat_rise2
[
3
:
0
];
275
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat_rise3
[
3
:
0
];
276
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat1_rise0
[
3
:
0
];
277
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat1_rise1
[
3
:
0
];
278
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat2_rise0
[
3
:
0
];
279
wire
[
RD_SHIFT_LEN
-
1
:
0
]
pat2_rise1
[
3
:
0
];
280
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early_rise0
[
3
:
0
];
281
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early_rise1
[
3
:
0
];
282
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early_rise2
[
3
:
0
];
283
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early_rise3
[
3
:
0
];
284
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early1_rise0
[
3
:
0
];
285
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early1_rise1
[
3
:
0
];
286
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early2_rise0
[
3
:
0
];
287
wire
[
RD_SHIFT_LEN
-
1
:
0
]
early2_rise1
[
3
:
0
];
288
wire
[
DQ_WIDTH
-
1
:
0
]
rd_data_rise0
;
289
wire
[
DQ_WIDTH
-
1
:
0
]
rd_data_fall0
;
290
wire
[
DQ_WIDTH
-
1
:
0
]
rd_data_rise1
;
291
wire
[
DQ_WIDTH
-
1
:
0
]
rd_data_fall1
;
292
wire
[
DQ_WIDTH
-
1
:
0
]
rd_data_rise2
;
293
wire
[
DQ_WIDTH
-
1
:
0
]
rd_data_fall2
;
294
wire
[
DQ_WIDTH
-
1
:
0
]
rd_data_rise3
;
295
wire
[
DQ_WIDTH
-
1
:
0
]
rd_data_fall3
;
296
reg
[
DQS_CNT_WIDTH
:
0
]
rd_mux_sel_r
;
297
reg
rd_active_posedge_r
;
298
reg
rd_active_r
;
299
reg
rd_active_r1
;
300
reg
rd_active_r2
;
301
reg
rd_active_r3
;
302
reg
rd_active_r4
;
303
reg
rd_active_r5
;
304
reg
[
RD_SHIFT_LEN
-
1
:
0
]
sr_fall0_r
[
DRAM_WIDTH
-
1
:
0
];
305
reg
[
RD_SHIFT_LEN
-
1
:
0
]
sr_fall1_r
[
DRAM_WIDTH
-
1
:
0
];
306
reg
[
RD_SHIFT_LEN
-
1
:
0
]
sr_rise0_r
[
DRAM_WIDTH
-
1
:
0
];
307
reg
[
RD_SHIFT_LEN
-
1
:
0
]
sr_rise1_r
[
DRAM_WIDTH
-
1
:
0
];
308
reg
[
RD_SHIFT_LEN
-
1
:
0
]
sr_fall2_r
[
DRAM_WIDTH
-
1
:
0
];
309
reg
[
RD_SHIFT_LEN
-
1
:
0
]
sr_fall3_r
[
DRAM_WIDTH
-
1
:
0
];
310
reg
[
RD_SHIFT_LEN
-
1
:
0
]
sr_rise2_r
[
DRAM_WIDTH
-
1
:
0
];
311
reg
[
RD_SHIFT_LEN
-
1
:
0
]
sr_rise3_r
[
DRAM_WIDTH
-
1
:
0
];
312
reg
wrlvl_byte_done_r
;
313
reg
idelay_ld_done
;
314
reg
pat1_detect
;
315
reg
early1_detect
;
316
reg
wrcal_sanity_chk_r
;
317
reg
wrcal_sanity_chk_err
;
318
319
320
//***************************************************************************
321
// Debug
322
//***************************************************************************
323
324
//***************************************************************************
// Debug: flatten the per-byte Phaser_Out tap-count arrays into the packed
// vectors driven out on the debug port (dbg_final_po_*_tap_cnt).
// FIX: this is a combinational always @(*) block, so use blocking
// assignments without the #TCQ delay; nonblocking/delayed assignments in
// combinational logic are a simulation hazard and serve no purpose here.
//***************************************************************************
always @(*) begin
  for (d = 0; d < DQS_WIDTH; d = d + 1) begin
    po_fine_tap_cnt_w[(6*d)+:6]   = po_fine_tap_cnt[d];
    po_coarse_tap_cnt_w[(3*d)+:3] = po_coarse_tap_cnt[d];
  end
end
330
331
assign
dbg_final_po_fine_tap_cnt
=
po_fine_tap_cnt_w
;
332
assign
dbg_final_po_coarse_tap_cnt
=
po_coarse_tap_cnt_w
;
333
334
assign
dbg_phy_wrcal
[
0
] =
pat_data_match_r
;
335
assign
dbg_phy_wrcal
[
4
:
1
] =
cal2_state_r1
[
2
:
0
];
336
assign
dbg_phy_wrcal
[
5
] =
wrcal_sanity_chk_err
;
337
assign
dbg_phy_wrcal
[
6
] =
wrcal_start
;
338
assign
dbg_phy_wrcal
[
7
] =
wrcal_done
;
339
assign
dbg_phy_wrcal
[
8
] =
pat_data_match_valid_r
;
340
assign
dbg_phy_wrcal
[
13
+:
DQS_CNT_WIDTH
]=
wrcal_dqs_cnt_r
;
341
assign
dbg_phy_wrcal
[
17
+:
5
] =
'd0
;
342
assign
dbg_phy_wrcal
[
22
+:
5
] =
'd0
;
343
assign
dbg_phy_wrcal
[
27
] =
1'b0
;
344
assign
dbg_phy_wrcal
[
28
+:
5
] =
'd0
;
345
assign
dbg_phy_wrcal
[
53
:
33
] =
'b0
;
346
assign
dbg_phy_wrcal
[
54
] =
1'b0
;
347
assign
dbg_phy_wrcal
[
55
+:
5
] =
'd0
;
348
assign
dbg_phy_wrcal
[
60
] =
1'b0
;
349
assign
dbg_phy_wrcal
[
61
+:
5
] =
'd0
;
350
assign
dbg_phy_wrcal
[
66
+:
5
] =
not_empty_wait_cnt
;
351
assign
dbg_phy_wrcal
[
71
] =
early1_data
;
352
assign
dbg_phy_wrcal
[
72
] =
early2_data
;
353
354
assign
dqsfound_retry
=
1'b0
;
355
assign
wrcal_read_req
=
1'b0
;
356
assign
phy_if_reset
=
cal2_if_reset
;
357
358
//**************************************************************************
359
// DQS count to hard PHY during write calibration using Phaser_OUT Stage2
360
// coarse delay
361
//**************************************************************************
362
363
//**************************************************************************
// Register the DQS count handed to the hard PHY (used by the Phaser_OUT
// stage-2 coarse delay during write calibration), plus one pipeline stage
// each for the write-leveling byte-done and sanity-check control inputs.
//**************************************************************************
always @(posedge clk) begin
  po_stg2_wrcal_cnt  <= #TCQ wrcal_dqs_cnt_r;
  wrlvl_byte_done_r  <= #TCQ wrlvl_byte_done;
  wrcal_sanity_chk_r <= #TCQ wrcal_sanity_chk;
end
368
369
//***************************************************************************
370
// Data mux to route appropriate byte to calibration logic - i.e. calibration
371
// is done sequentially, one byte (or DQS group) at a time
372
//***************************************************************************
373
374
generate
375
if
(
nCK_PER_CLK
==
4
)
begin
:
gen_rd_data_div4
376
assign
rd_data_rise0
=
rd_data
[
DQ_WIDTH
-
1
:
0
];
377
assign
rd_data_fall0
=
rd_data
[
2
*
DQ_WIDTH
-
1
:
DQ_WIDTH
];
378
assign
rd_data_rise1
=
rd_data
[
3
*
DQ_WIDTH
-
1
:
2
*
DQ_WIDTH
];
379
assign
rd_data_fall1
=
rd_data
[
4
*
DQ_WIDTH
-
1
:
3
*
DQ_WIDTH
];
380
assign
rd_data_rise2
=
rd_data
[
5
*
DQ_WIDTH
-
1
:
4
*
DQ_WIDTH
];
381
assign
rd_data_fall2
=
rd_data
[
6
*
DQ_WIDTH
-
1
:
5
*
DQ_WIDTH
];
382
assign
rd_data_rise3
=
rd_data
[
7
*
DQ_WIDTH
-
1
:
6
*
DQ_WIDTH
];
383
assign
rd_data_fall3
=
rd_data
[
8
*
DQ_WIDTH
-
1
:
7
*
DQ_WIDTH
];
384
end
else
if
(
nCK_PER_CLK
==
2
)
begin
:
gen_rd_data_div2
385
assign
rd_data_rise0
=
rd_data
[
DQ_WIDTH
-
1
:
0
];
386
assign
rd_data_fall0
=
rd_data
[
2
*
DQ_WIDTH
-
1
:
DQ_WIDTH
];
387
assign
rd_data_rise1
=
rd_data
[
3
*
DQ_WIDTH
-
1
:
2
*
DQ_WIDTH
];
388
assign
rd_data_fall1
=
rd_data
[
4
*
DQ_WIDTH
-
1
:
3
*
DQ_WIDTH
];
389
end
390
endgenerate
391
392
//**************************************************************************
393
// Final Phaser OUT coarse and fine delay taps after write calibration
394
// Sum of taps used during write leveling taps and write calibration
395
//**************************************************************************
396
397
//**************************************************************************
// Unpack the packed write-leveling tap-count buses into per-byte arrays.
// Combinational; blocking assignments, one 3-bit coarse and one 6-bit fine
// field per DQS group.
//**************************************************************************
always @(*) begin
  for (m = 0; m < DQS_WIDTH; m = m + 1) begin
    wl_po_coarse_cnt_w[m] = wl_po_coarse_cnt[(3*m)+:3];
    wl_po_fine_cnt_w[m]   = wl_po_fine_cnt[(6*m)+:6];
  end
end
403
404
//***************************************************************************
// Capture the final Phaser_Out coarse/fine tap counts for the debug port.
// Cleared on reset; loaded once from the write-leveling results on the
// rising edge of cal2_done_r (write-calibration completion).
//***************************************************************************
always @(posedge clk) begin
  if (rst) begin
    // Clear the per-byte tap-count arrays.
    for (p = 0; p < DQS_WIDTH; p = p + 1) begin
      po_coarse_tap_cnt[p] <= #TCQ {3{1'b0}};
      po_fine_tap_cnt[p]   <= #TCQ {6{1'b0}};
    end
  end else if (cal2_done_r && ~cal2_done_r1) begin
    // One-shot load when stage-2 calibration finishes.
    // BUGFIX: the original indexed the right-hand side with integer 'i',
    // which is never assigned anywhere in this module, so every lane read
    // an uninitialized index. Use the loop index 'q' so each byte lane
    // captures its own write-leveling tap counts.
    for (q = 0; q < DQS_WIDTH; q = q + 1) begin
      po_coarse_tap_cnt[q] <= #TCQ wl_po_coarse_cnt_w[q];
      po_fine_tap_cnt[q]   <= #TCQ wl_po_fine_cnt_w[q];
    end
  end
end
417
418
// One register stage on the byte-select feeding the read-data mux, so the
// mux select lines up with the registered calibration byte counter.
always @(posedge clk)
  rd_mux_sel_r <= #TCQ wrcal_dqs_cnt_r;
421
422
// Register outputs for improved timing.
423
// NOTE: Will need to change when per-bit DQ deskew is supported.
424
// Currenly all bits in DQS group are checked in aggregate
425
generate
426
genvar
mux_i
;
427
if
(
nCK_PER_CLK
==
4
)
begin
:
gen_mux_rd_div4
428
for
(
mux_i
=
0
;
mux_i
<
DRAM_WIDTH
;
mux_i
=
mux_i
+
1
)
begin
:
gen_mux_rd
429
always
@(
posedge
clk
)
begin
430
mux_rd_rise0_r
[
mux_i
] <= #TCQ
rd_data_rise0
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
431
mux_rd_fall0_r
[
mux_i
] <= #TCQ
rd_data_fall0
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
432
mux_rd_rise1_r
[
mux_i
] <= #TCQ
rd_data_rise1
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
433
mux_rd_fall1_r
[
mux_i
] <= #TCQ
rd_data_fall1
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
434
mux_rd_rise2_r
[
mux_i
] <= #TCQ
rd_data_rise2
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
435
mux_rd_fall2_r
[
mux_i
] <= #TCQ
rd_data_fall2
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
436
mux_rd_rise3_r
[
mux_i
] <= #TCQ
rd_data_rise3
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
437
mux_rd_fall3_r
[
mux_i
] <= #TCQ
rd_data_fall3
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
438
end
439
end
440
end
else
if
(
nCK_PER_CLK
==
2
)
begin
:
gen_mux_rd_div2
441
for
(
mux_i
=
0
;
mux_i
<
DRAM_WIDTH
;
mux_i
=
mux_i
+
1
)
begin
:
gen_mux_rd
442
always
@(
posedge
clk
)
begin
443
mux_rd_rise0_r
[
mux_i
] <= #TCQ
rd_data_rise0
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
444
mux_rd_fall0_r
[
mux_i
] <= #TCQ
rd_data_fall0
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
445
mux_rd_rise1_r
[
mux_i
] <= #TCQ
rd_data_rise1
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
446
mux_rd_fall1_r
[
mux_i
] <= #TCQ
rd_data_fall1
[
DRAM_WIDTH
*
rd_mux_sel_r
+
mux_i
];
447
end
448
end
449
end
450
endgenerate
451
452
//***************************************************************************
453
// generate request to PHY_INIT logic to issue precharged. Required when
454
// calibration can take a long time (during which there are only constant
455
// reads present on this bus). In this case need to issue perioidic
456
// precharges to avoid tRAS violation. This signal must meet the following
457
// requirements: (1) only transition from 0->1 when prech is first needed,
458
// (2) stay at 1 and only transition 1->0 when RDLVL_PRECH_DONE asserted
459
//***************************************************************************
460
461
//***************************************************************************
// Periodic-precharge request to PHY_INIT. Driven from the cal2 FSM request
// register; must only rise when a precharge is first needed and fall after
// the precharge has been serviced (see requirements documented above).
//***************************************************************************
always @(posedge clk) begin
  if (rst)
    wrcal_prech_req <= #TCQ 1'b0;
  else
    wrcal_prech_req <= #TCQ cal2_prech_req_r; // combine stage requests here
end
467
468
//***************************************************************************
469
// Shift register to store last RDDATA_SHIFT_LEN cycles of data from ISERDES
470
// NOTE: Written using discrete flops, but SRL can be used if the matching
471
// logic does the comparison sequentially, rather than parallel
472
//***************************************************************************
473
474
generate
475
genvar
rd_i
;
476
if
(
nCK_PER_CLK
==
4
)
begin
:
gen_sr_div4
477
for
(
rd_i
=
0
;
rd_i
<
DRAM_WIDTH
;
rd_i
=
rd_i
+
1
)
begin
:
gen_sr
478
always
@(
posedge
clk
)
begin
479
sr_rise0_r
[
rd_i
] <= #TCQ
mux_rd_rise0_r
[
rd_i
];
480
sr_fall0_r
[
rd_i
] <= #TCQ
mux_rd_fall0_r
[
rd_i
];
481
sr_rise1_r
[
rd_i
] <= #TCQ
mux_rd_rise1_r
[
rd_i
];
482
sr_fall1_r
[
rd_i
] <= #TCQ
mux_rd_fall1_r
[
rd_i
];
483
sr_rise2_r
[
rd_i
] <= #TCQ
mux_rd_rise2_r
[
rd_i
];
484
sr_fall2_r
[
rd_i
] <= #TCQ
mux_rd_fall2_r
[
rd_i
];
485
sr_rise3_r
[
rd_i
] <= #TCQ
mux_rd_rise3_r
[
rd_i
];
486
sr_fall3_r
[
rd_i
] <= #TCQ
mux_rd_fall3_r
[
rd_i
];
487
end
488
end
489
end
else
if
(
nCK_PER_CLK
==
2
)
begin
:
gen_sr_div2
490
for
(
rd_i
=
0
;
rd_i
<
DRAM_WIDTH
;
rd_i
=
rd_i
+
1
)
begin
:
gen_sr
491
always
@(
posedge
clk
)
begin
492
sr_rise0_r
[
rd_i
] <= #TCQ
mux_rd_rise0_r
[
rd_i
];
493
sr_fall0_r
[
rd_i
] <= #TCQ
mux_rd_fall0_r
[
rd_i
];
494
sr_rise1_r
[
rd_i
] <= #TCQ
mux_rd_rise1_r
[
rd_i
];
495
sr_fall1_r
[
rd_i
] <= #TCQ
mux_rd_fall1_r
[
rd_i
];
496
end
497
end
498
end
499
endgenerate
500
501
//***************************************************************************
502
// Write calibration:
503
// During write leveling DQS is aligned to the nearest CK edge that may not
504
// be the correct CK edge. Write calibration is required to align the DQS to
505
// the correct CK edge that clocks the write command.
506
// The Phaser_Out coarse delay line is adjusted if required to add a memory
507
// clock cycle of delay in order to read back the expected pattern.
508
//***************************************************************************
509
510
//***************************************************************************
// Six-stage delay line on the read-data-valid strobe: rd_active_r5 marks
// the cycle when pattern-compare results derived from phy_rddata_en have
// settled. Written as a single shift-register concatenation.
//***************************************************************************
always @(posedge clk) begin
  {rd_active_r5, rd_active_r4, rd_active_r3,
   rd_active_r2, rd_active_r1, rd_active_r}
    <= #TCQ {rd_active_r4, rd_active_r3, rd_active_r2,
             rd_active_r1, rd_active_r,  phy_rddata_en};
end
518
519
//*****************************************************************
520
// Expected data pattern when properly received by read capture
521
// logic:
522
// Based on pattern of ({rise,fall}) =
523
// 0xF, 0x0, 0xA, 0x5, 0x5, 0xA, 0x9, 0x6
524
// Each nibble will look like:
525
// bit3: 1, 0, 1, 0, 0, 1, 1, 0
526
// bit2: 1, 0, 0, 1, 1, 0, 0, 1
527
// bit1: 1, 0, 1, 0, 0, 1, 0, 1
528
// bit0: 1, 0, 0, 1, 1, 0, 1, 0
529
// Change the hard-coded pattern below accordingly as RD_SHIFT_LEN
530
// and the actual training pattern contents change
531
//*****************************************************************
532
533
generate
534
if
(
nCK_PER_CLK
==
4
)
begin
:
gen_pat_div4
535
// FF00AA5555AA9966
536
assign
pat_rise0
[
3
] =
1'b1
;
537
assign
pat_fall0
[
3
] =
1'b0
;
538
assign
pat_rise1
[
3
] =
1'b1
;
539
assign
pat_fall1
[
3
] =
1'b0
;
540
assign
pat_rise2
[
3
] =
1'b0
;
541
assign
pat_fall2
[
3
] =
1'b1
;
542
assign
pat_rise3
[
3
] =
1'b1
;
543
assign
pat_fall3
[
3
] =
1'b0
;
544
545
assign
pat_rise0
[
2
] =
1'b1
;
546
assign
pat_fall0
[
2
] =
1'b0
;
547
assign
pat_rise1
[
2
] =
1'b0
;
548
assign
pat_fall1
[
2
] =
1'b1
;
549
assign
pat_rise2
[
2
] =
1'b1
;
550
assign
pat_fall2
[
2
] =
1'b0
;
551
assign
pat_rise3
[
2
] =
1'b0
;
552
assign
pat_fall3
[
2
] =
1'b1
;
553
554
assign
pat_rise0
[
1
] =
1'b1
;
555
assign
pat_fall0
[
1
] =
1'b0
;
556
assign
pat_rise1
[
1
] =
1'b1
;
557
assign
pat_fall1
[
1
] =
1'b0
;
558
assign
pat_rise2
[
1
] =
1'b0
;
559
assign
pat_fall2
[
1
] =
1'b1
;
560
assign
pat_rise3
[
1
] =
1'b0
;
561
assign
pat_fall3
[
1
] =
1'b1
;
562
563
assign
pat_rise0
[
0
] =
1'b1
;
564
assign
pat_fall0
[
0
] =
1'b0
;
565
assign
pat_rise1
[
0
] =
1'b0
;
566
assign
pat_fall1
[
0
] =
1'b1
;
567
assign
pat_rise2
[
0
] =
1'b1
;
568
assign
pat_fall2
[
0
] =
1'b0
;
569
assign
pat_rise3
[
0
] =
1'b1
;
570
assign
pat_fall3
[
0
] =
1'b0
;
571
572
// Pattern to distinguish between early write and incorrect read
573
// BB11EE4444EEDD88
574
assign
early_rise0
[
3
] =
1'b1
;
575
assign
early_fall0
[
3
] =
1'b0
;
576
assign
early_rise1
[
3
] =
1'b1
;
577
assign
early_fall1
[
3
] =
1'b0
;
578
assign
early_rise2
[
3
] =
1'b0
;
579
assign
early_fall2
[
3
] =
1'b1
;
580
assign
early_rise3
[
3
] =
1'b1
;
581
assign
early_fall3
[
3
] =
1'b1
;
582
583
assign
early_rise0
[
2
] =
1'b0
;
584
assign
early_fall0
[
2
] =
1'b0
;
585
assign
early_rise1
[
2
] =
1'b1
;
586
assign
early_fall1
[
2
] =
1'b1
;
587
assign
early_rise2
[
2
] =
1'b1
;
588
assign
early_fall2
[
2
] =
1'b1
;
589
assign
early_rise3
[
2
] =
1'b1
;
590
assign
early_fall3
[
2
] =
1'b0
;
591
592
assign
early_rise0
[
1
] =
1'b1
;
593
assign
early_fall0
[
1
] =
1'b0
;
594
assign
early_rise1
[
1
] =
1'b1
;
595
assign
early_fall1
[
1
] =
1'b0
;
596
assign
early_rise2
[
1
] =
1'b0
;
597
assign
early_fall2
[
1
] =
1'b1
;
598
assign
early_rise3
[
1
] =
1'b0
;
599
assign
early_fall3
[
1
] =
1'b0
;
600
601
assign
early_rise0
[
0
] =
1'b1
;
602
assign
early_fall0
[
0
] =
1'b1
;
603
assign
early_rise1
[
0
] =
1'b0
;
604
assign
early_fall1
[
0
] =
1'b0
;
605
assign
early_rise2
[
0
] =
1'b0
;
606
assign
early_fall2
[
0
] =
1'b0
;
607
assign
early_rise3
[
0
] =
1'b1
;
608
assign
early_fall3
[
0
] =
1'b0
;
609
610
end
else
if
(
nCK_PER_CLK
==
2
)
begin
:
gen_pat_div2
611
// First cycle pattern FF00AA55
612
assign
pat1_rise0
[
3
] =
1'b1
;
613
assign
pat1_fall0
[
3
] =
1'b0
;
614
assign
pat1_rise1
[
3
] =
1'b1
;
615
assign
pat1_fall1
[
3
] =
1'b0
;
616
617
assign
pat1_rise0
[
2
] =
1'b1
;
618
assign
pat1_fall0
[
2
] =
1'b0
;
619
assign
pat1_rise1
[
2
] =
1'b0
;
620
assign
pat1_fall1
[
2
] =
1'b1
;
621
622
assign
pat1_rise0
[
1
] =
1'b1
;
623
assign
pat1_fall0
[
1
] =
1'b0
;
624
assign
pat1_rise1
[
1
] =
1'b1
;
625
assign
pat1_fall1
[
1
] =
1'b0
;
626
627
assign
pat1_rise0
[
0
] =
1'b1
;
628
assign
pat1_fall0
[
0
] =
1'b0
;
629
assign
pat1_rise1
[
0
] =
1'b0
;
630
assign
pat1_fall1
[
0
] =
1'b1
;
631
632
// Second cycle pattern 55AA9966
633
assign
pat2_rise0
[
3
] =
1'b0
;
634
assign
pat2_fall0
[
3
] =
1'b1
;
635
assign
pat2_rise1
[
3
] =
1'b1
;
636
assign
pat2_fall1
[
3
] =
1'b0
;
637
638
assign
pat2_rise0
[
2
] =
1'b1
;
639
assign
pat2_fall0
[
2
] =
1'b0
;
640
assign
pat2_rise1
[
2
] =
1'b0
;
641
assign
pat2_fall1
[
2
] =
1'b1
;
642
643
assign
pat2_rise0
[
1
] =
1'b0
;
644
assign
pat2_fall0
[
1
] =
1'b1
;
645
assign
pat2_rise1
[
1
] =
1'b0
;
646
assign
pat2_fall1
[
1
] =
1'b1
;
647
648
assign
pat2_rise0
[
0
] =
1'b1
;
649
assign
pat2_fall0
[
0
] =
1'b0
;
650
assign
pat2_rise1
[
0
] =
1'b1
;
651
assign
pat2_fall1
[
0
] =
1'b0
;
652
653
//Pattern to distinguish between early write and incorrect read
654
// First cycle pattern AA5555AA
655
assign
early1_rise0
[
3
] =
2'b1
;
656
assign
early1_fall0
[
3
] =
2'b0
;
657
assign
early1_rise1
[
3
] =
2'b0
;
658
assign
early1_fall1
[
3
] =
2'b1
;
659
660
assign
early1_rise0
[
2
] =
2'b0
;
661
assign
early1_fall0
[
2
] =
2'b1
;
662
assign
early1_rise1
[
2
] =
2'b1
;
663
assign
early1_fall1
[
2
] =
2'b0
;
664
665
assign
early1_rise0
[
1
] =
2'b1
;
666
assign
early1_fall0
[
1
] =
2'b0
;
667
assign
early1_rise1
[
1
] =
2'b0
;
668
assign
early1_fall1
[
1
] =
2'b1
;
669
670
assign
early1_rise0
[
0
] =
2'b0
;
671
assign
early1_fall0
[
0
] =
2'b1
;
672
assign
early1_rise1
[
0
] =
2'b1
;
673
assign
early1_fall1
[
0
] =
2'b0
;
674
675
// Second cycle pattern 9966BB11
676
assign
early2_rise0
[
3
] =
2'b1
;
677
assign
early2_fall0
[
3
] =
2'b0
;
678
assign
early2_rise1
[
3
] =
2'b1
;
679
assign
early2_fall1
[
3
] =
2'b0
;
680
681
assign
early2_rise0
[
2
] =
2'b0
;
682
assign
early2_fall0
[
2
] =
2'b1
;
683
assign
early2_rise1
[
2
] =
2'b0
;
684
assign
early2_fall1
[
2
] =
2'b0
;
685
686
assign
early2_rise0
[
1
] =
2'b0
;
687
assign
early2_fall0
[
1
] =
2'b1
;
688
assign
early2_rise1
[
1
] =
2'b1
;
689
assign
early2_fall1
[
1
] =
2'b0
;
690
691
assign
early2_rise0
[
0
] =
2'b1
;
692
assign
early2_fall0
[
0
] =
2'b0
;
693
assign
early2_rise1
[
0
] =
2'b1
;
694
assign
early2_fall1
[
0
] =
2'b1
;
695
end
696
endgenerate
697
698
// Each bit of each byte is compared to expected pattern.
699
// This was done to prevent (and "drastically decrease") the chance that
700
// invalid data clocked in when the DQ bus is tri-state (along with a
701
// combination of the correct data) will resemble the expected data
702
// pattern. A better fix for this is to change the training pattern and/or
703
// make the pattern longer.
704
generate
705
genvar
pt_i
;
706
if
(
nCK_PER_CLK
==
4
)
begin
:
gen_pat_match_div4
707
for
(
pt_i
=
0
;
pt_i
<
DRAM_WIDTH
;
pt_i
=
pt_i
+
1
)
begin
:
gen_pat_match
708
always
@(
posedge
clk
)
begin
709
if
(
sr_rise0_r
[
pt_i
] ==
pat_rise0
[
pt_i
%
4
])
710
pat_match_rise0_r
[
pt_i
] <= #TCQ
1'b1
;
711
else
712
pat_match_rise0_r
[
pt_i
] <= #TCQ
1'b0
;
713
714
if
(
sr_fall0_r
[
pt_i
] ==
pat_fall0
[
pt_i
%
4
])
715
pat_match_fall0_r
[
pt_i
] <= #TCQ
1'b1
;
716
else
717
pat_match_fall0_r
[
pt_i
] <= #TCQ
1'b0
;
718
719
if
(
sr_rise1_r
[
pt_i
] ==
pat_rise1
[
pt_i
%
4
])
720
pat_match_rise1_r
[
pt_i
] <= #TCQ
1'b1
;
721
else
722
pat_match_rise1_r
[
pt_i
] <= #TCQ
1'b0
;
723
724
if
(
sr_fall1_r
[
pt_i
] ==
pat_fall1
[
pt_i
%
4
])
725
pat_match_fall1_r
[
pt_i
] <= #TCQ
1'b1
;
726
else
727
pat_match_fall1_r
[
pt_i
] <= #TCQ
1'b0
;
728
729
if
(
sr_rise2_r
[
pt_i
] ==
pat_rise2
[
pt_i
%
4
])
730
pat_match_rise2_r
[
pt_i
] <= #TCQ
1'b1
;
731
else
732
pat_match_rise2_r
[
pt_i
] <= #TCQ
1'b0
;
733
734
if
(
sr_fall2_r
[
pt_i
] ==
pat_fall2
[
pt_i
%
4
])
735
pat_match_fall2_r
[
pt_i
] <= #TCQ
1'b1
;
736
else
737
pat_match_fall2_r
[
pt_i
] <= #TCQ
1'b0
;
738
739
if
(
sr_rise3_r
[
pt_i
] ==
pat_rise3
[
pt_i
%
4
])
740
pat_match_rise3_r
[
pt_i
] <= #TCQ
1'b1
;
741
else
742
pat_match_rise3_r
[
pt_i
] <= #TCQ
1'b0
;
743
744
if
(
sr_fall3_r
[
pt_i
] ==
pat_fall3
[
pt_i
%
4
])
745
pat_match_fall3_r
[
pt_i
] <= #TCQ
1'b1
;
746
else
747
pat_match_fall3_r
[
pt_i
] <= #TCQ
1'b0
;
748
end
749
750
always
@(
posedge
clk
)
begin
751
if
(
sr_rise0_r
[
pt_i
] ==
pat_rise1
[
pt_i
%
4
])
752
early1_match_rise0_r
[
pt_i
] <= #TCQ
1'b1
;
753
else
754
early1_match_rise0_r
[
pt_i
] <= #TCQ
1'b0
;
755
756
if
(
sr_fall0_r
[
pt_i
] ==
pat_fall1
[
pt_i
%
4
])
757
early1_match_fall0_r
[
pt_i
] <= #TCQ
1'b1
;
758
else
759
early1_match_fall0_r
[
pt_i
] <= #TCQ
1'b0
;
760
761
if
(
sr_rise1_r
[
pt_i
] ==
pat_rise2
[
pt_i
%
4
])
762
early1_match_rise1_r
[
pt_i
] <= #TCQ
1'b1
;
763
else
764
early1_match_rise1_r
[
pt_i
] <= #TCQ
1'b0
;
765
766
if
(
sr_fall1_r
[
pt_i
] ==
pat_fall2
[
pt_i
%
4
])
767
early1_match_fall1_r
[
pt_i
] <= #TCQ
1'b1
;
768
else
769
early1_match_fall1_r
[
pt_i
] <= #TCQ
1'b0
;
770
771
if
(
sr_rise2_r
[
pt_i
] ==
pat_rise3
[
pt_i
%
4
])
772
early1_match_rise2_r
[
pt_i
] <= #TCQ
1'b1
;
773
else
774
early1_match_rise2_r
[
pt_i
] <= #TCQ
1'b0
;
775
776
if
(
sr_fall2_r
[
pt_i
] ==
pat_fall3
[
pt_i
%
4
])
777
early1_match_fall2_r
[
pt_i
] <= #TCQ
1'b1
;
778
else
779
early1_match_fall2_r
[
pt_i
] <= #TCQ
1'b0
;
780
781
if
(
sr_rise3_r
[
pt_i
] ==
early_rise0
[
pt_i
%
4
])
782
early1_match_rise3_r
[
pt_i
] <= #TCQ
1'b1
;
783
else
784
early1_match_rise3_r
[
pt_i
] <= #TCQ
1'b0
;
785
786
if
(
sr_fall3_r
[
pt_i
] ==
early_fall0
[
pt_i
%
4
])
787
early1_match_fall3_r
[
pt_i
] <= #TCQ
1'b1
;
788
else
789
early1_match_fall3_r
[
pt_i
] <= #TCQ
1'b0
;
790
end
791
792
// 4:1 mode: per-bit compare of captured read data against the expected
// pattern advanced by two beats ("two cycles early" writes — see FSM use
// of early2_data_match_r). rise2/fall2 wrap into early_rise0/early_fall0
// and rise3/fall3 into early_rise1/early_fall1.
always @(posedge clk) begin
  if (sr_rise0_r[pt_i] == pat_rise2[pt_i % 4])
    early2_match_rise0_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_rise0_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall0_r[pt_i] == pat_fall2[pt_i % 4])
    early2_match_fall0_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_fall0_r[pt_i] <= #TCQ 1'b0;

  if (sr_rise1_r[pt_i] == pat_rise3[pt_i % 4])
    early2_match_rise1_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_rise1_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall1_r[pt_i] == pat_fall3[pt_i % 4])
    early2_match_fall1_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_fall1_r[pt_i] <= #TCQ 1'b0;

  if (sr_rise2_r[pt_i] == early_rise0[pt_i % 4])
    early2_match_rise2_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_rise2_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall2_r[pt_i] == early_fall0[pt_i % 4])
    early2_match_fall2_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_fall2_r[pt_i] <= #TCQ 1'b0;

  if (sr_rise3_r[pt_i] == early_rise1[pt_i % 4])
    early2_match_rise3_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_rise3_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall3_r[pt_i] == early_fall1[pt_i % 4])
    early2_match_fall3_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
833
end
834
835
836
// 4:1 mode: AND-reduce each per-bit match vector across the byte, then
// combine all eight beats into the byte-wide match flag. The valid flag
// tracks the delayed read-active strobe (rd_active_r3) so the FSM only
// samples pat_data_match_r when it corresponds to actual read data.
always @(posedge clk) begin
  pat_match_rise0_and_r <= #TCQ &pat_match_rise0_r;
  pat_match_fall0_and_r <= #TCQ &pat_match_fall0_r;
  pat_match_rise1_and_r <= #TCQ &pat_match_rise1_r;
  pat_match_fall1_and_r <= #TCQ &pat_match_fall1_r;
  pat_match_rise2_and_r <= #TCQ &pat_match_rise2_r;
  pat_match_fall2_and_r <= #TCQ &pat_match_fall2_r;
  pat_match_rise3_and_r <= #TCQ &pat_match_rise3_r;
  pat_match_fall3_and_r <= #TCQ &pat_match_fall3_r;
  pat_data_match_r      <= #TCQ (pat_match_rise0_and_r &&
                                 pat_match_fall0_and_r &&
                                 pat_match_rise1_and_r &&
                                 pat_match_fall1_and_r &&
                                 pat_match_rise2_and_r &&
                                 pat_match_fall2_and_r &&
                                 pat_match_rise3_and_r &&
                                 pat_match_fall3_and_r);
  pat_data_match_valid_r <= #TCQ rd_active_r3;
end
855
856
// 4:1 mode: AND-reduce the per-bit "one cycle early" match vectors and
// combine into the byte-wide early1_data_match_r flag used by the FSM
// to trigger a write-leveling redo.
always @(posedge clk) begin
  early1_match_rise0_and_r <= #TCQ &early1_match_rise0_r;
  early1_match_fall0_and_r <= #TCQ &early1_match_fall0_r;
  early1_match_rise1_and_r <= #TCQ &early1_match_rise1_r;
  early1_match_fall1_and_r <= #TCQ &early1_match_fall1_r;
  early1_match_rise2_and_r <= #TCQ &early1_match_rise2_r;
  early1_match_fall2_and_r <= #TCQ &early1_match_fall2_r;
  early1_match_rise3_and_r <= #TCQ &early1_match_rise3_r;
  early1_match_fall3_and_r <= #TCQ &early1_match_fall3_r;
  early1_data_match_r      <= #TCQ (early1_match_rise0_and_r &&
                                    early1_match_fall0_and_r &&
                                    early1_match_rise1_and_r &&
                                    early1_match_fall1_and_r &&
                                    early1_match_rise2_and_r &&
                                    early1_match_fall2_and_r &&
                                    early1_match_rise3_and_r &&
                                    early1_match_fall3_and_r);
end
874
875
// 4:1 mode: AND-reduce the per-bit "two cycles early" match vectors and
// combine into the byte-wide early2_data_match_r flag used by the FSM
// to trigger a write-leveling redo.
always @(posedge clk) begin
  early2_match_rise0_and_r <= #TCQ &early2_match_rise0_r;
  early2_match_fall0_and_r <= #TCQ &early2_match_fall0_r;
  early2_match_rise1_and_r <= #TCQ &early2_match_rise1_r;
  early2_match_fall1_and_r <= #TCQ &early2_match_fall1_r;
  early2_match_rise2_and_r <= #TCQ &early2_match_rise2_r;
  early2_match_fall2_and_r <= #TCQ &early2_match_fall2_r;
  early2_match_rise3_and_r <= #TCQ &early2_match_rise3_r;
  early2_match_fall3_and_r <= #TCQ &early2_match_fall3_r;
  early2_data_match_r      <= #TCQ (early2_match_rise0_and_r &&
                                    early2_match_fall0_and_r &&
                                    early2_match_rise1_and_r &&
                                    early2_match_fall1_and_r &&
                                    early2_match_rise2_and_r &&
                                    early2_match_fall2_and_r &&
                                    early2_match_rise3_and_r &&
                                    early2_match_fall3_and_r);
end
893
894
end
else
if
(
nCK_PER_CLK
==
2
)
begin
:
gen_pat_match_div2
895
896
for
(
pt_i
=
0
;
pt_i
<
DRAM_WIDTH
;
pt_i
=
pt_i
+
1
)
begin
:
gen_pat_match
897
// 2:1 mode: per-bit compare of the captured read beats against the first
// cycle of the expected pattern (pat1_*). One flag per DQ bit.
always @(posedge clk) begin
  if (sr_rise0_r[pt_i] == pat1_rise0[pt_i % 4])
    pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
  else
    pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall0_r[pt_i] == pat1_fall0[pt_i % 4])
    pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
  else
    pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;

  if (sr_rise1_r[pt_i] == pat1_rise1[pt_i % 4])
    pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
  else
    pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall1_r[pt_i] == pat1_fall1[pt_i % 4])
    pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
  else
    pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
918
919
// 2:1 mode: per-bit compare of the captured read beats against the second
// cycle of the expected pattern (pat2_*). One flag per DQ bit.
always @(posedge clk) begin
  if (sr_rise0_r[pt_i] == pat2_rise0[pt_i % 4])
    pat2_match_rise0_r[pt_i] <= #TCQ 1'b1;
  else
    pat2_match_rise0_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall0_r[pt_i] == pat2_fall0[pt_i % 4])
    pat2_match_fall0_r[pt_i] <= #TCQ 1'b1;
  else
    pat2_match_fall0_r[pt_i] <= #TCQ 1'b0;

  if (sr_rise1_r[pt_i] == pat2_rise1[pt_i % 4])
    pat2_match_rise1_r[pt_i] <= #TCQ 1'b1;
  else
    pat2_match_rise1_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall1_r[pt_i] == pat2_fall1[pt_i % 4])
    pat2_match_fall1_r[pt_i] <= #TCQ 1'b1;
  else
    pat2_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
940
941
// 2:1 mode: per-bit compare against the "early1" pattern (early1_*),
// used to detect writes arriving early — see FSM use of
// early1_data_match_r1 to trigger a write-leveling redo.
always @(posedge clk) begin
  if (sr_rise0_r[pt_i] == early1_rise0[pt_i % 4])
    early1_match_rise0_r[pt_i] <= #TCQ 1'b1;
  else
    early1_match_rise0_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall0_r[pt_i] == early1_fall0[pt_i % 4])
    early1_match_fall0_r[pt_i] <= #TCQ 1'b1;
  else
    early1_match_fall0_r[pt_i] <= #TCQ 1'b0;

  if (sr_rise1_r[pt_i] == early1_rise1[pt_i % 4])
    early1_match_rise1_r[pt_i] <= #TCQ 1'b1;
  else
    early1_match_rise1_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall1_r[pt_i] == early1_fall1[pt_i % 4])
    early1_match_fall1_r[pt_i] <= #TCQ 1'b1;
  else
    early1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
962
963
// early2 in this case does not mean 2 cycles early but
964
// the second cycle of read data in 2:1 mode
965
// 2:1 mode: per-bit compare against the "early2" pattern (early2_*).
// Per the original note, "early2" here is NOT two cycles early — it is
// the second cycle of read data in 2:1 mode.
always @(posedge clk) begin
  if (sr_rise0_r[pt_i] == early2_rise0[pt_i % 4])
    early2_match_rise0_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_rise0_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall0_r[pt_i] == early2_fall0[pt_i % 4])
    early2_match_fall0_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_fall0_r[pt_i] <= #TCQ 1'b0;

  if (sr_rise1_r[pt_i] == early2_rise1[pt_i % 4])
    early2_match_rise1_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_rise1_r[pt_i] <= #TCQ 1'b0;

  if (sr_fall1_r[pt_i] == early2_fall1[pt_i % 4])
    early2_match_fall1_r[pt_i] <= #TCQ 1'b1;
  else
    early2_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
986
end
987
988
// 2:1 mode: AND-reduce the per-bit pat1/pat2 match vectors into byte-wide
// flags. The pat2 reductions are additionally qualified with the delayed
// read-active strobe (rd_active_r3).
always @(posedge clk) begin
  pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
  pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
  pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
  pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
  pat1_data_match_r      <= #TCQ (pat1_match_rise0_and_r &&
                                  pat1_match_fall0_and_r &&
                                  pat1_match_rise1_and_r &&
                                  pat1_match_fall1_and_r);
  pat1_data_match_r1     <= #TCQ pat1_data_match_r;

  pat2_match_rise0_and_r <= #TCQ &pat2_match_rise0_r && rd_active_r3;
  pat2_match_fall0_and_r <= #TCQ &pat2_match_fall0_r && rd_active_r3;
  pat2_match_rise1_and_r <= #TCQ &pat2_match_rise1_r && rd_active_r3;
  pat2_match_fall1_and_r <= #TCQ &pat2_match_fall1_r && rd_active_r3;
  pat2_data_match_r      <= #TCQ (pat2_match_rise0_and_r &&
                                  pat2_match_fall0_and_r &&
                                  pat2_match_rise1_and_r &&
                                  pat2_match_fall1_and_r);

  // For 2:1 mode, read valid is asserted for 2 clock cycles; generate a
  // "match valid" pulse only 1 clock wide, simultaneous with completion
  // of the match calculation.
  pat_data_match_valid_r <= #TCQ rd_active_r4 & ~rd_active_r5;
end
1014
1015
// 2:1 mode: AND-reduce the per-bit early1/early2 match vectors into the
// byte-wide early-match flags consumed by the FSM. As with pat2, the
// early2 reductions are qualified with rd_active_r3.
always @(posedge clk) begin
  early1_match_rise0_and_r <= #TCQ &early1_match_rise0_r;
  early1_match_fall0_and_r <= #TCQ &early1_match_fall0_r;
  early1_match_rise1_and_r <= #TCQ &early1_match_rise1_r;
  early1_match_fall1_and_r <= #TCQ &early1_match_fall1_r;
  early1_data_match_r      <= #TCQ (early1_match_rise0_and_r &&
                                    early1_match_fall0_and_r &&
                                    early1_match_rise1_and_r &&
                                    early1_match_fall1_and_r);
  early1_data_match_r1     <= #TCQ early1_data_match_r;

  early2_match_rise0_and_r <= #TCQ &early2_match_rise0_r && rd_active_r3;
  early2_match_fall0_and_r <= #TCQ &early2_match_fall0_r && rd_active_r3;
  early2_match_rise1_and_r <= #TCQ &early2_match_rise1_r && rd_active_r3;
  early2_match_fall1_and_r <= #TCQ &early2_match_fall1_r && rd_active_r3;
  early2_data_match_r      <= #TCQ (early2_match_rise0_and_r &&
                                    early2_match_fall0_and_r &&
                                    early2_match_rise1_and_r &&
                                    early2_match_fall1_and_r);
end
1035
1036
end
1037
endgenerate
1038
1039
// Need to delay it by 3 cycles in order to wait for Phaser_Out
1040
// coarse delay to take effect before issuing a write command
1041
// Three-stage delay of the resume request so the Phaser_Out coarse delay
// takes effect before a new write command is issued (see note above).
always @(posedge clk) begin
  wrcal_pat_resume_r1 <= #TCQ wrcal_pat_resume_r;
  wrcal_pat_resume_r2 <= #TCQ wrcal_pat_resume_r1;
  wrcal_pat_resume    <= #TCQ wrcal_pat_resume_r2;
end
1046
1047
// Wait counter: free-runs while the FSM sits in the IDELAY-decrement,
// IN_FIFO-reset, or sanity-wait states; held at zero otherwise. The FSM
// compares it against fixed thresholds ('d4, 'd15) to time those states.
always @(posedge clk) begin
  if (rst)
    tap_inc_wait_cnt <= #TCQ 'd0;
  else if ((cal2_state_r == CAL2_DQ_IDEL_DEC) ||
           (cal2_state_r == CAL2_IFIFO_RESET) ||
           (cal2_state_r == CAL2_SANITY_WAIT))
    tap_inc_wait_cnt <= #TCQ tap_inc_wait_cnt + 1;
  else
    tap_inc_wait_cnt <= #TCQ 'd0;
end
1057
1058
// Timeout counter for CAL2_READ_WAIT: counts while the FSM waits on read
// data (wrcal_rd_wait asserted); the FSM flags an error when it reaches
// 'd31. Cleared in every other condition.
always @(posedge clk) begin
  if (rst)
    not_empty_wait_cnt <= #TCQ 'd0;
  else if ((cal2_state_r == CAL2_READ_WAIT) && wrcal_rd_wait)
    not_empty_wait_cnt <= #TCQ not_empty_wait_cnt + 1;
  else
    not_empty_wait_cnt <= #TCQ 'd0;
end
1066
1067
// One-cycle delayed copy of the calibration FSM state.
always @(posedge clk)
  cal2_state_r1 <= #TCQ cal2_state_r;
1069
1070
//*****************************************************************
1071
// Write Calibration state machine
1072
//*****************************************************************
1073
1074
// when calibrating, check to see if the expected pattern is received.
1075
// Otherwise delay DQS to align to correct CK edge.
1076
// NOTES:
1077
// 1. An error condition can occur due to two reasons:
1078
// a. If the matching logic does not receive the expected data
1079
// pattern. However, the error may be "recoverable" because
1080
// the write calibration is still in progress. If an error is
1081
// found the write calibration logic delays DQS by an additional
1082
// clock cycle and restarts the pattern detection process.
1083
// By design, if the write path timing is incorrect, the correct
1084
// data pattern will never be detected.
1085
// b. Valid data not found even after incrementing Phaser_Out
1086
// coarse delay line.
1087
1088
1089
// Write-calibration state machine. Checks whether the expected read
// pattern is received; if writes land one or two cycles early it redoes
// write leveling for the byte, and if reads are late (stale MPR IDELAY
// value) it reloads the IDELAY once before declaring an error. A sanity
// check pass (wrcal_sanity_chk_r) reuses the same states but routes all
// mismatches straight to CAL2_ERR.
always @(posedge clk) begin
  if (rst) begin
    wrcal_dqs_cnt_r       <= #TCQ 'b0;
    cal2_done_r           <= #TCQ 1'b0;
    cal2_prech_req_r      <= #TCQ 1'b0;
    cal2_state_r          <= #TCQ CAL2_IDLE;
    wrcal_pat_err         <= #TCQ 1'b0;
    wrcal_pat_resume_r    <= #TCQ 1'b0;
    wrcal_act_req         <= #TCQ 1'b0;
    cal2_if_reset         <= #TCQ 1'b0;
    temp_wrcal_done       <= #TCQ 1'b0;
    wrlvl_byte_redo       <= #TCQ 1'b0;
    early1_data           <= #TCQ 1'b0;
    early2_data           <= #TCQ 1'b0;
    idelay_ld             <= #TCQ 1'b0;
    idelay_ld_done        <= #TCQ 1'b0;
    pat1_detect           <= #TCQ 1'b0;
    early1_detect         <= #TCQ 1'b0;
    wrcal_sanity_chk_done <= #TCQ 1'b0;
    wrcal_sanity_chk_err  <= #TCQ 1'b0;
  end else begin
    // Precharge request is a single-cycle pulse unless re-asserted below.
    cal2_prech_req_r <= #TCQ 1'b0;

    case (cal2_state_r)
      CAL2_IDLE: begin
        wrcal_pat_err <= #TCQ 1'b0;
        if (wrcal_start) begin
          cal2_if_reset <= #TCQ 1'b0;
          if (SIM_CAL_OPTION == "SKIP_CAL")
            // If skipping write calibration, proceed straight to the end.
            cal2_state_r <= #TCQ CAL2_DONE;
          else
            cal2_state_r <= #TCQ CAL2_READ_WAIT;
        end
      end

      // General wait state for read data to be output by the IN_FIFO.
      CAL2_READ_WAIT: begin
        wrcal_pat_resume_r <= #TCQ 1'b0;
        cal2_if_reset      <= #TCQ 1'b0;
        // Wait until read data is received and the pattern-matching
        // calculation is complete. NOTE: a timeout (not_empty_wait_cnt)
        // guards against data never being received.
        if (pat_data_match_valid_r && (nCK_PER_CLK == 4)) begin
          if (pat_data_match_r)
            // Found data match: move on to the next DQS group.
            cal2_state_r <= #TCQ CAL2_NEXT_DQS;
          else begin
            if (wrcal_sanity_chk_r)
              cal2_state_r <= #TCQ CAL2_ERR;
            // Writes one or two cycles early: redo write leveling
            // for this byte.
            else if (early1_data_match_r) begin
              early1_data     <= #TCQ 1'b1;
              early2_data     <= #TCQ 1'b0;
              wrlvl_byte_redo <= #TCQ 1'b1;
              cal2_state_r    <= #TCQ CAL2_WRLVL_WAIT;
            end else if (early2_data_match_r) begin
              early1_data     <= #TCQ 1'b0;
              early2_data     <= #TCQ 1'b1;
              wrlvl_byte_redo <= #TCQ 1'b1;
              cal2_state_r    <= #TCQ CAL2_WRLVL_WAIT;
            // Read late due to incorrect MPR IDELAY value:
            // reload IDELAY for the current byte (once).
            end else if (~idelay_ld_done) begin
              cal2_state_r <= #TCQ CAL2_DQ_IDEL_DEC;
              idelay_ld    <= #TCQ 1'b1;
            end else
              cal2_state_r <= #TCQ CAL2_ERR;
          end
        end else if (pat_data_match_valid_r && (nCK_PER_CLK == 2)) begin
          if ((pat1_data_match_r1 && pat2_data_match_r) ||
              (pat1_detect && pat2_data_match_r))
            // Both pattern halves matched: next DQS group.
            cal2_state_r <= #TCQ CAL2_NEXT_DQS;
          else if (pat1_data_match_r1 && ~pat2_data_match_r) begin
            // First half matched; remember it and keep waiting.
            cal2_state_r <= #TCQ CAL2_READ_WAIT;
            pat1_detect  <= #TCQ 1'b1;
          end else begin
            if (wrcal_sanity_chk_r)
              cal2_state_r <= #TCQ CAL2_ERR;
            // Writes one or two cycles early: redo write leveling.
            else if ((early1_data_match_r1 && early2_data_match_r) ||
                     (early1_detect && early2_data_match_r)) begin
              early1_data     <= #TCQ 1'b1;
              early2_data     <= #TCQ 1'b0;
              wrlvl_byte_redo <= #TCQ 1'b1;
              cal2_state_r    <= #TCQ CAL2_WRLVL_WAIT;
            end else if (early1_data_match_r1 && ~early2_data_match_r) begin
              early1_detect <= #TCQ 1'b1;
              cal2_state_r  <= #TCQ CAL2_READ_WAIT;
            // Read late due to incorrect MPR IDELAY value.
            end else if (~idelay_ld_done) begin
              cal2_state_r <= #TCQ CAL2_DQ_IDEL_DEC;
              idelay_ld    <= #TCQ 1'b1;
            end else
              cal2_state_r <= #TCQ CAL2_ERR;
          end
        end else if (not_empty_wait_cnt == 'd31)
          cal2_state_r <= #TCQ CAL2_ERR;
      end

      // Wait for the per-byte write-leveling redo to finish.
      CAL2_WRLVL_WAIT: begin
        early1_detect <= #TCQ 1'b0;
        if (wrlvl_byte_done && ~wrlvl_byte_done_r)
          wrlvl_byte_redo <= #TCQ 1'b0;
        if (wrlvl_byte_done) begin
          if (rd_active_r1 && ~rd_active_r) begin
            cal2_state_r  <= #TCQ CAL2_IFIFO_RESET;
            cal2_if_reset <= #TCQ 1'b1;
            early1_data   <= #TCQ 1'b0;
            early2_data   <= #TCQ 1'b0;
          end
        end
      end

      // Hold idelay_ld long enough for the load to take effect.
      CAL2_DQ_IDEL_DEC: begin
        if (tap_inc_wait_cnt == 'd4) begin
          idelay_ld      <= #TCQ 1'b0;
          cal2_state_r   <= #TCQ CAL2_IFIFO_RESET;
          cal2_if_reset  <= #TCQ 1'b1;
          idelay_ld_done <= #TCQ 1'b1;
        end
      end

      // Hold the IN_FIFO reset, then resume pattern reads (or finish
      // when in the sanity-check pass).
      CAL2_IFIFO_RESET: begin
        if (tap_inc_wait_cnt == 'd15) begin
          cal2_if_reset <= #TCQ 1'b0;
          if (wrcal_sanity_chk_r)
            cal2_state_r <= #TCQ CAL2_DONE;
          else if (idelay_ld_done) begin
            wrcal_pat_resume_r <= #TCQ 1'b1;
            cal2_state_r       <= #TCQ CAL2_READ_WAIT;
          end else
            cal2_state_r <= #TCQ CAL2_IDLE;
        end
      end

      // Final processing for the current DQS group; advance to the next.
      CAL2_NEXT_DQS: begin
        // Correct pattern was just found for this DQS group. Request a
        // bank/row precharge and wait for completion — always precharge
        // after each DQS group to avoid a tRAS(max) violation.
        if (wrcal_sanity_chk_r && (wrcal_dqs_cnt_r != DQS_WIDTH-1)) begin
          cal2_prech_req_r <= #TCQ 1'b0;
          wrcal_dqs_cnt_r  <= #TCQ wrcal_dqs_cnt_r + 1;
          cal2_state_r     <= #TCQ CAL2_SANITY_WAIT;
        end else
          cal2_prech_req_r <= #TCQ 1'b1;
        idelay_ld_done <= #TCQ 1'b0;
        pat1_detect    <= #TCQ 1'b0;
        if (prech_done)
          if (((DQS_WIDTH == 1) || (SIM_CAL_OPTION == "FAST_CAL")) ||
              (wrcal_dqs_cnt_r == DQS_WIDTH-1)) begin
            // Either FAST_CAL finished the first group, or the last DQS
            // group just finished: end of write calibration.
            if (wrcal_sanity_chk_r) begin
              cal2_if_reset <= #TCQ 1'b1;
              cal2_state_r  <= #TCQ CAL2_IFIFO_RESET;
            end else
              cal2_state_r <= #TCQ CAL2_DONE;
          end else begin
            // Continue to next DQS group.
            wrcal_dqs_cnt_r <= #TCQ wrcal_dqs_cnt_r + 1;
            cal2_state_r    <= #TCQ CAL2_READ_WAIT;
          end
      end

      // Pause between groups during the sanity-check pass.
      CAL2_SANITY_WAIT: begin
        if (tap_inc_wait_cnt == 'd15) begin
          cal2_state_r       <= #TCQ CAL2_READ_WAIT;
          wrcal_pat_resume_r <= #TCQ 1'b1;
        end
      end

      // Finished with write calibration.
      CAL2_DONE: begin
        if (wrcal_sanity_chk && ~wrcal_sanity_chk_r) begin
          // Sanity check requested: restart the FSM for the check pass.
          cal2_done_r     <= #TCQ 1'b0;
          wrcal_dqs_cnt_r <= #TCQ 'd0;
          cal2_state_r    <= #TCQ CAL2_IDLE;
        end else
          cal2_done_r <= #TCQ 1'b1;
        cal2_prech_req_r <= #TCQ 1'b0;
        cal2_if_reset    <= #TCQ 1'b0;
        if (wrcal_sanity_chk_r)
          wrcal_sanity_chk_done <= #TCQ 1'b1;
      end

      // Terminal error state: write timing is incorrect.
      CAL2_ERR: begin
        wrcal_pat_resume_r <= #TCQ 1'b0;
        if (wrcal_sanity_chk_r)
          wrcal_sanity_chk_err <= #TCQ 1'b1;
        else
          wrcal_pat_err <= #TCQ 1'b1;
        cal2_state_r <= #TCQ CAL2_ERR;
      end
    endcase
  end
end
1296
1297
// Delay assertion of wrcal_done for write calibration by a few cycles after
1298
// we've reached CAL2_DONE
1299
// Registered copy of cal2_done_r, used to delay wrcal_done assertion
// after CAL2_DONE is reached (see note above).
always @(posedge clk)
  if (rst)
    cal2_done_r1 <= #TCQ 1'b0;
  else
    cal2_done_r1 <= #TCQ cal2_done_r;
1304
1305
// Sticky done flag: set once calibration completes; cleared on reset or
// when a sanity-check pass is being (re)started.
always @(posedge clk)
  if (rst || (wrcal_sanity_chk && ~wrcal_sanity_chk_r))
    wrcal_done <= #TCQ 1'b0;
  else if (cal2_done_r)
    wrcal_done <= #TCQ 1'b1;
1310
1311
endmodule
// Generated on Wed Apr 18 2018 10:55:28 for AMC13 by Doxygen 1.8.1