Line Coverage for Module : prim_ram_1p_scr
Kind | Line No. | Total | Covered | Percent |
TOTAL | | 53 | 52 | 98.11 |
CONT_ASSIGN | 133 | 1 | 1 | 100.00 |
CONT_ASSIGN | 135 | 1 | 1 | 100.00 |
CONT_ASSIGN | 136 | 1 | 1 | 100.00 |
CONT_ASSIGN | 145 | 1 | 1 | 100.00 |
CONT_ASSIGN | 154 | 1 | 1 | 100.00 |
CONT_ASSIGN | 163 | 1 | 1 | 100.00 |
CONT_ASSIGN | 171 | 1 | 1 | 100.00 |
CONT_ASSIGN | 182 | 1 | 1 | 100.00 |
CONT_ASSIGN | 186 | 1 | 1 | 100.00 |
CONT_ASSIGN | 190 | 1 | 1 | 100.00 |
CONT_ASSIGN | 194 | 1 | 1 | 100.00 |
CONT_ASSIGN | 198 | 1 | 1 | 100.00 |
CONT_ASSIGN | 206 | 1 | 1 | 100.00 |
CONT_ASSIGN | 211 | 1 | 1 | 100.00 |
CONT_ASSIGN | 232 | 1 | 1 | 100.00 |
CONT_ASSIGN | 245 | 1 | 1 | 100.00 |
CONT_ASSIGN | 274 | 1 | 1 | 100.00 |
CONT_ASSIGN | 280 | 1 | 1 | 100.00 |
CONT_ASSIGN | 306 | 1 | 1 | 100.00 |
CONT_ASSIGN | 336 | 1 | 1 | 100.00 |
CONT_ASSIGN | 361 | 1 | 1 | 100.00 |
CONT_ASSIGN | 370 | 1 | 1 | 100.00 |
ALWAYS | 376 | 10 | 9 | 90.00 |
CONT_ASSIGN | 404 | 1 | 1 | 100.00 |
CONT_ASSIGN | 448 | 1 | 1 | 100.00 |
CONT_ASSIGN | 449 | 1 | 1 | 100.00 |
ALWAYS | 452 | 18 | 18 | 100.00 |
132 logic [MuBi4Width-1:0] read_en_b_buf, write_en_buf_b_d;
133 1/1 assign gnt_o = req_i & key_valid_i;
Tests: T1 T2 T3
134
135 1/1 assign read_en = mubi4_bool_to_mubi(gnt_o & ~write_i);
Tests: T1 T2 T3
136 1/1 assign write_en_d = mubi4_bool_to_mubi(gnt_o & write_i);
Tests: T1 T2 T3
137
138 prim_buf #(
139 .Width(MuBi4Width)
140 ) u_read_en_buf (
141 .in_i (read_en),
142 .out_o(read_en_b_buf)
143 );
144
145 1/1 assign read_en_buf = mubi4_t'(read_en_b_buf);
Tests: T1 T2 T3
146
147 prim_buf #(
148 .Width(MuBi4Width)
149 ) u_write_en_d_buf (
150 .in_i (write_en_d),
151 .out_o(write_en_buf_b_d)
152 );
153
154 1/1 assign write_en_buf_d = mubi4_t'(write_en_buf_b_d);
Tests: T1 T2 T3
155
156 mubi4_t write_pending_q;
157 mubi4_t addr_collision_d, addr_collision_q;
158 logic [AddrWidth-1:0] addr_scr;
159 logic [AddrWidth-1:0] waddr_scr_q;
160 mubi4_t addr_match;
161 logic [MuBi4Width-1:0] addr_match_buf;
162
163 1/1 assign addr_match = (addr_scr == waddr_scr_q) ? MuBi4True : MuBi4False;
Tests: T1 T2 T3
164 prim_buf #(
165 .Width(MuBi4Width)
166 ) u_addr_match_buf (
167 .in_i (addr_match),
168 .out_o(addr_match_buf)
169 );
170
171 1/1 assign addr_collision_d = mubi4_and_hi(mubi4_and_hi(mubi4_or_hi(write_en_q,
Tests: T1 T2 T3
172 write_pending_q), read_en_buf), mubi4_t'(addr_match_buf));
173
174 // Macro requests and write strobe
175 // The macro operation is silenced if an integrity error is seen
176 logic intg_error_buf, intg_error_w_q;
177 prim_buf u_intg_error (
178 .in_i(intg_error_i),
179 .out_o(intg_error_buf)
180 );
181 logic macro_req;
182 1/1 assign macro_req = ~intg_error_w_q & ~intg_error_buf &
Tests: T1 T2 T3
183 mubi4_test_true_loose(mubi4_or_hi(mubi4_or_hi(read_en_buf, write_en_q), write_pending_q));
184 // We are allowed to write a pending write transaction to the memory if there is no incoming read.
185 logic macro_write;
186 1/1 assign macro_write = mubi4_test_true_loose(mubi4_or_hi(write_en_q, write_pending_q)) &
Tests: T1 T2 T3
187 ~mubi4_test_true_loose(read_en_buf) & ~intg_error_w_q;
188 // New read write collision
189 logic rw_collision;
190 1/1 assign rw_collision = mubi4_test_true_loose(mubi4_and_hi(write_en_q, read_en_buf));
Tests: T1 T2 T3
191
192 // Write currently processed inside this module. Although we are sending an immediate d_valid
193 // back to the host, the write could take longer due to the scrambling.
194 1/1 assign write_pending_o = macro_write | mubi4_test_true_loose(write_en_buf_d);
Tests: T1 T2 T3
195
196 // When a read is followed after a write with the same address, we return the data from the
197 // holding register.
198 1/1 assign wr_collision_o = mubi4_test_true_loose(addr_collision_q);
Tests: T1 T2 T3
199
200 ////////////////////////
201 // Address Scrambling //
202 ////////////////////////
203
204 // We only select the pending write address in case there is no incoming read transaction.
205 logic [AddrWidth-1:0] addr_mux;
206 1/1 assign addr_mux = (mubi4_test_true_loose(read_en_buf)) ? addr_scr : waddr_scr_q;
Tests: T1 T2 T3
207
208 // This creates a bijective address mapping using a substitution / permutation network.
209 if (NumAddrScrRounds > 0) begin : gen_addr_scr
210 logic [AddrWidth-1:0] addr_scr_nonce;
211 1/1 assign addr_scr_nonce = nonce_i[NonceWidth - AddrWidth +: AddrWidth];
Tests: T1 T2 T3
212
213 prim_subst_perm #(
214 .DataWidth ( AddrWidth ),
215 .NumRounds ( NumAddrScrRounds ),
216 .Decrypt ( 0 )
217 ) u_prim_subst_perm (
218 .data_i ( addr_i ),
219 // Since the counter mode concatenates {nonce_i[NonceWidth-1-AddrWidth:0], addr} to form
220 // the IV, the upper AddrWidth bits of the nonce are not used and can be used for address
221 // scrambling. In cases where N parallel PRINCE blocks are used due to a data
222 // width > 64bit, N*AddrWidth nonce bits are left dangling.
223 .key_i ( addr_scr_nonce ),
224 .data_o ( addr_scr )
225 );
226 end else begin : gen_no_addr_scr
227 assign addr_scr = addr_i;
228 end
229
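The substitution / permutation network above has to implement a bijective mapping so that every logical address lands on a unique physical row; otherwise two addresses would alias onto the same memory word. A minimal standalone sketch of that property check, using a toy mixing function and an assumed 4-bit address width rather than the real prim_subst_perm rounds:

  module addr_scr_bijective_check;
    localparam int AddrW = 4;  // illustrative width, not the RTL's AddrWidth
    // Stand-in mixing function: any odd multiplier modulo 2**AddrW is invertible.
    function automatic logic [AddrW-1:0] toy_scramble(logic [AddrW-1:0] addr);
      return AddrW'((addr * 5) + 7);
    endfunction
    initial begin
      bit seen[2**AddrW];
      for (int a = 0; a < 2**AddrW; a++) begin
        assert (!seen[toy_scramble(AddrW'(a))]) else $error("address aliasing");
        seen[toy_scramble(AddrW'(a))] = 1'b1;
      end
    end
  endmodule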
230 // We latch the non-scrambled address for error reporting.
231 logic [AddrWidth-1:0] raddr_q;
232 1/1 assign raddr_o = 32'(raddr_q);
Tests: T1 T2 T3
233
234 //////////////////////////////////////////////
235 // Keystream Generation for Data Scrambling //
236 //////////////////////////////////////////////
237
238 // This encrypts the IV consisting of the nonce and address using the key provided in order to
239 // generate the keystream for the data. Note that we instantiate a register halfway within this
240 // primitive to balance the delay between request and response side.
241 localparam int DataNonceWidth = 64 - AddrWidth;
242 logic [NumParScr*64-1:0] keystream;
243 logic [NumParScr-1:0][DataNonceWidth-1:0] data_scr_nonce;
244 for (genvar k = 0; k < NumParScr; k++) begin : gen_par_scr
245 1/1 assign data_scr_nonce[k] = nonce_i[k * DataNonceWidth +: DataNonceWidth];
Tests: T1 T2 T3
246
247 prim_prince #(
248 .DataWidth (64),
249 .KeyWidth (128),
250 .NumRoundsHalf (NumPrinceRoundsHalf),
251 .UseOldKeySched (1'b0),
252 .HalfwayDataReg (1'b1), // instantiate a register halfway in the primitive
253 .HalfwayKeyReg (1'b0) // no need to instantiate a key register as the key remains static
254 ) u_prim_prince (
255 .clk_i,
256 .rst_ni,
257 .valid_i ( gnt_o ),
258 // The IV is composed of a nonce and the row address
259 //.data_i ( {nonce_i[k * (64 - AddrWidth) +: (64 - AddrWidth)], addr} ),
260 .data_i ( {data_scr_nonce[k], addr_i} ),
261 // All parallel scramblers use the same key
262 .key_i,
263 // Since we operate in counter mode, this can always be set to encryption mode
264 .dec_i ( 1'b0 ),
265 // Output keystream to be XOR'ed
266 .data_o ( keystream[k * 64 +: 64] ),
267 .valid_o ( )
268 );
269
270 // Unread unused bits from keystream
271 if (k == NumParKeystr-1 && (Width % 64) > 0) begin : gen_unread_last
272 localparam int UnusedWidth = 64 - (Width % 64);
273 logic [UnusedWidth-1:0] unused_keystream;
274 1/1 assign unused_keystream = keystream[(k+1) * 64 - 1 -: UnusedWidth];
Tests: T1 T2 T3
275 end
276 end
277
278 // Replicate keystream if needed
279 logic [Width-1:0] keystream_repl;
280 1/1 assign keystream_repl = Width'({NumParKeystr{keystream}});
Tests: T1 T2 T3
281
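Because the keystream is produced by encrypting the IV {nonce, addr} and is only ever XORed onto the data, the same operation both scrambles and descrambles, which is why dec_i can be tied to 1'b0 above. A minimal sketch of that counter-mode symmetry, with a toy 64-bit block function standing in for prim_prince (key, nonce and address values are made up for illustration):

  module ctr_keystream_model;
    localparam logic [63:0] ToyKey = 64'h0123_4567_89ab_cdef;
    // Toy invertible block function, NOT PRINCE; only the XOR symmetry matters here.
    function automatic logic [63:0] toy_cipher(logic [63:0] blk);
      return (blk ^ ToyKey) * 64'h9e37_79b9_7f4a_7c15;
    endfunction
    initial begin
      logic [63:0] keystream, wdata, scrambled;
      keystream = toy_cipher({32'hdead_beef /* nonce */, 32'h0000_0042 /* addr */});
      wdata     = 64'h1122_3344_5566_7788;
      scrambled = wdata ^ keystream;             // write path
      assert ((scrambled ^ keystream) == wdata); // read path reuses the same keystream
    end
  endmodule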
282 /////////////////////
283 // Data Scrambling //
284 /////////////////////
285
286 // Data scrambling is a two step process. First, we XOR the write data with the keystream obtained
287 // by operating a reduced-round PRINCE cipher in CTR-mode. Then, we diffuse data within each byte
288 // in order to get a limited "avalanche" behavior in case parts of the bytes are flipped as a
289 // result of a malicious attempt to tamper with the data in memory. We perform the diffusion only
290 // within bytes in order to maintain the ability to write individual bytes. Note that the
291 // keystream XOR is performed first for the write path such that it can be performed last for the
292 // read path. This allows us to hide a part of the combinational delay of the PRINCE primitive
293 // behind the propagation delay of the SRAM macro and the per-byte diffusion step.
294
295 logic [Width-1:0] rdata_scr, rdata;
296 logic [Width-1:0] wdata_scr_d, wdata_scr_q, wdata_q;
297 for (genvar k = 0; k < (Width + DiffWidth - 1) / DiffWidth; k++) begin : gen_diffuse_data
298 // If the Width is not divisible by DiffWidth, we need to adjust the width of the last slice.
299 localparam int LocalWidth = (Width - k * DiffWidth >= DiffWidth) ? DiffWidth :
300 (Width - k * DiffWidth);
301
302 // Write path. Note that since this does not fan out into the interconnect, the write path is
303 // not as critical as the read path below in terms of timing.
304 // Apply the keystream first
305 logic [LocalWidth-1:0] wdata_xor;
306 1/1 assign wdata_xor = wdata_q[k*DiffWidth +: LocalWidth] ^
Tests: T1 T2 T3
307 keystream_repl[k*DiffWidth +: LocalWidth];
308
309 // Byte aligned diffusion using a substitution / permutation network
310 prim_subst_perm #(
311 .DataWidth ( LocalWidth ),
312 .NumRounds ( NumDiffRounds ),
313 .Decrypt ( 0 )
314 ) u_prim_subst_perm_enc (
315 .data_i ( wdata_xor ),
316 .key_i ( '0 ),
317 .data_o ( wdata_scr_d[k*DiffWidth +: LocalWidth] )
318 );
319
320 // Read path. This is timing critical. The keystream XOR operation is performed last in order to
321 // hide the combinational delay of the PRINCE primitive behind the propagation delay of the
322 // SRAM and the byte diffusion.
323 // Reverse diffusion first
324 logic [LocalWidth-1:0] rdata_xor;
325 prim_subst_perm #(
326 .DataWidth ( LocalWidth ),
327 .NumRounds ( NumDiffRounds ),
328 .Decrypt ( 1 )
329 ) u_prim_subst_perm_dec (
330 .data_i ( rdata_scr[k*DiffWidth +: LocalWidth] ),
331 .key_i ( '0 ),
332 .data_o ( rdata_xor )
333 );
334
335 // Apply Keystream, replicate it if needed
336 1/1 assign rdata[k*DiffWidth +: LocalWidth] = rdata_xor ^
Tests: T1 T2 T3
337 keystream_repl[k*DiffWidth +: LocalWidth];
338 end
339
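A sketch of the two-step scrambling confined to a single DiffWidth slice, with a bit rotation standing in for the prim_subst_perm rounds (all constants are illustrative): the write path applies the keystream XOR first and then diffuses, the read path undoes the two steps in reverse order, and diffusion never crosses a slice boundary, which is what preserves byte-granular writes. Note also how the last slice shrinks when Width is not a multiple of DiffWidth: with, say, Width = 20 and DiffWidth = 8, the generate loop above produces three slices of LocalWidth 8, 8 and 4.

  module byte_diffusion_model;
    localparam int DiffW = 8;  // illustrative slice width
    // Invertible stand-ins for the encrypt/decrypt prim_subst_perm instances.
    function automatic logic [DiffW-1:0] diffuse(logic [DiffW-1:0] d);
      return {d[DiffW-4:0], d[DiffW-1:DiffW-3]};  // rotate left by 3
    endfunction
    function automatic logic [DiffW-1:0] undiffuse(logic [DiffW-1:0] d);
      return {d[2:0], d[DiffW-1:3]};              // rotate right by 3
    endfunction
    initial begin
      logic [DiffW-1:0] wdata, keystream, mem_word, rdata;
      wdata     = 8'h5a;
      keystream = 8'ha5;
      mem_word  = diffuse(wdata ^ keystream);      // write path: XOR first, then diffuse
      rdata     = undiffuse(mem_word) ^ keystream; // read path: undiffuse first, XOR last
      assert (rdata == wdata);
    end
  endmodule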
340 ////////////////////////////////////////////////
341 // Scrambled data register and forwarding mux //
342 ////////////////////////////////////////////////
343
344 // This is the scrambled data holding register for pending writes. This is needed in order to make
345 // back to back patterns of the form WR -> RD -> WR work:
346 //
347 // cycle: 0 | 1 | 2 | 3 |
348 // incoming op: WR0 | RD | WR1 | - |
349 // prince: - | WR0 | RD | WR1 |
350 // memory op: - | RD | WR0 | WR1 |
351 //
352 // The read transaction in cycle 1 interrupts the first write transaction which has already used
353 // the PRINCE primitive for scrambling. If this sequence is followed by another write back-to-back
354 // in cycle 2, we cannot use the PRINCE primitive a second time for the first write, and hence
355 // need an additional holding register that can buffer the scrambled data of the first write in
356 // cycle 1.
357
358 // Clear this if we can write the memory in this cycle. Set only if the current write cannot
359 // proceed due to an incoming read operation.
360 mubi4_t write_scr_pending_d;
361 1/1 assign write_scr_pending_d = (macro_write) ? MuBi4False :
Tests: T1 T2 T3
362 (rw_collision) ? MuBi4True :
363 write_pending_q;
364
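Collapsed to plain bits (the RTL keeps the flag as a mubi4_t), the next-state rule above is: a write that reaches the macro this cycle clears the pending flag, a write displaced by an incoming read sets it, and otherwise it holds. A minimal sketch of that priority using boolean stand-ins for the signals above:

  module write_pending_model;
    // Boolean stand-in for the mubi4_t priority ternary above.
    function automatic bit next_pending(bit macro_write, bit rw_collision, bit pending_q);
      return macro_write ? 1'b0 : (rw_collision ? 1'b1 : pending_q);
    endfunction
    initial begin
      assert (next_pending(1'b1, 1'b1, 1'b0) == 1'b0); // write reached the macro: clear
      assert (next_pending(1'b0, 1'b1, 1'b0) == 1'b1); // displaced by a read: set
      assert (next_pending(1'b0, 1'b0, 1'b1) == 1'b1); // otherwise: hold
    end
  endmodule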
365 // Select the correct scrambled word to be written, based on whether the word in the scrambled
366 // data holding register is valid or not. Note that the write_scr_q register could in theory be
367 // combined with the wdata_q register. We don't do that here for timing reasons, since that would
368 // require another read data mux to inject the scrambled data into the read descrambling path.
369 logic [Width-1:0] wdata_scr;
370 1/1 assign wdata_scr = (mubi4_test_true_loose(write_pending_q)) ? wdata_scr_q : wdata_scr_d;
Tests: T1 T2 T3
371
372 mubi4_t rvalid_q;
373 logic intg_error_r_q;
374 logic [Width-1:0] wmask_q;
375 always_comb begin : p_forward_mux
376 1/1 rdata_o = '0;
Tests: T1 T2 T3
377 1/1 rvalid_o = 1'b0;
Tests: T1 T2 T3
378 // Kill the read response in case an integrity error was seen.
379 1/1 if (!intg_error_r_q && mubi4_test_true_loose(rvalid_q)) begin
Tests: T1 T2 T3
380 1/1 rvalid_o = 1'b1;
Tests: T5 T7 T8
381 // In case of a collision, we forward the valid bytes of the write data from the unscrambled
382 // holding register.
383 1/1 if (mubi4_test_true_loose(addr_collision_q)) begin
Tests: T5 T7 T8
384 1/1 for (int k = 0; k < Width; k++) begin
Tests: T53 T54 T55
385 1/1 if (wmask_q[k]) begin
Tests: T53 T54 T55
386 1/1 rdata_o[k] = wdata_q[k];
Tests: T53 T54 T55
387 end else begin
388 0/1 ==> rdata_o[k] = rdata[k];
389 end
390 end
391 // regular reads. note that we just return zero in case
392 // an integrity error was signalled.
393 end else begin
394 1/1 rdata_o = rdata;
Tests: T5 T7 T8
395 end
396 end
MISSING_ELSE
397 end
398
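The per-bit forwarding loop in p_forward_mux above is equivalent to a masked merge: bits covered by the stored write mask come from the unscrambled write-data holding register, all other bits come from the descrambled read data. A small check of that equivalence with an assumed 8-bit width:

  module forward_mux_model;
    initial begin
      logic [7:0] wmask_q, wdata_q, rdata, rdata_o;
      wmask_q = 8'b1111_0000;
      wdata_q = 8'b1010_0000;
      rdata   = 8'b0000_0101;
      // Masked merge, same result as the for-loop over individual bits.
      rdata_o = (wdata_q & wmask_q) | (rdata & ~wmask_q);
      assert (rdata_o == 8'b1010_0101);
    end
  endmodule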
399 ///////////////
400 // Registers //
401 ///////////////
402 logic ram_alert;
403
404 1/1 assign alert_o = mubi4_test_invalid(write_en_q) | mubi4_test_invalid(addr_collision_q) |
Tests: T1 T2 T3
405 mubi4_test_invalid(write_pending_q) | mubi4_test_invalid(rvalid_q) |
406 ram_alert;
407
408 prim_flop #(
409 .Width(MuBi4Width),
410 .ResetValue(MuBi4Width'(MuBi4False))
411 ) u_write_en_flop (
412 .clk_i,
413 .rst_ni,
414 .d_i(MuBi4Width'(write_en_buf_d)),
415 .q_o({write_en_q})
416 );
417
418 prim_flop #(
419 .Width(MuBi4Width),
420 .ResetValue(MuBi4Width'(MuBi4False))
421 ) u_addr_collision_flop (
422 .clk_i,
423 .rst_ni,
424 .d_i(MuBi4Width'(addr_collision_d)),
425 .q_o({addr_collision_q})
426 );
427
428 prim_flop #(
429 .Width(MuBi4Width),
430 .ResetValue(MuBi4Width'(MuBi4False))
431 ) u_write_pending_flop (
432 .clk_i,
433 .rst_ni,
434 .d_i(MuBi4Width'(write_scr_pending_d)),
435 .q_o({write_pending_q})
436 );
437
438 prim_flop #(
439 .Width(MuBi4Width),
440 .ResetValue(MuBi4Width'(MuBi4False))
441 ) u_rvalid_flop (
442 .clk_i,
443 .rst_ni,
444 .d_i(MuBi4Width'(read_en_buf)),
445 .q_o({rvalid_q})
446 );
447
448 1/1 assign read_en_b = mubi4_test_true_loose(read_en_buf);
Tests: T1 T2 T3
449 1/1 assign write_en_b = mubi4_test_true_loose(write_en_buf_d);
Tests: T1 T2 T3
450
451 always_ff @(posedge clk_i or negedge rst_ni) begin : p_wdata_buf
452 1/1 if (!rst_ni) begin
Tests: T1 T2 T3
453 1/1 intg_error_r_q <= 1'b0;
Tests: T1 T2 T3
454 1/1 intg_error_w_q <= 1'b0;
Tests: T1 T2 T3
455 1/1 raddr_q <= '0;
Tests: T1 T2 T3
456 1/1 waddr_scr_q <= '0;
Tests: T1 T2 T3
457 1/1 wmask_q <= '0;
Tests: T1 T2 T3
458 1/1 wdata_q <= '0;
Tests: T1 T2 T3
459 1/1 wdata_scr_q <= '0;
Tests: T1 T2 T3
460 end else begin
461 1/1 intg_error_r_q <= intg_error_buf;
Tests: T1 T2 T3
462
463 1/1 if (read_en_b) begin
Tests: T1 T2 T3
464 1/1 raddr_q <= addr_i;
Tests: T5 T7 T8
465 end
MISSING_ELSE
466 1/1 if (write_en_b) begin
Tests: T1 T2 T3
467 1/1 waddr_scr_q <= addr_scr;
Tests: T2 T12 T5
468 1/1 wmask_q <= wmask_i;
Tests: T2 T12 T5
469 1/1 wdata_q <= wdata_i;
Tests: T2 T12 T5
470 1/1 intg_error_w_q <= intg_error_buf;
Tests: T2 T12 T5
471 end
MISSING_ELSE
472 1/1 if (rw_collision) begin
Tests: T1 T2 T3
473 1/1 wdata_scr_q <= wdata_scr_d;
Tests: T6 T11 T9
474 end
MISSING_ELSE
Cond Coverage for Module : prim_ram_1p_scr
Category | Total | Covered | Percent |
Conditions | 11 | 11 | 100.00 |
Logical | 11 | 11 | 100.00 |
Non-Logical | 0 | 0 | |
Event | 0 | 0 | |
LINE 133
EXPRESSION (req_i & key_valid_i)
-1-: req_i   -2-: key_valid_i
-1- | -2- | Status | Tests |
0 | 1 | Covered | T1,T2,T3 |
1 | 0 | Covered | T6,T9,T10 |
1 | 1 | Covered | T2,T12,T5 |
LINE 163
EXPRESSION ((addr_scr == waddr_scr_q) ? MuBi4True : MuBi4False)
-1-: (addr_scr == waddr_scr_q)
-1- | Status | Tests |
0 | Covered | T1,T2,T3 |
1 | Covered | T5,T7,T8 |
LINE 163
SUB-EXPRESSION (addr_scr == waddr_scr_q)
-1-: (addr_scr == waddr_scr_q)
-1- | Status | Tests |
0 | Covered | T1,T2,T3 |
1 | Covered | T5,T7,T8 |
LINE 361
EXPRESSION (macro_write ? MuBi4False : (rw_collision ? MuBi4True : write_pending_q))
-1-: macro_write
-1- | Status | Tests |
0 | Covered | T1,T2,T3 |
1 | Covered | T2,T12,T5 |
LINE 361
SUB-EXPRESSION (rw_collision ? MuBi4True : write_pending_q)
-1-: rw_collision
-1- | Status | Tests |
0 | Covered | T1,T2,T3 |
1 | Covered | T6,T11,T9 |
Branch Coverage for Module : prim_ram_1p_scr
Kind | Line No. | Total | Covered | Percent |
Branches | | 15 | 15 | 100.00 |
TERNARY | 163 | 2 | 2 | 100.00 |
TERNARY | 361 | 3 | 3 | 100.00 |
IF | 379 | 3 | 3 | 100.00 |
IF | 452 | 7 | 7 | 100.00 |
163 assign addr_match = (addr_scr == waddr_scr_q) ? MuBi4True : MuBi4False;
-1-: (addr_scr == waddr_scr_q)
Branches:
-1- | Status | Tests |
1 | Covered | T5,T7,T8 |
0 | Covered | T1,T2,T3 |
361 assign write_scr_pending_d = (macro_write) ? MuBi4False :
362 (rw_collision) ? MuBi4True :
-1-: (macro_write)   -2-: (rw_collision)
Branches:
-1- | -2- | Status | Tests |
1 | - | Covered | T2,T12,T5 |
0 | 1 | Covered | T6,T11,T9 |
0 | 0 | Covered | T1,T2,T3 |
379 if (!intg_error_r_q && mubi4_test_true_loose(rvalid_q)) begin
-1-
380 rvalid_o = 1'b1;
381 // In case of a collision, we forward the valid bytes of the write data from the unscrambled
382 // holding register.
383 if (mubi4_test_true_loose(addr_collision_q)) begin
-2-
384 for (int k = 0; k < Width; k++) begin
385 if (wmask_q[k]) begin
386 rdata_o[k] = wdata_q[k];
387 end else begin
388 rdata_o[k] = rdata[k];
389 end
390 end
391 // regular reads. note that we just return zero in case
392 // an integrity error was signalled.
393 end else begin
394 rdata_o = rdata;
395 end
396 end
MISSING_ELSE
Branches:
-1- | -2- | Status | Tests |
1 | 1 | Covered | T53,T54,T55 |
1 | 0 | Covered | T5,T7,T8 |
0 | - | Covered | T1,T2,T3 |
452 if (!rst_ni) begin
-1-
453 intg_error_r_q <= 1'b0;
454 intg_error_w_q <= 1'b0;
455 raddr_q <= '0;
456 waddr_scr_q <= '0;
457 wmask_q <= '0;
458 wdata_q <= '0;
459 wdata_scr_q <= '0;
460 end else begin
461 intg_error_r_q <= intg_error_buf;
462
463 if (read_en_b) begin
-2-
464 raddr_q <= addr_i;
465 end
MISSING_ELSE
466 if (write_en_b) begin
-3-
467 waddr_scr_q <= addr_scr;
468 wmask_q <= wmask_i;
469 wdata_q <= wdata_i;
470 intg_error_w_q <= intg_error_buf;
471 end
MISSING_ELSE
472 if (rw_collision) begin
-4-
473 wdata_scr_q <= wdata_scr_d;
474 end
MISSING_ELSE
Branches:
-1- | -2- | -3- | -4- | Status | Tests |
1 | - | - | - | Covered | T1,T2,T3 |
0 | 1 | - | - | Covered | T5,T7,T8 |
0 | 0 | - | - | Covered | T1,T2,T3 |
0 | - | 1 | - | Covered | T2,T12,T5 |
0 | - | 0 | - | Covered | T1,T2,T3 |
0 | - | - | 1 | Covered | T6,T11,T9 |
0 | - | - | 0 | Covered | T1,T2,T3 |
Assert Coverage for Module : prim_ram_1p_scr
Assertion Details

DepthPow2Check_A
Name | Attempts | Real Successes | Failures | Incomplete |
Total | 897 | 897 | 0 | 0 |
T1 | 1 | 1 | 0 | 0 |
T2 | 1 | 1 | 0 | 0 |
T3 | 1 | 1 | 0 | 0 |
T4 | 1 | 1 | 0 | 0 |
T5 | 1 | 1 | 0 | 0 |
T6 | 1 | 1 | 0 | 0 |
T7 | 1 | 1 | 0 | 0 |
T8 | 1 | 1 | 0 | 0 |
T12 | 1 | 1 | 0 | 0 |
T13 | 1 | 1 | 0 | 0 |

DiffWidthMinimum_A
Name | Attempts | Real Successes | Failures | Incomplete |
Total | 897 | 897 | 0 | 0 |
T1 | 1 | 1 | 0 | 0 |
T2 | 1 | 1 | 0 | 0 |
T3 | 1 | 1 | 0 | 0 |
T4 | 1 | 1 | 0 | 0 |
T5 | 1 | 1 | 0 | 0 |
T6 | 1 | 1 | 0 | 0 |
T7 | 1 | 1 | 0 | 0 |
T8 | 1 | 1 | 0 | 0 |
T12 | 1 | 1 | 0 | 0 |
T13 | 1 | 1 | 0 | 0 |

DiffWidthWithParity_A
Name | Attempts | Real Successes | Failures | Incomplete |
Total | 897 | 897 | 0 | 0 |
T1 | 1 | 1 | 0 | 0 |
T2 | 1 | 1 | 0 | 0 |
T3 | 1 | 1 | 0 | 0 |
T4 | 1 | 1 | 0 | 0 |
T5 | 1 | 1 | 0 | 0 |
T6 | 1 | 1 | 0 | 0 |
T7 | 1 | 1 | 0 | 0 |
T8 | 1 | 1 | 0 | 0 |
T12 | 1 | 1 | 0 | 0 |
T13 | 1 | 1 | 0 | 0 |