@@ -217,6 +217,7 @@ def test_unary_encoder_random_gaussian(backend, nqubits, seed):
     backend.assert_allclose(stddev, theoretical_norm, atol=1e-1)
 
 
+@pytest.mark.parametrize("seed", [10])
 @pytest.mark.parametrize("optimize_controls", [False, True])
 @pytest.mark.parametrize("complex_data", [False, True])
 @pytest.mark.parametrize("full_hwp", [False, True])
@@ -229,6 +230,7 @@ def test_hamming_weight_encoder(
     full_hwp,
     complex_data,
     optimize_controls,
+    seed,
 ):
     n_choose_k = int(binom(nqubits, weight))
     dims = 2**nqubits
@@ -237,15 +239,16 @@ def test_hamming_weight_encoder(
     initial_string = np.array([1] * weight + [0] * (nqubits - weight))
     indices = _ehrlich_algorithm(initial_string, False)
     indices = [int(string, 2) for string in indices]
+    indices_lex = np.sort(np.copy(indices))
 
-    rng = np.random.default_rng(10)
+    rng = np.random.default_rng(seed)
     data = rng.random(n_choose_k)
     if complex_data:
         data = data.astype(complex) + 1j * rng.random(n_choose_k)
     data /= np.linalg.norm(data)
 
     target = np.zeros(dims, dtype=dtype)
-    target[indices] = data
+    target[indices_lex] = data
     target = backend.cast(target, dtype=target.dtype)
 
     circuit = hamming_weight_encoder(
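A minimal sketch of why the test now sorts the Ehrlich indices before filling the target state. Assumptions not taken from the diff: _ehrlich_algorithm is treated as returning the fixed-weight bitstrings in a Gray-code-like, non-lexicographic order (the strings below are a hypothetical stand-in for its output), and hamming_weight_encoder is assumed to place the amplitudes on the lexicographically ordered basis states.

# Sketch only, not part of the diff: weight-2 strings on 4 qubits in a
# hypothetical non-lexicographic (Ehrlich-style) enumeration order.
import numpy as np

ehrlich_order = ["0011", "0110", "0101", "1100", "1010", "1001"]
indices = [int(string, 2) for string in ehrlich_order]  # [3, 6, 5, 12, 10, 9]
indices_lex = np.sort(np.copy(indices))                 # [3, 5, 6, 9, 10, 12]

# Normalized dummy amplitudes, written at the sorted positions so they line up
# with the lexicographic ordering of the computational basis states.
data = np.arange(1, len(indices_lex) + 1, dtype=float)
data /= np.linalg.norm(data)

target = np.zeros(2**4, dtype=complex)
target[indices_lex] = data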