[BUG] metric_tensor with batches of tapes breaks on legacy devices #7072

Open
dwierichs opened this issue Mar 11, 2025 · 5 comments
Labels
bug 🐛 Something isn't working

Comments

@dwierichs (Contributor)

Expected behavior

The metric_tensor should be computable even when measuring a Hamiltonian leads to a batch of tapes being created, and also when a legacy device is used.

Note that the metric tensor should not differ between the tapes in the batch, so only one tensor needs to be computed.
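As an illustration of this point (a hedged workaround sketch, not a fix): because the tensor depends only on the ansatz, one can compute it from an otherwise identical QNode that measures a single observable, which avoids the tape batch entirely. Note that metric_tensor may warn and fall back to the block-diagonal approximation if no auxiliary wire is available on the device.

import pennylane as qml
from pennylane import numpy as pnp

dev = qml.device("default.mixed", wires=4)

# Same ansatz as in the reproducer below; only the measurement differs.
@qml.qnode(dev)
def probe(params):
    for i in range(4):
        qml.RY(params[i], wires=i)
    return qml.expval(qml.Z(0))  # single observable, so no batch of tapes

params = pnp.array([0.1, 0.2, 0.3, 0.4])

# The same tensor the Hamiltonian-measuring QNode should produce
qml.metric_tensor(probe)(params)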

Actual behavior

On a device using the legacy device API, such as default.mixed, metric_tensor breaks when a Hamiltonian measurement leads to a batch of tapes.

Additional information

Also see this discussion in the forum.

Source code

import pennylane as qml
from pennylane import numpy as pnp

# Measuring this Hamiltonian leads to a batch of tapes being created
hamiltonian = qml.Hamiltonian([0.5, 0.4], [qml.X(0) @ qml.Z(1), qml.Z(0)])

# default.mixed uses the legacy device API
dev = qml.device("default.mixed", wires=4)

def ansatz(params):
    for i in range(4):
        qml.RY(params[i], wires=i)

@qml.qnode(dev)
def cost(params):
    ansatz(params)
    return qml.expval(hamiltonian)

params = pnp.array([0.1, 0.2, 0.3, 0.4])

qml.metric_tensor(cost)(params)

Tracebacks

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[2], line 21
     17     return qml.expval(hamiltonian)
     19 params = np.array([0.1, 0.2, 0.3, 0.4])
---> 21 qml.metric_tensor(cost)(params)
     23 # max_iterations = 10
     24 # step_size = 0.5 
     25 
   (...)
     48 # trajectory_qng = np.array(trajectory_qng)
     49 # qngd_cost = np.array(qngd_cost)

File ~/repos/pennylane/pennylane/workflow/qnode.py:881, in QNode.__call__(self, *args, **kwargs)
    878     from ._capture_qnode import capture_qnode  # pylint: disable=import-outside-toplevel
    880     return capture_qnode(self, *args, **kwargs)
--> 881 return self._impl_call(*args, **kwargs)

File ~/repos/pennylane/pennylane/workflow/qnode.py:854, in QNode._impl_call(self, *args, **kwargs)
    851 # Calculate the classical jacobians if necessary
    852 self._transform_program.set_classical_component(self, args, kwargs)
--> 854 res = qml.execute(
    855     (tape,),
    856     device=self.device,
    857     diff_method=self.diff_method,
    858     interface=self.interface,
    859     transform_program=self._transform_program,
    860     gradient_kwargs=self.gradient_kwargs,
    861     **self.execute_kwargs,
    862 )
    863 res = res[0]
    865 # convert result to the interface in case the qfunc has no parameters

File ~/repos/pennylane/pennylane/workflow/execution.py:239, in execute(tapes, device, diff_method, interface, transform_program, grad_on_execution, cache, cachesize, max_diff, device_vjp, postselect_mode, mcm_method, gradient_kwargs, mcm_config, config, inner_transform)
    234 transform_program, inner_transform = _setup_transform_program(
    235     transform_program, device, config, cache, cachesize
    236 )
    238 #### Executing the configured setup #####
--> 239 tapes, post_processing = transform_program(tapes)
    241 if transform_program.is_informative:
    242     return post_processing(tapes)

File ~/repos/pennylane/pennylane/transforms/core/transform_program.py:575, in TransformProgram.__call__(self, tapes)
    573 start = 0
    574 start_classical = 0
--> 575 classical_jacobians = self._get_classical_jacobian(i)
    576 argnums = self._get_argnums(i)
    577 for j, tape in enumerate(tapes):

File ~/repos/pennylane/pennylane/transforms/core/transform_program.py:525, in TransformProgram._get_classical_jacobian(self, index)
    520     raise qml.QuantumFunctionError(
    521         "argnum does not work with the Jax interface. You should use argnums instead."
    522     )
    524 f = partial(_classical_preprocessing, qnode, self[:index])
--> 525 classical_jacobian = _jac_map[interface](f, argnums, *args, **kwargs)
    527 # autograd and tf cant handle pytrees, so need to unsqueeze the squeezing
    528 # done in _classical_preprocessing
    529 tape = qml.workflow.construct_tape(qnode, level=0)(*args, **kwargs)

File ~/repos/pennylane/pennylane/transforms/core/transform_program.py:50, in _autograd_jac(classical_function, argnums, *args, **kwargs)
     48 if not qml.math.get_trainable_indices(args) and argnums is None:
     49     raise qml.QuantumFunctionError("No trainable parameters.")
---> 50 return qml.jacobian(classical_function, argnum=argnums)(*args, **kwargs)

File ~/repos/pennylane/pennylane/_grad.py:532, in jacobian.<locals>._jacobian_function(*args, **kwargs)
    526 if not _argnum:
    527     warnings.warn(
    528         "Attempted to differentiate a function with no trainable parameters. "
    529         "If this is unintended, please add trainable parameters via the "
    530         "'requires_grad' attribute or 'argnum' keyword."
    531     )
--> 532 jac = tuple(_jacobian(_error_if_not_array(func), arg)(*args, **kwargs) for arg in _argnum)
    534 return jac[0] if unpack else jac

File ~/repos/pennylane/pennylane/_grad.py:532, in <genexpr>(.0)
    526 if not _argnum:
    527     warnings.warn(
    528         "Attempted to differentiate a function with no trainable parameters. "
    529         "If this is unintended, please add trainable parameters via the "
    530         "'requires_grad' attribute or 'argnum' keyword."
    531     )
--> 532 jac = tuple(_jacobian(_error_if_not_array(func), arg)(*args, **kwargs) for arg in _argnum)
    534 return jac[0] if unpack else jac

File ~/venvs/dev/lib/python3.10/site-packages/autograd/wrap_util.py:20, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f(*args, **kwargs)
     18 else:
     19     x = tuple(args[i] for i in argnum)
---> 20 return unary_operator(unary_f, x, *nary_op_args, **nary_op_kwargs)

File ~/venvs/dev/lib/python3.10/site-packages/autograd/differential_operators.py:60, in jacobian(fun, x)
     50 @unary_to_nary
     51 def jacobian(fun, x):
     52     """
     53     Returns a function which computes the Jacobian of `fun` with respect to
     54     positional argument number `argnum`, which must be a scalar or array. Unlike
   (...)
     58     (out1, out2, ...) then the Jacobian has shape (out1, out2, ..., in1, in2, ...).
     59     """
---> 60     vjp, ans = _make_vjp(fun, x)
     61     ans_vspace = vspace(ans)
     62     jacobian_shape = ans_vspace.shape + vspace(x).shape

File ~/venvs/dev/lib/python3.10/site-packages/autograd/core.py:10, in make_vjp(fun, x)
      8 def make_vjp(fun, x):
      9     start_node = VJPNode.new_root()
---> 10     end_value, end_node =  trace(start_node, fun, x)
     11     if end_node is None:
     12         def vjp(g): return vspace(x).zeros()

File ~/venvs/dev/lib/python3.10/site-packages/autograd/tracer.py:10, in trace(start_node, fun, x)
      8 with trace_stack.new_trace() as t:
      9     start_box = new_box(x, t, start_node)
---> 10     end_box = fun(start_box)
     11     if isbox(end_box) and end_box._trace == start_box._trace:
     12         return end_box._value, end_box._node

File ~/venvs/dev/lib/python3.10/site-packages/autograd/wrap_util.py:15, in unary_to_nary.<locals>.nary_operator.<locals>.nary_f.<locals>.unary_f(x)
     13 else:
     14     subargs = subvals(args, zip(argnum, x))
---> 15 return fun(*subargs, **kwargs)

File ~/repos/pennylane/pennylane/_grad.py:261, in _error_if_not_array.<locals>.new_f(*args, **kwargs)
    259 output = f(*args, **kwargs)
    260 if output.__class__.__module__.split(".")[0] not in {"autograd", "pennylane", "numpy"}:
--> 261     raise ValueError(
    262         f"autograd can only differentiate with respect to arrays, not {type(output)}. Ensure the output class is an autograd array."
    263     )
    264 return output

ValueError: autograd can only differentiate with respect to arrays, not <class 'tuple'>. Ensure the output class is an autograd array.

System information

pl dev

Existing GitHub issues

  • I have searched existing GitHub issues to make sure the issue does not already exist.
dwierichs added the bug 🐛 Something isn't working label on Mar 11, 2025
@albi3ro (Contributor) commented Mar 12, 2025

Note that this is not specific to metric_tensor or legacy devices. It happens with any combination of a batch transform and a transform with a classical cotransform.

For example:

import pennylane as qml
from pennylane import numpy as pnp

hamiltonian = qml.Hamiltonian([0.5, 0.4], [qml.X(0) @ qml.Z(1), qml.Z(0)])

dev = qml.device("default.qubit", wires=4)

def ansatz(params):
    for i in range(4):
        qml.RY(params[i], wires=i)

# split_non_commuting is a batch transform; param_shift below carries
# a classical cotransform
@qml.transforms.split_non_commuting
@qml.qnode(dev)
def cost(params):
    ansatz(params)
    return qml.expval(hamiltonian)

params = pnp.array([0.1, 0.2, 0.3, 0.4])

qml.gradients.param_shift(cost)(params)

This gives rise to the same error.
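For intuition, here is a minimal standalone sketch of the failure mechanism (an illustration, not PennyLane internals): once a batch transform multiplies the tapes, the classical preprocessing function effectively returns one set of tape parameters per tape, i.e. a tuple, and qml.jacobian refuses to differentiate tuple outputs:

import pennylane as qml
from pennylane import numpy as pnp

def classical_preprocessing(x):
    # Stand-in for _classical_preprocessing after a batch transform:
    # one parameter array per tape in the batch, returned as a tuple
    return (2 * x, 3 * x)

x = pnp.array([0.1, 0.2], requires_grad=True)
qml.jacobian(classical_preprocessing)(x)
# ValueError: autograd can only differentiate with respect to arrays,
# not <class 'tuple'>. Ensure the output class is an autograd array.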

@dwierichs (Contributor, Author)

This bug is about the ordering of transforms, I suppose.
So your observation makes sense, @albi3ro. For legacy devices, the default ordering of transforms causes the problem, but with custom transform programs it also arises on non-legacy devices 👍

@albi3ro (Contributor) commented Mar 12, 2025

And lightning.qubit with broadcasting also triggers the error:

import pennylane as qml
from pennylane import numpy as pnp

dev = qml.device("lightning.qubit", wires=4)

def ansatz(params):
    for i in range(4):
        qml.RY(params[i], wires=i)

@qml.qnode(dev)
def cost(params):
    ansatz(params)
    return qml.expval(qml.Z(0))

# Broadcasted parameters: each RY receives a batch of two angles,
# which lightning.qubit expands into a batch of tapes
params = pnp.array([[0.1, 1.1], [0.2, 1.2], [0.3, 1.3], [0.4, 1.4]])

qml.gradients.param_shift(cost)(params)

@albi3ro (Contributor) commented Mar 12, 2025

We should at least start throwing an error here instead of waiting for the ML framework to fall over:

# autograd and tf cant handle pytrees, so need to squeeze batches
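As a sketch of what such an early check could look like (purely hypothetical; the helper name and the attributes classical_cotransform and is_batch_transform are assumed here, not the actual TransformContainer API):

# Hypothetical guard (sketch only): fail fast with a clear message
# instead of letting autograd raise deep inside the classical Jacobian.
def validate_cotransform_ordering(program):
    """Raise if a transform with a classical cotransform comes after
    a transform that turns one tape into a batch of tapes."""
    seen_batch_transform = False
    for container in program:
        if seen_batch_transform and getattr(container, "classical_cotransform", None):
            raise ValueError(
                "A transform with a classical cotransform cannot be applied "
                "after a transform that creates a batch of tapes."
            )
        # Assumed attribute: whether this transform maps one tape to many
        if getattr(container, "is_batch_transform", False):
            seen_batch_transform = True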

@CatalinaAlbornoz (Contributor)

We have a new report of a weird issue with metric_tensor (see Forum thread 8118). In the error traceback, I see that the issue originates in some methods in metric_tensor.py.

Could this be related to the issues already surfaced here?

Here's the full error traceback:

/usr/local/lib/python3.11/dist-packages/pennylane/transforms/decompose.py:337: UserWarning: Operator PhaseDamping does not define a decomposition and was not found in the target gate set. To remove this warning, add the operator name (PhaseDamping) or type (<class 'pennylane.ops.channel.PhaseDamping'>) to the gate set.
  warnings.warn(
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-2-1944c42fa16c> in <cell line: 0>()
     45     return qml.density_matrix(wires=[0, 1])
     46 
---> 47 qfi_matrix = qml.gradients.quantum_fisher(bell_state_circuit)(theta_values, coeff_z_values, 0.1)
     48 print(qfi_matrix)

11 frames
/usr/local/lib/python3.11/dist-packages/pennylane/workflow/qnode.py in __call__(self, *args, **kwargs)
    903         if qml.capture.enabled():
    904             return capture_qnode(self, *args, **kwargs)
--> 905         return self._impl_call(*args, **kwargs)
    906 
    907 

/usr/local/lib/python3.11/dist-packages/pennylane/workflow/qnode.py in _impl_call(self, *args, **kwargs)
    879         self._transform_program.set_classical_component(self, args, kwargs)
    880 
--> 881         res = qml.execute(
    882             (tape,),
    883             device=self.device,

/usr/local/lib/python3.11/dist-packages/pennylane/workflow/execution.py in execute(tapes, device, diff_method, interface, transform_program, inner_transform, config, grad_on_execution, gradient_kwargs, cache, cachesize, max_diff, device_vjp, mcm_config, gradient_fn)
    228 
    229     if transform_program.is_informative:
--> 230         return post_processing(tapes)
    231 
    232     results = run(tapes, device, config, inner_transform)

/usr/local/lib/python3.11/dist-packages/pennylane/transforms/core/transform_program.py in _apply_postprocessing_stack(results, postprocessing_stack)
    194     """
    195     for postprocessing in reversed(postprocessing_stack):
--> 196         results = postprocessing(results)
    197     return results
    198 

/usr/local/lib/python3.11/dist-packages/pennylane/transforms/core/transform_program.py in _batch_postprocessing(results, individual_fns, slices)
    164 
    165     """
--> 166     return tuple(fn(results[sl]) for fn, sl in zip(individual_fns, slices))
    167 
    168 

/usr/local/lib/python3.11/dist-packages/pennylane/transforms/core/transform_program.py in <genexpr>(.0)
    164 
    165     """
--> 166     return tuple(fn(results[sl]) for fn, sl in zip(individual_fns, slices))
    167 
    168 

/usr/local/lib/python3.11/dist-packages/pennylane/gradients/metric_tensor.py in _contract_metric_tensor_with_cjac(mt, cjac, tape)
     64             return mt
     65 
---> 66     return _mt_cjac_tdot(mt, cjac)
     67 
     68 

/usr/local/lib/python3.11/dist-packages/pennylane/gradients/metric_tensor.py in _mt_cjac_tdot(mt, c)
     31 
     32 def _mt_cjac_tdot(mt, c):
---> 33     return qml.math.tensordot(c, qml.math.tensordot(mt, c, axes=[[-1], [0]]), axes=[[0], [0]])
     34 
     35 

/usr/local/lib/python3.11/dist-packages/pennylane/math/multi_dispatch.py in wrapper(*args, **kwargs)
    151             kwargs["like"] = interface
    152 
--> 153             return fn(*args, **kwargs)
    154 
    155         return wrapper

/usr/local/lib/python3.11/dist-packages/pennylane/math/multi_dispatch.py in tensordot(tensor1, tensor2, axes, like)
    402     """
    403     tensor1, tensor2 = np.coerce([tensor1, tensor2], like=like)
--> 404     return np.tensordot(tensor1, tensor2, axes=axes, like=like)
    405 
    406 

/usr/local/lib/python3.11/dist-packages/autoray/autoray.py in do(fn, like, *args, **kwargs)
     79     backend = _choose_backend(fn, args, kwargs, like=like)
     80     func = get_lib_fn(backend, fn)
---> 81     return func(*args, **kwargs)
     82 
     83 

/usr/local/lib/python3.11/dist-packages/numpy/core/numeric.py in tensordot(a, b, axes)
   1097                 axes_b[k] += ndb
   1098     if not equal:
-> 1099         raise ValueError("shape-mismatch for sum")
   1100 
   1101     # Move the axes to sum over to the end of "a"

ValueError: shape-mismatch for sum
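For context on the final frames: _mt_cjac_tdot contracts the metric tensor with the classical Jacobian on both sides, cjac^T @ mt @ cjac. A shape sketch with made-up dimensions shows the contraction and how a mismatched Jacobian triggers exactly this error:

import numpy as np

# Illustrative shapes only: mt is (p, p) in tape parameters, and cjac
# maps the p tape parameters onto k QNode arguments, i.e. shape (p, k).
p, k = 6, 4
mt = np.ones((p, p))
cjac = np.ones((p, k))

# The contraction from _mt_cjac_tdot: cjac^T @ mt @ cjac -> (k, k)
out = np.tensordot(cjac, np.tensordot(mt, cjac, axes=[[-1], [0]]), axes=[[0], [0]])
assert out.shape == (k, k)

# If the batch postprocessing hands over a cjac whose leading dimension
# does not match mt, tensordot raises "shape-mismatch for sum".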
