Skip to content

Commit be1599f

Browse files
maliasadimlxd
and authored
Add VJP support to PL-Lightning (#181)
* Add vecMatrixProd * Add vjp to lightning_qubit.py and tests * Update formatting * Add class VectorJacobianProduct * Update formatting * Update C++ class * Update clang-tidy * Update VectorJacobianProduct public methods * Add Test_VJP.cpp * Update cpp tests * Update cpp tests * Complete adding cpp tests * Update formatting w/ clang-tidy-12 * Add Bindings * Update Bindings * Fix issue with vjp pybinds and Add more tests * Update #181 * Apply codecov suggestions * Apply code review suggestions * Fix rendering math formulas in docs * Apply code factor suggestions * Update python tests * Update vector_jacobian_product method * Add adjoint_diff_support_check method * Add more tests for batch_vjp * Update VJP Python bindings * Update tests Co-authored-by: Lee James O'Riordan <[email protected]>
1 parent 82c197b commit be1599f

12 files changed

+1855
-37
lines changed

.github/CHANGELOG.md

+5-4
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,9 @@
22

33
### New features since last release
44

5+
* Add VJP support to PL-Lightning.
6+
[(#181)](https://github.com/PennyLaneAI/pennylane-lightning/pull/181)
7+
58
* Added examples folder containing aggregate gate performance test.
69
[(#165)](https://github.com/PennyLaneAI/pennylane-lightning/pull/165)
710

@@ -14,18 +17,16 @@
1417

1518
### Documentation
1619

17-
* Compile guide for MSVC is added.
20+
* Lightning setup.py build process uses CMake.
1821
[(#176)](https://github.com/PennyLaneAI/pennylane-lightning/pull/176)
1922

2023
### Bug fixes
2124

2225
### Contributors
2326

24-
Chae-Yeun Park
25-
2627
This release contains contributions from (in alphabetical order):
2728

28-
Ali Asadi, Isidor Schoch
29+
Ali Asadi, Chae-Yeun Park, Isidor Schoch
2930

3031
---
3132

pennylane_lightning/lightning_qubit.py

+191-9
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020

2121
import numpy as np
2222
from pennylane import (
23+
math,
2324
BasisState,
2425
DeviceError,
2526
QuantumFunctionError,
@@ -42,13 +43,15 @@
4243
StateVectorC64,
4344
StateVectorC128,
4445
AdjointJacobianC128,
46+
VectorJacobianProductC128,
4547
)
4648
else:
4749
from .lightning_qubit_ops import (
4850
apply,
4951
StateVectorC64,
5052
StateVectorC128,
5153
AdjointJacobianC128,
54+
VectorJacobianProductC128,
5255
)
5356
from ._serialize import _serialize_obs, _serialize_ops
5457

@@ -171,17 +174,15 @@ def apply_lightning(self, state, operations):
171174

172175
return np.reshape(state_vector, state.shape)
173176

174-
def adjoint_jacobian(self, tape, starting_state=None, use_device_state=False):
175-
if self.shots is not None:
176-
warn(
177-
"Requested adjoint differentiation to be computed with finite shots."
178-
" The derivative is always exact when using the adjoint differentiation method.",
179-
UserWarning,
180-
)
177+
def adjoint_diff_support_check(self, tape):
178+
"""Check Lightning adjoint differentiation method support for a tape.
181179
182-
if len(tape.trainable_params) == 0:
183-
return np.array(0)
180+
Raise ``QuantumFunctionError`` if ``tape`` contains measurements,
181+
observables, or operations not supported by the Lightning adjoint differentiation method.
184182
183+
Args:
184+
tape (.QuantumTape): quantum tape to differentiate
185+
"""
185186
for m in tape.measurements:
186187
if m.return_type is not Expectation:
187188
raise QuantumFunctionError(
@@ -216,6 +217,20 @@ def adjoint_jacobian(self, tape, starting_state=None, use_device_state=False):
216217
'the "adjoint" differentiation method'
217218
)
218219

220+
def adjoint_jacobian(self, tape, starting_state=None, use_device_state=False):
221+
if self.shots is not None:
222+
warn(
223+
"Requested adjoint differentiation to be computed with finite shots."
224+
" The derivative is always exact when using the adjoint differentiation method.",
225+
UserWarning,
226+
)
227+
228+
if len(tape.trainable_params) == 0:
229+
return np.array(0)
230+
231+
# Check adjoint diff support
232+
self.adjoint_diff_support_check(tape)
233+
219234
# Initialization of state
220235
if starting_state is not None:
221236
ket = np.ravel(starting_state)
@@ -248,6 +263,173 @@ def adjoint_jacobian(self, tape, starting_state=None, use_device_state=False):
248263
)
249264
return jac
250265

266+
def vector_jacobian_product(self, tape, dy, starting_state=None, use_device_state=False):
267+
"""Generate the vector-Jacobian products of a tape.
268+
269+
Args:
270+
tape (.QuantumTape): quantum tape to differentiate
271+
dy (tensor_like): Gradient-output vector. Must have shape
272+
matching the output shape of the corresponding tape.
273+
274+
Keyword Args:
275+
starting_state (tensor_like): post-forward pass state to start execution with. It should be
276+
complex-valued. Takes precedence over ``use_device_state``.
277+
use_device_state (bool): use current device state to initialize. A forward pass of the same
278+
circuit should be the last thing the device has executed. If a ``starting_state`` is
279+
provided, that takes precedence.
280+
281+
Returns:
282+
tuple[array or None, tensor_like or None]: A tuple of the adjoint-jacobian and the Vector-Jacobian
283+
product. Returns ``None`` if the tape has no trainable parameters.
284+
"""
285+
if self.shots is not None:
286+
warn(
287+
"Requested adjoint differentiation to be computed with finite shots."
288+
" The derivative is always exact when using the adjoint differentiation method.",
289+
UserWarning,
290+
)
291+
292+
num_params = len(tape.trainable_params)
293+
294+
if num_params == 0:
295+
return None, None
296+
297+
if math.allclose(dy, 0):
298+
return None, math.convert_like(np.zeros([num_params]), dy)
299+
300+
# Check adjoint diff support
301+
self.adjoint_diff_support_check(tape)
302+
303+
# Initialization of state
304+
if starting_state is not None:
305+
ket = np.ravel(starting_state)
306+
else:
307+
if not use_device_state:
308+
self.reset()
309+
self.execute(tape)
310+
ket = np.ravel(self._pre_rotated_state)
311+
312+
VJP = VectorJacobianProductC128()
313+
314+
obs_serialized = _serialize_obs(tape, self.wire_map)
315+
ops_serialized, use_sp = _serialize_ops(tape, self.wire_map)
316+
317+
ops_serialized = VJP.create_ops_list(*ops_serialized)
318+
319+
trainable_params = sorted(tape.trainable_params)
320+
first_elem = 1 if trainable_params[0] == 0 else 0
321+
322+
tp_shift = (
323+
trainable_params if not use_sp else [i - 1 for i in trainable_params[first_elem:]]
324+
) # exclude first index if explicitly setting sv
325+
326+
jac, vjp = VJP.vjp(
327+
math.reshape(dy, [-1]),
328+
StateVectorC128(ket),
329+
obs_serialized,
330+
ops_serialized,
331+
tp_shift,
332+
tape.num_params,
333+
)
334+
return jac, vjp
335+
336+
def compute_vjp(self, dy, jac, num=None):
337+
"""Convenience function to compute the vector-Jacobian product for a given
338+
vector of gradient outputs and a Jacobian.
339+
340+
Args:
341+
dy (tensor_like): vector of gradient outputs
342+
jac (tensor_like): Jacobian matrix. For an n-dimensional ``dy``
343+
vector, the first n-dimensions of ``jac`` should match
344+
the shape of ``dy``.
345+
346+
Keyword Args:
347+
num (int): The length of the flattened ``dy`` argument. This is an
348+
optional argument, but can be useful to provide if ``dy`` potentially
349+
has no shape (for example, due to tracing or just-in-time compilation).
350+
351+
Returns:
352+
tensor_like: the vector-Jacobian product
353+
"""
354+
if jac is None:
355+
return None
356+
357+
dy_row = math.reshape(dy, [-1])
358+
359+
if num is None:
360+
num = math.shape(dy_row)[0]
361+
362+
if not isinstance(dy_row, np.ndarray):
363+
jac = math.convert_like(jac, dy_row)
364+
365+
jac = math.reshape(jac, [num, -1])
366+
num_params = jac.shape[1]
367+
368+
if math.allclose(dy, 0):
369+
return math.convert_like(np.zeros([num_params]), dy)
370+
371+
VJP = VectorJacobianProductC128()
372+
373+
vjp_tensor = VJP.compute_vjp_from_jac(
374+
math.reshape(jac, [-1]),
375+
dy_row,
376+
num,
377+
num_params,
378+
)
379+
return vjp_tensor
380+
381+
def batch_vjp(
382+
self, tapes, dys, reduction="append", starting_state=None, use_device_state=False
383+
):
384+
"""Generate the vector-Jacobian products of a batch of tapes.
385+
386+
Args:
387+
tapes (Sequence[.QuantumTape]): sequence of quantum tapes to differentiate
388+
dys (Sequence[tensor_like]): Sequence of gradient-output vectors ``dy``. Must be the
389+
same length as ``tapes``. Each ``dy`` tensor should have shape
390+
matching the output shape of the corresponding tape.
391+
392+
Keyword Args:
393+
reduction (str): Determines how the vector-Jacobian products are returned.
394+
If ``append``, then the output of the function will be of the form
395+
``List[tensor_like]``, with each element corresponding to the VJP of each
396+
input tape. If ``extend``, then the output VJPs will be concatenated.
397+
starting_state (tensor_like): post-forward pass state to start execution with. It should be
398+
complex-valued. Takes precedence over ``use_device_state``.
399+
use_device_state (bool): use current device state to initialize. A forward pass of the same
400+
circuit should be the last thing the device has executed. If a ``starting_state`` is
401+
provided, that takes precedence.
402+
403+
Returns:
404+
tuple[List[array or None], List[tensor_like or None]]: A tuple containing a list
405+
of adjoint-jacobians and a list of vector-Jacobian products. ``None`` elements correspond
406+
to tapes with no trainable parameters.
407+
"""
408+
vjps = []
409+
jacs = []
410+
411+
# Loop through the tapes and dys vector
412+
for tape, dy in zip(tapes, dys):
413+
jac, vjp = self.vector_jacobian_product(
414+
tape,
415+
dy,
416+
starting_state=starting_state,
417+
use_device_state=use_device_state,
418+
)
419+
if vjp is None:
420+
if reduction == "append":
421+
vjps.append(None)
422+
jacs.append(jac)
423+
continue
424+
if isinstance(reduction, str):
425+
getattr(vjps, reduction)(vjp)
426+
getattr(jacs, reduction)(jac)
427+
elif callable(reduction):
428+
reduction(vjps, vjp)
429+
reduction(jacs, jac)
430+
431+
return jacs, vjps
432+
251433

252434
if not CPP_BINARY_AVAILABLE:
253435

pennylane_lightning/src/algorithms/AdjointDiff.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -761,6 +761,6 @@ template <class T = double> class AdjointJacobian {
761761
}
762762
}
763763
}
764-
};
764+
}; // class AdjointJacobian
765765

766766
} // namespace Pennylane::Algorithms

pennylane_lightning/src/algorithms/CMakeLists.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
project(lightning_algorithms LANGUAGES CXX)
22
set(CMAKE_CXX_STANDARD 17)
33

4-
set(ALGORITHM_FILES AdjointDiff.hpp AdjointDiff.cpp CACHE INTERNAL "" FORCE)
4+
set(ALGORITHM_FILES AdjointDiff.hpp AdjointDiff.cpp JacobianProd.hpp JacobianProd.cpp CACHE INTERNAL "" FORCE)
55
add_library(lightning_algorithms STATIC ${ALGORITHM_FILES})
66

77
target_link_libraries(lightning_algorithms PRIVATE pennylane_lightning_compile_options
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
// Copyright 2021 Xanadu Quantum Technologies Inc.
2+
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#include "JacobianProd.hpp"
16+
17+
// explicit instantiation
18+
template class Pennylane::Algorithms::VectorJacobianProduct<float>;
19+
template class Pennylane::Algorithms::VectorJacobianProduct<double>;

0 commit comments

Comments
 (0)