Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

update LT docs #971

Merged
merged 7 commits into from
Nov 4, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .github/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,9 @@

### Bug fixes

* Fix `qml.state()` support for `lightning.tensor`. The state returned by the C++ backend is no longer normalized, ensuring that the returned state is aligned with `default.qubit`.
[(#971)](https://github.com/PennyLaneAI/pennylane-lightning/pull/971)

* Fix `liblightning_kokkos_catalyst.so` not copied to correct build path for editable installation.
[(#968)](https://github.com/PennyLaneAI/pennylane-lightning/pull/968)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -413,7 +413,7 @@ class TNCudaBase : public TensornetBase<PrecisionT, Derived> {

DataBuffer<CFP_t, int> d_output_tensor(length, getDevTag(), true);

get_accessor_(d_output_tensor.getData(), length, projected_modes,
get_accessor_(d_output_tensor.getData(), projected_modes,
projected_mode_values, numHyperSamples);

d_output_tensor.CopyGpuDataToHost(host_data, length);
Expand All @@ -423,33 +423,30 @@ class TNCudaBase : public TensornetBase<PrecisionT, Derived> {
* @brief Get a slice of the full state tensor.
*
* @param tensor_data Pointer to the device memory for state tensor data.
* @param tensor_data_size Size of the state tensor data.
* @param projected_modes Projected modes to get the state tensor for.
* @param projected_mode_values Values of the projected modes.
* @param numHyperSamples Number of hyper samples to use in the calculation
* and is set to 1 by default.
*/
void get_state_tensor(CFP_t *tensor_data,
const std::size_t tensor_data_size,
const std::vector<int32_t> &projected_modes,
const std::vector<int64_t> &projected_mode_values,
const int32_t numHyperSamples = 1) const {
get_accessor_(tensor_data, tensor_data_size, projected_modes,
projected_mode_values, numHyperSamples);
get_accessor_(tensor_data, projected_modes, projected_mode_values,
numHyperSamples);
}

private:
/**
* @brief Get accessor of a state tensor
*
* @param tensor_data Pointer to the device memory for state tensor data.
* @param tensor_data_size Size of the tensor data.
* @param projected_modes Projected modes to get the state tensor for.
* @param projected_mode_values Values of the projected modes.
* @param numHyperSamples Number of hyper samples to use in the calculation
* and is set to 1 by default.
*/
void get_accessor_(CFP_t *tensor_data, const std::size_t tensor_data_size,
void get_accessor_(CFP_t *tensor_data,
const std::vector<int32_t> &projected_modes,
const std::vector<int64_t> &projected_mode_values,
const int32_t numHyperSamples = 1) const {
Expand Down Expand Up @@ -519,14 +516,6 @@ class TNCudaBase : public TensornetBase<PrecisionT, Derived> {

PL_CUDA_IS_SUCCESS(cudaStreamSynchronize(getDevTag().getStreamID()));

const ComplexT scale_scalar = ComplexT{1.0, 0.0} / stateNorm2;

CFP_t scale_scalar_cu{scale_scalar.real(), scale_scalar.imag()};

scaleC_CUDA<CFP_t, CFP_t>(scale_scalar_cu, tensor_data,
tensor_data_size, getDevTag().getDeviceID(),
getDevTag().getStreamID(), getCublasCaller());

PL_CUTENSORNET_IS_SUCCESS(
cutensornetDestroyWorkspaceDescriptor(workDesc));
PL_CUTENSORNET_IS_SUCCESS(cutensornetDestroyAccessor(accessor));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -130,9 +130,8 @@ template <class TensorNetT> class MeasurementsTNCuda {
std::vector<int64_t> projectedModeValues(projected_modes.size(), 0);

if (projected_modes.size() == 0) {
tensor_network_.get_state_tensor(d_output_tensor.getData(),
d_output_tensor.getLength(), {},
{}, numHyperSamples);
tensor_network_.get_state_tensor(d_output_tensor.getData(), {}, {},
numHyperSamples);
getProbs_CUDA(d_output_tensor.getData(), d_output_probs.getData(),
length, static_cast<int>(thread_per_block),
tensor_network_.getDevTag().getStreamID());
Expand All @@ -154,7 +153,7 @@ template <class TensorNetT> class MeasurementsTNCuda {
}

tensor_network_.get_state_tensor(
d_output_tensor.getData(), length, projected_modes,
d_output_tensor.getData(), projected_modes,
projectedModeValues, numHyperSamples);

getProbs_CUDA(d_output_tensor.getData(), tmp_probs.getData(),
Expand Down
5 changes: 0 additions & 5 deletions tests/test_gates.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,11 +102,6 @@ def test_gate_unitary_correct(op, op_name):
if wires == 1 and device_name == "lightning.tensor":
pytest.skip("Skipping single wire device on lightning.tensor.")

if op_name == "QubitUnitary" and device_name == "lightning.tensor":
pytest.skip(
"Skipping QubitUnitary on lightning.tensor. It can't be decomposed into 1-wire or 2-wire gates"
)

dev = qml.device(device_name, wires=wires)

@qml.qnode(dev)
Expand Down
Loading