
Commit

rename files, density matrix tests
GiggleLiu committed Jun 1, 2022
1 parent 317708b commit 53b34dd
Showing 12 changed files with 108 additions and 61 deletions.
1 change: 0 additions & 1 deletion .codecov.yml

This file was deleted.

32 changes: 0 additions & 32 deletions .gitlab-ci.yml

This file was deleted.

9 changes: 5 additions & 4 deletions README.md
@@ -5,7 +5,7 @@

CUDA support for [Yao.jl](https://github.com/QuantumBFS/Yao.jl).

-**We are in an early-release beta. Expect some adventures and rough edges.**
+**Only tested locally, expect some adventures and rough edges.**

## Installation

@@ -51,7 +51,7 @@ cureg |> relax!(4,1,3) |> cpu
```

Constructors `curand_state`, `cuzero_state`, `cuproduct_state`, `cuuniform_state` and `cughz_state` are tailored for GPU,
-they are often faster than uploading a CPU register to CPU.
+they are faster than uploading a CPU register to CPU.
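
For illustration (not part of this commit's diff), a minimal sketch of the GPU-native constructors listed above; the register sizes and `nbatch` value are arbitrary:

```julia
using Yao, CuYao

creg  = curand_state(10)               # random state allocated directly on the GPU
czero = cuzero_state(10; nbatch=100)   # batched |0…0⟩ register on the GPU
cghz  = cughz_state(10)                # GHZ state on the GPU
reg   = cpu(czero)                     # download back to a CPU register when needed
```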

## Features
### Supported Gates
@@ -71,10 +71,11 @@ they are often faster than uploading a CPU register to CPU.
- focus!, relax!
- join
- density_matrix
- fidelity (not including density matrix)
- expect

### Other Operations
- statistic functional diff blocks
- expect for statistic functional
- autodiff is supported when the only parameterized gates are rotation gates in a circuit.
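
For illustration (not part of the diff), a minimal sketch of that autodiff path on a GPU register, mirroring the gradient checks added in `test/extra.jl` later in this commit:

```julia
using Yao, CuYao

creg = cu(rand_state(10))               # upload a CPU register to the GPU
c = EasyBuild.variational_circuit(10)   # parameterized circuit built from rotation gates
h = EasyBuild.heisenberg(10)
reg_grad, params_grad = expect'(h, creg => c)  # gradients w.r.t. the register and the gate parameters
```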

## The Team

11 changes: 11 additions & 0 deletions benchmarks/paralleldot.jl
@@ -0,0 +1,11 @@
function paralleldot(matrices::CuVector, ptrA, ptrB)
@inline function kernel(ctx, matrices)
inds = @cartesianidx state
i = inds[1]
piecewise(state, inds)[i] *= anyone(i-1, mask) ? d : a
return
end
gpu_call(kernel, state, a, d, mask; elements=length(state))
return state
end
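
The committed kernel above references `state`, `a`, `d`, and `mask`, none of which are among its arguments, so it appears to be a work-in-progress placeholder. A self-contained, hypothetical sketch of one way to batch inner products on the GPU (the names `paralleldot_batched`, `as`, `bs` are assumptions, not from this repository):

```julia
using CUDA

# Hypothetical sketch, not the committed benchmark: inner products of
# corresponding matrix pairs, computed with GPU-friendly broadcasts.
function paralleldot_batched(as::Vector{<:CuMatrix}, bs::Vector{<:CuMatrix})
    @assert length(as) == length(bs)
    return [sum(conj.(a) .* b) for (a, b) in zip(as, bs)]
end
```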

4 changes: 0 additions & 4 deletions bors.toml

This file was deleted.

7 changes: 4 additions & 3 deletions src/CuYao.jl
@@ -25,13 +25,14 @@ import Yao.YaoArrayRegister: u1rows!, unrows!, autostatic, instruct!, swaprows!
import LinearAlgebra: norm
import Base: kron, getindex

-export cpu, cu, GPUReg, cuzero_state, cuuniform_state, curand_state, cuproduct_state, cughz_state
+export cpu, cu, AbstractCuArrayReg, CuArrayReg, CuBatchedArrayReg, CuDensityMatrix,
+cuzero_state, cuuniform_state, curand_state, cuproduct_state, cughz_state

const Ints = NTuple{<:Any, Int}

include("CUDApatch.jl")
include("GPUReg.jl")
include("gpuapplys.jl")
include("register.jl")
include("instructs.jl")

function __init__()
CUDA.allowscalar(false)
2 changes: 1 addition & 1 deletion src/gpuapplys.jl → src/instructs.jl
@@ -271,7 +271,7 @@ function instruct!(::Val{2}, state::DenseCuVecOrMat, ::Val{:PSWAP}, locs::Tuple{
state
end

-function YaoBlocks._apply_fallback!(r::GPUReg{B,T}, b::AbstractBlock) where {B,T}
+function YaoBlocks._apply_fallback!(r::AbstractCuArrayReg{B,T}, b::AbstractBlock) where {B,T}
YaoBlocks._check_size(r, b)
r.state .= CUDA.adapt(CuArray{T}, mat(T, b)) * r.state
return r
31 changes: 23 additions & 8 deletions src/GPUReg.jl → src/register.jl
@@ -4,7 +4,10 @@ cu(reg::BatchedArrayReg{D}) where D = BatchedArrayReg{D}(CuArray(reg.state), reg
cpu(reg::BatchedArrayReg{D}) where D = BatchedArrayReg{D}(Array(reg.state), reg.nbatch)
cu(reg::DensityMatrix{D}) where D = DensityMatrix{D}(CuArray(reg.state))
cpu(reg::DensityMatrix{D}) where D = DensityMatrix{D}(Array(reg.state))
-const GPUReg{D, T, MT} = AbstractArrayReg{D, T, MT} where MT<:DenseCuArray
+const AbstractCuArrayReg{D, T, MT} = AbstractArrayReg{D, T, MT} where MT<:DenseCuArray
+const CuArrayReg{D, T, MT} = ArrayReg{D, T, MT} where MT<:DenseCuArray
+const CuBatchedArrayReg{D, T, MT} = BatchedArrayReg{D, T, MT} where MT<:DenseCuArray
+const CuDensityMatrix{D, T, MT} = DensityMatrix{D, T, MT} where MT<:DenseCuArray

function batch_normalize!(s::DenseCuArray, p::Real=2)
p!=2 && throw(ArgumentError("p must be 2!"))
@@ -30,7 +33,7 @@ function measure(::ComputationalBasis, reg::BatchedArrayReg{D, T, MT} where MT<:
return _measure(rng, basis(reg), pl |> Matrix, nshots)
end

-function measure!(::RemoveMeasured, ::ComputationalBasis, reg::GPUReg{D}, ::AllLocs; rng::AbstractRNG=Random.GLOBAL_RNG) where D
+function measure!(::RemoveMeasured, ::ComputationalBasis, reg::AbstractCuArrayReg{D}, ::AllLocs; rng::AbstractRNG=Random.GLOBAL_RNG) where D
regm = reg |> rank3
B = size(regm, 3)
nregm = similar(regm, D ^ nremain(reg), B)
@@ -51,7 +54,7 @@ function measure!(::RemoveMeasured, ::ComputationalBasis, reg::GPUReg{D}, ::AllL
return reg isa ArrayReg ? Array(res)[] : res
end

-function measure!(::NoPostProcess, ::ComputationalBasis, reg::GPUReg{D, T}, ::AllLocs; rng::AbstractRNG=Random.GLOBAL_RNG) where {D, T}
+function measure!(::NoPostProcess, ::ComputationalBasis, reg::AbstractCuArrayReg{D, T}, ::AllLocs; rng::AbstractRNG=Random.GLOBAL_RNG) where {D, T}
regm = reg |> rank3
B = size(regm, 3)
pl = dropdims(mapreduce(abs2, +, regm, dims=2), dims=2)
@@ -74,7 +77,7 @@ end
function YaoArrayRegister.measure!(
::NoPostProcess,
bb::BlockedBasis,
-reg::GPUReg{D,T},
+reg::AbstractCuArrayReg{D,T},
::AllLocs;
rng::AbstractRNG = Random.GLOBAL_RNG,
) where {D,T}
@@ -108,7 +111,7 @@ function YaoArrayRegister.measure!(
return reg isa ArrayReg ? bb.values[res_cpu[]] : CuArray(bb.values[res_cpu])
end

-function measure!(rst::ResetTo, ::ComputationalBasis, reg::GPUReg{D, T}, ::AllLocs; rng::AbstractRNG=Random.GLOBAL_RNG) where {D, T}
+function measure!(rst::ResetTo, ::ComputationalBasis, reg::AbstractCuArrayReg{D, T}, ::AllLocs; rng::AbstractRNG=Random.GLOBAL_RNG) where {D, T}
regm = reg |> rank3
B = size(regm, 3)
pl = dropdims(mapreduce(abs2, +, regm, dims=2), dims=2)
@@ -170,15 +173,15 @@ function YaoArrayRegister.batched_kron!(C::CuArray{T3, 3}, A::DenseCuArray, B::D
return C
end

-function join(reg1::GPUReg{D}, reg2::GPUReg{D}) where {D}
+function join(reg1::AbstractCuArrayReg{D}, reg2::AbstractCuArrayReg{D}) where {D}
@assert nbatch(reg1) == nbatch(reg2)
s1 = reg1 |> rank3
s2 = reg2 |> rank3
state = YaoArrayRegister.batched_kron(s1, s2)
return arrayreg(copy(reshape(state, size(state, 1), :)); nlevel=D, nbatch=nbatch(reg1))
end

-function Yao.insert_qudits!(reg::GPUReg{D}, loc::Int; nqudits::Int=1) where D
+function Yao.insert_qudits!(reg::AbstractCuArrayReg{D}, loc::Int; nqudits::Int=1) where D
na = nactive(reg)
focus!(reg, 1:loc-1)
reg2 = join(zero_state(nqudits; nbatch=nbatch(reg)) |> cu, reg) |> relax! |> focus!((1:na+nqudits)...)
@@ -262,10 +265,22 @@ end

#=
for FUNC in [:measure!, :measure!]
-@eval function $FUNC(rng::AbstractRNG, op::AbstractBlock, reg::GPUReg, al::AllLocs; kwargs...) where B
+@eval function $FUNC(rng::AbstractRNG, op::AbstractBlock, reg::AbstractCuArrayReg, al::AllLocs; kwargs...) where B
E, V = eigen!(mat(op) |> Matrix)
ei = Eigen(E|>cu, V|>cu)
$FUNC(rng::AbstractRNG, ei, reg, al; kwargs...)
end
end
=#

+function YaoBlocks.expect(op::AbstractBlock, dm::CuDensityMatrix{D}) where D
+return tr(apply(ArrayReg{D}(dm.state), op).state)
+end
+
+measure(
+::ComputationalBasis,
+reg::CuDensityMatrix,
+::AllLocs;
+nshots::Int = 1,
+rng::AbstractRNG = Random.GLOBAL_RNG,
+) = YaoArrayRegister._measure(rng, basis(reg), Array(reg |> probs), nshots)
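
For illustration (not part of the diff), a minimal sketch of the new GPU density-matrix path, using the aliases defined above and mirroring `test/extra.jl` below; the 6-qubit register and the Heisenberg observable are arbitrary choices:

```julia
using Yao, CuYao

creg = cu(rand_state(6))                 # creg isa AbstractCuArrayReg
crho = density_matrix(creg, (3, 4, 5))   # reduced density matrix, kept on the GPU (a CuDensityMatrix)
h = EasyBuild.heisenberg(3)
ev = expect(h, crho)                     # expectation value via the new CuDensityMatrix method
samples = measure(crho; nshots=2)        # measurement outcomes as a Vector
```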
52 changes: 52 additions & 0 deletions test/extra.jl
@@ -0,0 +1,52 @@
using CuYao, Test, CUDA
CUDA.allowscalar(false)

@testset "gradient" begin
reg1 = rand_state(10)
reg2 = rand_state(10)
c = EasyBuild.variational_circuit(10)
g1, g2 = fidelity'(reg1=>c, reg2)
cg1, cg2 = fidelity'(cu(reg1)=>c, cu(reg2))
@test g1.first ≈ cpu(cg1.first)
@test g1.second ≈ cg1.second
@test g2 ≈ cpu(cg2)

h = EasyBuild.heisenberg(10)
g1 = expect'(h, reg1=>c)
cg1 = expect'(h, cu(reg1)=>c)
@test g1.first ≈ cpu(cg1.first)
@test g1.second ≈ cg1.second
end

@testset "apply density matrix" begin
reg = rand_state(6)
creg = cu(reg)
rho = density_matrix(reg, (3,4))
crho = density_matrix(creg, (3,4))
@test rho ≈ cpu(crho)
g = put(2, 1=>Rx(0.3))
@test cpu(apply(crho, g)) ≈ apply(rho, g)
rho = density_matrix(reg)
crho = density_matrix(creg)
@test rho ≈ cpu(crho)
@test probs(crho) ≈ probs(creg)
g = put(6, 1=>Rx(0.3))
@test cpu(apply(crho, g)) ≈ apply(rho, g)

# channel
c = UnitaryChannel([put(6, 1=>Rx(0.3)), put(6, 2=>Z)], [0.4, 0.6])
@test cpu(apply(crho, c)) ≈ apply(rho, c)
end

@testset "expect on density matrix" begin
reg = rand_state(6)
rho = density_matrix(reg, (3,4,5))
crho = cu(rho)
h = EasyBuild.heisenberg(3)
a = expect(h, rho)
b = expect(h, crho)
@test a ≈ b
# fidelity
@test_broken fidelity(crho, crho) ≈ 1
@test measure(crho; nshots=2) isa Vector
end
test/gpuapplys.jl → test/instructs.jl

File renamed without changes.
12 changes: 6 additions & 6 deletions test/GPUReg.jl → test/register.jl
@@ -25,7 +25,7 @@ end
@testset "constructor an measure" begin
reg = rand_state(10)
greg = reg |> cu
-@test greg isa GPUReg
+@test greg isa AbstractCuArrayReg
@test eltype(greg.state) == ComplexF64
myvec(x) = Vector(x)
myvec(x::Number) = [x]
@@ -125,14 +125,14 @@ end
@testset "zero_state, et al" begin
for b = [4, NoBatch()]
reg = cuzero_state(3; nbatch=b)
-@test cpu(reg) ≈ zero_state(3; nbatch=b) && reg isa GPUReg
+@test cpu(reg) ≈ zero_state(3; nbatch=b) && reg isa AbstractCuArrayReg
reg = curand_state(3; nbatch=b)
-@test reg isa GPUReg
+@test reg isa AbstractCuArrayReg
reg = cuuniform_state(3; nbatch=b)
-@test cpu(reg) ≈ uniform_state(3; nbatch=b) && reg isa GPUReg
+@test cpu(reg) ≈ uniform_state(3; nbatch=b) && reg isa AbstractCuArrayReg
reg = cuproduct_state(bit"110"; nbatch=b)
-@test cpu(reg) ≈ product_state(bit"110"; nbatch=b) && reg isa GPUReg
+@test cpu(reg) ≈ product_state(bit"110"; nbatch=b) && reg isa AbstractCuArrayReg
reg = cughz_state(3; nbatch=b)
-@test cpu(reg) ≈ ghz_state(3; nbatch=b) && reg isa GPUReg
+@test cpu(reg) ≈ ghz_state(3; nbatch=b) && reg isa AbstractCuArrayReg
end
end
8 changes: 6 additions & 2 deletions test/runtests.jl
@@ -6,9 +6,13 @@ CUDA.allowscalar(false)
end

@testset "GPU reg" begin
include("GPUReg.jl")
include("register.jl")
end

@testset "gpu applies" begin
include("gpuapplys.jl")
include("instructs.jl")
end

@testset "extra" begin
include("extra.jl")
end
