From da6732fffc4163b2afc443e0140c1711d20bd6cc Mon Sep 17 00:00:00 2001 From: Lukas Devos Date: Tue, 21 Jan 2025 08:57:10 -0500 Subject: [PATCH] Last updates before v0.12 tag (#230) * Add `repeat(::MPO, ...)` * Invert `MPOHamiltonian` definition order * fix environment allocation oopsie * IDMRG1 -> IDMRG * Add missing docstrings * Large docs updates * exclude outdated envs docs * Bump broken KrylovKit version * Fix build * Minor cosmetics * Add/expand docstrings * More cosmetics * Add return signatures to main algorithm docstrings * Fix typo * Fix typos * update KrylovKit version --------- Co-authored-by: leburgel --- Project.toml | 2 +- docs/Project.toml | 2 + docs/make.jl | 6 +- docs/src/man/algorithms.md | 330 ++++++++---------- docs/src/man/environments.md | 28 +- docs/src/man/intro.md | 40 ++- docs/src/man/operators.md | 75 ++-- docs/src/man/states.md | 177 +++++----- src/MPSKit.jl | 2 +- src/algorithms/ED.jl | 10 +- src/algorithms/approximate/approximate.jl | 10 +- src/algorithms/approximate/idmrg.jl | 2 +- src/algorithms/excitation/excitations.jl | 10 +- src/algorithms/fidelity_susceptibility.jl | 17 +- .../groundstate/find_groundstate.jl | 11 +- src/algorithms/groundstate/idmrg.jl | 4 +- src/algorithms/propagator/corvector.jl | 8 +- src/algorithms/statmech/leading_boundary.jl | 11 +- src/algorithms/timestep/time_evolve.jl | 8 +- src/algorithms/timestep/timeevmpo.jl | 11 + src/environments/infinite_envs.jl | 8 +- src/operators/mpo.jl | 1 + src/operators/mpohamiltonian.jl | 12 +- src/utility/multiline.jl | 6 + test/algorithms.jl | 14 +- 25 files changed, 421 insertions(+), 384 deletions(-) diff --git a/Project.toml b/Project.toml index 9354041d6..4536fe32b 100644 --- a/Project.toml +++ b/Project.toml @@ -28,7 +28,7 @@ BlockTensorKit = "0.1.1" Compat = "3.47, 4.10" DocStringExtensions = "0.9.3" HalfIntegers = "1.6.0" -KrylovKit = "0.8.3, 0.9" +KrylovKit = "0.8.3, 0.9.2" LinearAlgebra = "1.6" LoggingExtras = "~1.0" OhMyThreads = "0.7.0" diff --git a/docs/Project.toml b/docs/Project.toml index 83ae9b5ef..4b3710893 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -3,7 +3,9 @@ Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" DocumenterCitations = "daee34ce-89f3-4625-b898-19384cb65244" DocumenterInterLinks = "d12716ef-a0f6-4df4-a9f1-a5a34e75c656" KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" +LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MPSKit = "bb1c41ca-d63c-52ed-829e-0820dda26502" +MPSKitModels = "ca635005-6f8c-4cd1-b51d-8491250ef2ab" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Polynomials = "f27b6e38-b328-58d1-80ce-0feddd5e7a45" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" diff --git a/docs/make.jl b/docs/make.jl index 982ecb25f..fc9fe248b 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -28,7 +28,8 @@ bib = CitationBibliography(bibpath; style=:authoryear) # interlinks links = InterLinks("TensorKit" => "https://jutho.github.io/TensorKit.jl/stable/", "TensorOperations" => "https://jutho.github.io/TensorOperations.jl/stable/", - "KrylovKit" => "https://jutho.github.io/KrylovKit.jl/stable/") + "KrylovKit" => "https://jutho.github.io/KrylovKit.jl/stable/", + "BlockTensorKit" => "https://lkdvos.github.io/BlockTensorKit.jl/dev/") # include MPSKit in all doctests DocMeta.setdocmeta!(MPSKit, :DocTestSetup, :(using MPSKit, TensorKit); recursive=true) @@ -48,13 +49,14 @@ makedocs(; "man/states.md", "man/operators.md", "man/algorithms.md", - "man/environments.md", + # "man/environments.md", "man/parallelism.md", "man/lattices.md"], "Examples" => 
"examples/index.md", "Library" => "lib/lib.md", "References" => "references.md"], checkdocs=:exports, + doctest=true, plugins=[bib, links]) deploydocs(; repo="github.com/QuantumKitHub/MPSKit.jl.git", push_preview=true) diff --git a/docs/src/man/algorithms.md b/docs/src/man/algorithms.md index 05fd5965d..378bb1af0 100644 --- a/docs/src/man/algorithms.md +++ b/docs/src/man/algorithms.md @@ -1,160 +1,131 @@ ```@meta -DocTestSetup = :( using MPSKit, TensorKit) +DocTestSetup = :(using MPSKit, TensorKit, MPSKitModels) ``` # [Algorithms](@id um_algorithms) -## Minimizing the energy +Here is a collection of the algorithms that have been added to MPSKit.jl. +If a particular algorithm is missing, feel free to let us know via an issue, or contribute via a PR. -There are a number of different possible energy-minimization algorithms, depending on the -system size. Exclusive to finite systems are +## Groundstates - - DMRG +One of the most prominent use-cases of MPS is to obtain the groundstate of a given (quasi-) one-dimensional quantum Hamiltonian. +In MPSKit.jl, this can be achieved through `find_groundstate`: - - DMRG2 - -Exclusive to infinite systems are - - - IDMRG - - - IDMRG2 - - - VUMPS - -with a last algorithm - GradientGrassmann - implemented for both finite and infinite -systems. +```@docs; canonical=false +find_groundstate +``` -WindowMPS, which is a finite patch of mutable tensors embedded in an infinite MPS, is -handled as a finite system where we only optimize over the patch of mutable tensors. +There is a variety of algorithms that have been developed over the years, and many of them have been implemented in MPSKit. +Keep in mind that some of them are exclusive to finite or infinite systems, while others may work for both. +Many of these algorithms have different advantages and disadvantages, and figuring out the optimal algorithm is not always straightforward, since this may strongly depend on the model. +Here, we enumerate some of their properties in hopes of pointing you in the right direction. ### DMRG -```julia -state = FiniteMPS(20,ℂ^2,ℂ^10); -operator = nonsym_ising_ham(); -(groundstate,environments,delta) = find_groundstate!(state,operator,DMRG()) -``` +Probably the most widely used algorithm for optimizing groundstates with MPS is [`DMRG`](@ref) and its variants. +This algorithm sweeps through the system, optimizing a single site while keeping all others fixed. +Since this local problem can be solved efficiently, the global optimal state follows by alternating through the system. +However, because of the single-site nature of this algorithm, this can never alter the bond dimension of the state, such that there is no way of dynamically increasing the precision. +This can become particularly relevant in the cases where symmetries are involved, since then finding a good distribution of charges is also required. +To circumvent this, it is also possible to optimize over two sites at the same time with [`DMRG2`](@ref), followed by a truncation back to the single site states. +This can dynamically change the bond dimension but comes at an increase in cost. -The DMRG algorithm sweeps through the system, optimizing every site. Because of its -single-site behaviour, this will always keep the bond dimension fixed. If you do want to -increase the bond dimension dynamically, then there are two options. Either you use the -two-site variant of DMRG (DMRG2()), or you make use of the finalize option. Finalize is a -function that gets called at the end of every DMRG iteration. 
Within that function call one -can modify the state. - -```julia -function my_finalize(iter,state,H,envs) - println("Hello from iteration $iter") - return state,envs; -end - -(groundstate,environments,delta) = find_groundstate!(state,operator,DMRG(finalize = my_finalize)) +```@docs; canonical=false +DMRG +DMRG2 ``` -### DMRG2 +For infinite systems, a similar approach can be used by dynamically adding new sites to the middle of the system and optimizing over them. +This gradually increases the system size until the boundary effects are no longer felt. +However, because of this approach, for critical systems this algorithm can be quite slow to converge, since the number of steps needs to be larger than the correlation length of the system. +Again, both a single-site and a two-site version are implemented, to have the option to dynamically increase the bonddimension at a higher cost. -```julia -state = FiniteMPS(20,ℂ^2,ℂ^10); -operator = nonsym_ising_ham(); -(groundstate,environments,delta) = find_groundstate!(state,operator,DMRG2(trscheme=truncbelow(1e-7))); +```@docs; canonical=false +IDMRG +IDMRG2 ``` -The twosite variant of DMRG, which optimizes blocks of two sites and then decomposes them -into 2 MPS tensors using the svd decomposition. By truncating the singular values up to a -desired precision, one can dynamically grow and shrink the bond dimension as needed. -However, this truncation in turn introduces an error, which is why a state converged with -DMRG2 can often be slightly further converged by subsequently using DMRG. - -### IDMRG - -```julia -state = InfiniteMPS([ℂ^2],[ℂ^10]); -operator = nonsym_ising_ham(); -(groundstate,environments,delta) = find_groundstate(state,operator,IDMRG1()) -``` +### VUMPS -The DMRG algorithm for finite systems can be generalized to infinite MPS. The idea is to -start with a finite system and grow the system size, while we are sweeping through the -system. This is again a single site algorithm, and therefore preserves the initial bond -dimension. +[`VUMPS`](@ref) is an (I)DMRG inspired algorithm that can be used to Variationally find the groundstate as a Uniform (infinite) Matrix Product State. +In particular, a local update is followed by a re-gauging procedure that effectively replaces the entire network with the newly updated tensor. +Compared to IDMRG, this often achieves a higher rate of convergence, since updates are felt throughout the system immediately. +Nevertheless, this algorithm only works whenever the state is injective, i.e. there is a unique ground state. +Since this is a single-site algorithm, this cannot alter the bond dimension. -### IDMRG2 -```julia -state = InfiniteMPS([ℂ^2,ℂ^2],[ℂ^10,ℂ^10]); -operator = repeat(nonsym_ising_ham(),2); -(groundstate,environments,delta) = find_groundstate(state,operator,IDMRG2(trscheme=truncbelow(1e-5))) +```@docs; canonical=false +VUMPS ``` -The generalization of DMRG2 to infinite systems has the same caveats as its finite -counterpart. We furthermore require a unitcell ≥ 2. As a rule of thumb, a truncation cutoff -of 1e-5 is already really good. +### Gradient descent -### VUMPS +Both finite and infinite matrix product states can be parametrized by a set of isometric tensors, +which we can optimize over. +Making use of the geometry of the manifold (a Grassmann manifold), we can greatly outperform naive optimization strategies. 
+Compared to the other algorithms, quite often the convergence rate in the tail of the optimization procedure is higher, such that often the fastest method combines a different algorithm far from convergence with this algorithm close to convergence. +Since this is again a single-site algorithm, there is no way to alter the bond dimension. -VUMPS is an (I)DMRG inspired algorithm that can be used to find the groundstate of infinite -matrix product states -```julia -state = InfiniteMPS([ℂ^2],[ℂ^10]); -operator = nonsym_ising_ham(); -(groundstate,environments,delta) = find_groundstate(state,operator,VUMPS()) +```@docs; canonical=false +GradientGrassmann ``` -much like DMRG, it cannot modify the bond dimension, and this has to be done manually in the -finalize function. - -### Gradient descent +## Time evolution -Both finite and infinite matrix product states can be parametrized by a set of unitary -matrices, and we can then perform gradient descent on this unitary manifold. Due to some -technical reasons (gauge freedom), this manifold further restricts to a grassmann manifold. +Given a particular state, it can also often be useful to have the ability to examine the evolution of certain properties over time. +To that end, there are two main approaches to solving the Schrödinger equation in MPSKit. -```julia -state = InfiniteMPS([ℂ^2],[ℂ^10]); -operator = nonsym_ising_ham(); -(groundstate,environments,delta) = find_groundstate(state,operator,GradientGrassmann()) +```math +i \hbar \frac{d}{dt} \Psi = H \Psi \implies \Psi(t) = \exp{\left(-iH(t - t_0)\right)} \Psi(t_0) ``` -## Time evolution +```@docs; canonical=false +timestep +time_evolve +make_time_mpo +``` ### TDVP -There is an implementation of the one-site TDVP scheme for finite and infinite MPS: -```julia -(newstate,environments) = timestep(state,operator,dt,TDVP()) -``` +The first is focused around approximately solving the equation for a small timestep, and repeating this until the desired evolution is achieved. +This can be achieved by projecting the equation onto the tangent space of the MPS, and then solving the results. +This procedure is commonly referred to as the [`TDVP`](@ref) algorithm, which again has a two-site variant to allow for dynamically altering the bond dimension. -and the two-site scheme for finite MPS (TDVP2()). Similarly to DMRG, the one site scheme -will preserve the bond dimension, and expansion has to be done manually. +```@docs; canonical=false +TDVP +TDVP2 +``` ### Time evolution MPO -We have rudimentary support for turning an MPO hamiltonian into a time evolution MPO. +The other approach instead tries to first approximately represent the evolution operator, and only then attempts to apply this operator to the initial state. +Typically the first step happens through [`make_time_mpo`](@ref), while the second can be achieved through [`approximate`](@ref). +Here, there are several algorithms available -```julia -make_time_mpo(H,dt,alg::WI) -make_time_mpo(H,dt,alg::WII) +```@docs; canonical=false +WI +WII +TaylorCluster ``` -two algorithms are available, corresponding to different orders of precision. It is possible -to then multiply a state by this MPO, or to approximate (MPO,state) by a new state +## Excitations + +It might also be desirable to obtain information beyond the lowest energy state of a given system, and study the dispersion relation. +While it is typically not feasible to resolve states in the middle of the energy spectrum, there are several ways to target a few of the lowest-lying energy states. 
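+
+As a quick illustration (a minimal sketch rather than an executed doctest, assuming the infinite `transverse_field_ising` Hamiltonian from MPSKitModels and illustrative parameter choices), a dispersion relation can be obtained by repeating the quasiparticle calculation detailed below over a range of momenta:
+
+```julia
+using TensorKit, MPSKit, MPSKitModels
+
+# groundstate of the infinite transverse-field Ising model
+H = transverse_field_ising(; g=4.0)
+ψ₀ = InfiniteMPS(ℂ^2, ℂ^16)
+ψ, = find_groundstate(ψ₀, H; verbosity=0)
+
+# lowest-lying excitation energy at each momentum
+momenta = range(-π, π; length=33)
+dispersion = map(momenta) do k
+    Es, ϕs = excitations(H, QuasiparticleAnsatz(), k, ψ; num=1)
+    return first(Es)
+end
+```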
-```julia -state = InfiniteMPS([ℂ^2],[ℂ^10]); -operator = nonsym_ising_ham(); -mpo = make_time_mpo(operator, 0.1, WII()); -approximate(state, (mpo, state), VUMPS()) +```@docs; canonical=false +excitations ``` -This feature is at the moment not very well supported. - -## Excitations +```@setup excitations +using TensorKit, MPSKit, MPSKitModels +``` ### Quasiparticle Ansatz The Quasiparticle Ansatz offers an approach to compute low-energy eigenstates in quantum systems, playing a key role in both finite and infinite systems. It leverages localized -perturbations for approximations, as detailed in [this -paper](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.111.080401). +perturbations for approximations, as detailed in [haegeman2013](@cite). #### Finite Systems: @@ -166,13 +137,11 @@ in the transverse field Ising model, we calculate the first excited state as sho provided code snippet, amd check the accuracy against theoretical values. Some deviations are expected, both due to finite-bond-dimension and finite-size effects. - - -```julia +```@example excitations # Model parameters g = 10.0 L = 16 -H = transverse_field_ising(; g) +H = transverse_field_ising(FiniteChain(L); g) # Finding the ground state ψ₀ = FiniteMPS(L, ℂ^2, ℂ^32) @@ -181,10 +150,6 @@ H = transverse_field_ising(; g) # Computing excitations using the Quasiparticle Ansatz Es, ϕs = excitations(H, QuasiparticleAnsatz(), ψ; num=1) isapprox(Es[1], 2(g - 1); rtol=1e-2) - -# output - -true ``` #### Infinite Systems: @@ -194,9 +159,7 @@ in the unit cell in a plane-wave superposition, requiring momentum specification [Haldane gap](https://iopscience.iop.org/article/10.1088/0953-8984/1/19/001) computation in the Heisenberg model illustrates this approach. - - -```julia +```@example excitations # Setting up the model and momentum momentum = π H = heisenberg_XXX() @@ -208,10 +171,6 @@ H = heisenberg_XXX() # Excitation calculations Es, ϕs = excitations(H, QuasiparticleAnsatz(), momentum, ψ) isapprox(Es[1], 0.41047925; atol=1e-4) - -# output - -true ``` #### Charged excitations: @@ -221,32 +180,41 @@ trivial total charge. However, quasiparticles with different charges can be obta the sector keyword. For instance, in the transverse field Ising model, we consider an excitation built up of flipping a single spin, aligning with `Z2Irrep(1)`. - - -```julia +```@example excitations g = 10.0 L = 16 -H = transverse_field_ising(Z2Irrep; g) +H = transverse_field_ising(Z2Irrep, FiniteChain(L); g) ψ₀ = FiniteMPS(L, Z2Space(0 => 1, 1 => 1), Z2Space(0 => 16, 1 => 16)) ψ, = find_groundstate(ψ₀, H; verbosity=0) Es, ϕs = excitations(H, QuasiparticleAnsatz(), ψ; num=1, sector=Z2Irrep(1)) isapprox(Es[1], 2(g - 1); rtol=1e-2) # infinite analytical result +``` -# output - -true +```@docs; canonical=false +QuasiparticleAnsatz ``` ### Finite excitations For finite systems we can also do something else - find the groundstate of the hamiltonian + -``weight \sum_i | psi_i > < psi_i ``. This is also supported by calling +``\\text{weight} \sum_i | \\psi_i ⟩ ⟨ \\psi_i ``. 
This is also supported by calling -```julia -th = nonsym_ising_ham() -ts = FiniteMPS(10,ℂ^2,ℂ^12); -(ts,envs,_) = find_groundstate(ts,th,DMRG(verbosity=0)); -(energies,Bs) = excitations(th,FiniteExcited(),ts,envs); +```@example excitations +# Model parameters +g = 10.0 +L = 16 +H = transverse_field_ising(FiniteChain(L); g) + +# Finding the ground state +ψ₀ = FiniteMPS(L, ℂ^2, ℂ^32) +ψ, = find_groundstate(ψ₀, H; verbosity=0) + +Es, ϕs = excitations(H, FiniteExcited(), ψ; num=1) +isapprox(Es[1], 2(g - 1); rtol=1e-2) +``` + +```@docs; canonical=false +FiniteExcited ``` ### "Chepiga Ansatz" @@ -260,52 +228,30 @@ where excitations are distributed throughout the entire system. Consequently, th energy spectrum can be extracted by diagonalizing the effective Hamiltonian (without any additional DMRG costs!). The states of these excitations are then represented by the ground state MPS, with one site substituted by the corresponding eigenvector. This approach is -often referred to as the 'Chepiga ansatz', named after one of the authors of this paper. +often referred to as the 'Chepiga ansatz', named after one of the authors of this paper +[chepiga2017](@cite). This is supported via the following syntax: - - -```julia -g = 1.0 +```@example excitations +g = 10.0 L = 16 -H = transverse_field_ising(; g) +H = transverse_field_ising(FiniteChain(L); g) ψ₀ = FiniteMPS(L, ComplexSpace(2), ComplexSpace(32)) ψ, envs, = find_groundstate(ψ₀, H; verbosity=0) E₀ = real(sum(expectation_value(ψ, H, envs))) Es, ϕs = excitations(H, ChepigaAnsatz(), ψ, envs; num=1) -isapprox(Es[1], 2(g - 1); rtol=1e-2) # infinite analytical result - -# output - -true +isapprox(Es[1] - E₀, 2(g - 1); rtol=1e-2) # infinite analytical result ``` In order to improve the accuracy, a two-site version also exists, which varies two neighbouring sites: - - -```julia -g = 1.0 -L = 16 -H = transverse_field_ising(; g) -ψ₀ = FiniteMPS(L, ComplexSpace(2), ComplexSpace(32)) -ψ, envs, = find_groundstate(ψ₀, H; verbosity=0) -E₀ = real(sum(expectation_value(ψ, H, envs))) +```@example excitations Es, ϕs = excitations(H, ChepigaAnsatz2(), ψ, envs; num=1) -isapprox(Es[1], 2(g - 1); rtol=1e-2) # infinite analytical result - -# output - -true +isapprox(Es[1] - E₀, 2(g - 1); rtol=1e-2) # infinite analytical result ``` -The algorithm is described in more detail in the following paper: - -- Chepiga, N., & Mila, F. (2017). Excitation spectrum and density matrix renormalization - group iterations. Physical Review B, 96(5), 054425. - ## `changebonds` Many of the previously mentioned algorithms do not possess a way to dynamically change to @@ -354,10 +300,10 @@ disadvantages: of both expanding as well as truncating the bond dimension. Here, `trscheme` controls the truncation of the full state after the two-site update. -## leading boundary +## Leading boundary -For statmech partition functions we want to find the approximate leading boundary MPS. Again -this can be done with VUMPS: +For statistical mechanics partition functions we want to find the approximate leading +boundary MPS. Again this can be done with VUMPS: ```julia th = nonsym_ising_mpo() @@ -365,9 +311,13 @@ ts = InfiniteMPS([ℂ^2],[ℂ^20]); (ts,envs,_) = leading_boundary(ts,th,VUMPS(maxiter=400,verbosity=false)); ``` -if the mpo satisfies certain properties (positive and hermitian), it may also be possible to +If the mpo satisfies certain properties (positive and hermitian), it may also be possible to use GradientGrassmann. 
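+
+In the current interface, a minimal sketch of this workflow could look as follows. Here the `classical_ising` partition-function MPO and its `beta` keyword are assumed to be provided by MPSKitModels; substitute your own transfer operator as needed.
+
+```julia
+using TensorKit, MPSKit, MPSKitModels
+
+O = classical_ising(; beta=0.5)      # assumed constructor for the 2D classical Ising transfer MPO
+ψ₀ = InfiniteMPS(ℂ^2, ℂ^20)          # initial guess for the boundary MPS
+ψ, envs, ϵ = leading_boundary(ψ₀, O, VUMPS(; maxiter=400))
+```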
+```@docs; canonical=false +leading_boundary +``` + ## `approximate` Often, it is useful to approximate a given MPS by another, typically by one of a different @@ -386,8 +336,15 @@ one of the above categories. ### Dynamical DMRG Dynamical DMRG has been described in other papers and is a way to find the propagator. The -basic idea is that to calculate ``G(z) = < V | (H-z)^{-1} | V > `` , one can variationally -find ``(H-z) |W > = | V > `` and then the propagator simply equals ``G(z) = < V | W >``. +basic idea is that to calculate ``G(z) = ⟨ V | (H-z)^{-1} | V ⟩ `` , one can variationally +find ``(H-z) |W ⟩ = | V ⟩ `` and then the propagator simply equals ``G(z) = ⟨ V | W ⟩``. + +```@docs; canonical=false +propagator +DynamicalDMRG +NaiveInvert +Jeckelmann +``` ### fidelity susceptibility @@ -395,27 +352,28 @@ The fidelity susceptibility measures how much the groundstate changes when tunin parameter in your hamiltonian. Divergences occur at phase transitions, making it a valuable measure when no order parameter is known. -```julia -fidelity_susceptibility(groundstate,H_0,perturbing_Hams::AbstractVector) +```@docs; canonical=false +fidelity_susceptibility ``` -### periodic boundary conditions +### Boundary conditions -You can impose periodic boundary conditions on the hamiltonian itself, while still using a -normal OBC finite matrix product states. This is straightforward to implement but -competitive with more advanced PBC MPS algorithms. +You can impose periodic or open boundary conditions on an infinite Hamiltonian, to generate a finite counterpart. +In particular, for periodic boundary conditions we still return an MPO that does not form a closed loop, such that it can be used with regular matrix product states. +This is straightforward to implement but, and while this effectively squares the bond dimension, it is still competitive with more advanced periodic MPS algorithms. -### exact diagonalization +```@docs; canonical=false +open_boundary_conditions +periodic_boundary_conditions +``` + +### Exact diagonalization As a side effect, our code support exact diagonalization. The idea is to construct a finite matrix product state with maximal bond dimension, and then optimize the middle site. Because we never truncated the bond dimension, this single site effectively parametrizes the entire hilbert space. -```julia -exact_diagonalization(periodic_boundary_conditions(su2_xxx_ham(spin=1),10),which=:SR) # find the groundstate on 10 sites -``` - -```@meta -DocTestSetup = nothing +```@docs; canonical=false +exact_diagonalization ``` diff --git a/docs/src/man/environments.md b/docs/src/man/environments.md index 02299fc5b..0b8d9b42f 100644 --- a/docs/src/man/environments.md +++ b/docs/src/man/environments.md @@ -1,34 +1,40 @@ # [Environments](@id um_environments) -In many tensor network algorithms we encounter partially contracted tensor networks. In dmrg for example, one needs to know the sum of all the hamiltonian contributions left and right of the site that we want to optimize. If you then optimize the neighboring site to the right, you only need to add one new contribution to the previous sum of hamiltonian contributions. +In many tensor network algorithms we encounter partially contracted tensor networks. +In DMRG for example, one needs to know the sum of all the hamiltonian contributions left and right of the site that we want to optimize. +If you then optimize the neighboring site to the right, you only need to add one new contribution to the previous sum of hamiltonian contributions. 
-This kind of information is stored in the environment objects (at the moment called "Cache" in our code, but the name is subject to change). The goal is that the user should preferably never have to deal with the caches, but being aware of the inner workings may allow you to write more efficient code. That is why they are nonetheless included in the manual. +This kind of information is stored in the environment objects. +The goal is that the user should preferably never have to deal with these objects, but being aware of the inner workings may allow you to write more efficient code. +That is why they are nonetheless included in the manual. ## Finite Environments When you create a state and a hamiltonian: ```julia -state = FiniteMPS(rand,ComplexF64,20,ℂ^2,ℂ^10); +state = FiniteMPS(rand, ComplexF64, 20, ℂ^2, ℂ^10); operator = nonsym_ising_ham(); ``` an environment object can be created by calling ```julia -cache = environments(state,operator) +envs = environments(state, operator) ``` The partially contracted mpohamiltonian left of site i can then be queried using: + ```julia -leftenv(cache,i,state) +@time leftenv(envs, i, state) ``` -This may take some time, but a subsequent call to +This may take some time, but a subsequent call should be a lot quicker + ```julia -leftenv(cache,i-1,state) +@time leftenv(envs, i - 1, state) ``` -Should pretty much be free. Behind the scenes the cache stored all tensors it used to calculate leftenv (state.AL[1 .. i]) and when queried again, it checks if the tensors it previously used are identical (using ===). If so, it can simply return the previously stored results. If not, it will recalculate again. If you update a tensor in-place, the caches cannot know using === that the actual tensors have changed. If you do this, you have to call poison!(state,i). +Behind the scenes the `envs` stored all tensors it used to calculate leftenv (state.AL[1 .. i]) and when queried again, it checks if the tensors it previously used are identical (using ===). If so, it can simply return the previously stored results. If not, it will recalculate again. If you update a tensor in-place, the caches cannot know using === that the actual tensors have changed. If you do this, you have to call poison!(state,i). As an optional argument, many algorithms allow you to pass in an environment object, and they also return an updated one. Therefore, for time evolution code, it is more efficient to give it the updated caches every time step, instead of letting it recalculate. @@ -36,9 +42,9 @@ As an optional argument, many algorithms allow you to pass in an environment obj Infinite Environments are very similar : ```julia -state = InfiniteMPS([ℂ^2],[ℂ^10]); -operator = nonsym_ising_ham(); -cache = environments(state,operator) +state = InfiniteMPS(ℂ^2, ℂ^10) +operator = transverse_field_ising() +envs = environments(state, operator) ``` There are also some notable differences. Infinite environments typically require solving linear problems or eigenvalue problems iteratively with finite precision. To find out what precision we used we can type: diff --git a/docs/src/man/intro.md b/docs/src/man/intro.md index acd2de67e..9b74dce2f 100644 --- a/docs/src/man/intro.md +++ b/docs/src/man/intro.md @@ -19,10 +19,11 @@ interpreted as a linear map from its domain to its codomain. Additionally, as ge symmetries are supported, in general the structure of the indices are not just integers, but are given by spaces. 
-The general syntax for creating a tensor is one of the following equivalent forms: +The general syntax for creating a tensor is similar to the creation of arrays, where the +`axes` or `size` specifiers are replaced with `VectorSpace` objects: ```julia -TensorMap(initializer, scalartype, codomain, domain) -TensorMap(initializer, scalartype, codomain ← domain) # ← is the `\leftarrow` operator +zeros(scalartype, codomain, domain) +rand(scalartype, codomain ← domain) # ← is the `\leftarrow` operator ``` For example, the following creates a random tensor with three legs, each of which has @@ -30,20 +31,23 @@ dimension two, however with different partitions. ```@example tensorkit V1 = ℂ^2 # ℂ is the `\bbC` operator, equivalent to ComplexSpace(10) -t1 = Tensor(rand, Float64, V1 ⊗ V1 ⊗ V1) # all spaces in codomain -t2 = TensorMap(rand, Float64, V1, V1 ⊗ V1) # one space in codomain, two in domain - -try - t1 + t2 # incompatible partition -catch err - println(err) -end - -try - t1 + permute(t2, (1, 2, 3), ()) # incompatible arrows -catch err - println(err) -end +t1 = rand(Float64, V1 ⊗ V1 ⊗ V1) # all spaces in codomain +t2 = rand(Float64, V1, V1 ⊗ V1) # one space in codomain, two in domain +``` + +We can now no longer trivially add them together: + +```@example tensorkit +try #hide +t1 + t2 # incompatible partition +catch err; Base.showerror(stderr, err); end #hide +``` +But this can be resolved by permutation: + +```@example tensorkit +try #hide +t1 + permute(t2, (1, 2, 3), ()) # incompatible arrows +catch err; Base.showerror(stderr, err); end #hide ``` These abstract objects can represent not only plain arrays but also symmetric tensors. The @@ -52,7 +56,7 @@ two. However, now the dimension two is now split over even and odd sectors of ```@example tensorkit V2 = Z2Space(0 => 1, 1 => 1) -t3 = TensorMap(rand, Float64, V2 ⊗ V2, V2) +t3 = rand(Float64, V2 ⊗ V2, V2) ``` For more information, check out the [TensorKit documentation](https://jutho.github.io/TensorKit.jl/stable/)! diff --git a/docs/src/man/operators.md b/docs/src/man/operators.md index 4246f52ab..2a4ef76be 100644 --- a/docs/src/man/operators.md +++ b/docs/src/man/operators.md @@ -8,15 +8,15 @@ represented by a periodic array of MPO tensors. ## FiniteMPO -Starting off with the simplest case, a basic finite MPO is a vector of `MPOTensor` objects. +Starting off with the simplest case, a basic [`FiniteMPO`](@ref) is a vector of `MPOTensor` objects. These objects can be created either directly from a vector of `MPOTensor`s, or starting from -a dense operator (a subtype of `AbstractTensorMap{S,N,N}`), which is then decomposed into a +a dense operator (a subtype of `AbstractTensorMap`), which is then decomposed into a product of local tensors. ![](../assets/mpo.svg) ```@setup operators -using TensorKit, MPSKit +using TensorKit, MPSKit, MPSKitModels ``` ```@example operators @@ -71,6 +71,18 @@ O_xzx_sum * FiniteMPS(3, ℂ^2, ℂ^4) make sure that the virtual spaces do not increase past the maximal virtual space that is dictated by the requirement of being full-rank tensors. +## InfiniteMPO + +This construction can again be extended to the infinite case, where the tensors are repeated periodically. +Therefore, an [`InfiniteMPO`](@ref) is simply a `PeriodicVector` of `MPOTensor` objects. +These can only be constructed from vectors of `MPOTensor`s, since it is impossible to create the infinite operators directly. 
+ +```@example operators +mpo = InfiniteMPO(O_xzx[1:2]) +``` + +Otherwise, their behavior is mostly similar to that of their finite counterparts. + ## FiniteMPOHamiltonian We can also represent quantum Hamiltonians in the same form. This is done by converting a @@ -103,7 +115,7 @@ h = 0.5 chain = fill(ℂ^2, 3) # a finite chain of 4 sites, each with a 2-dimensional Hilbert space single_site_operators = [1 => -h * S_z, 2 => -h * S_z, 3 => -h * S_z] two_site_operators = [(1, 2) => -J * S_x ⊗ S_x, (2, 3) => -J * S_x ⊗ S_x] -H_ising = FiniteMPOHamiltonian(chain, single_site_operators..., two_site_operators...); +H_ising = FiniteMPOHamiltonian(chain, single_site_operators..., two_site_operators...) ``` Various alternative constructions are possible, such as using a `Dict` with key-value pairs @@ -118,8 +130,8 @@ isapprox(H_ising, H_ising′; atol=1e-6) Note that this construction is not limited to nearest-neighbour interactions, or 1D systems. In particular, it is possible to construct quasi-1D realisations of 2D systems, by using -different arrays of `VectorSpace` objects. For example, the 2D Ising model on a square -lattice can be constructed as follows: +different arrays of [`VectorSpace`](@extref TensorKit.VectorSpace) objects. +For example, the 2D Ising model on a square lattice can be constructed as follows: ```@example operators square = fill(ℂ^2, 3, 3) # a 3x3 square lattice @@ -155,6 +167,19 @@ H_ising_2d = FiniteMPOHamiltonian(square, local_operators) + There are various utility functions available for constructing more advanced lattices, for which the [lattices](@ref) section should be consulted. +## InfiniteMPOHamiltonian + +Again, this construction can be extended straightforwardly to the infinite case. +To that end, we simply need to specify all interactions per unit cell. +In particular, an [`InfiniteMPOHamiltonian`](@ref) for the Ising model is obtained via + +```@example operators +J = 1.0 +h = 0.5 +infinite_chain = PeriodicVector([ℂ^2]) # an infinite chain of a local 2-dimensional Hilbert space +H_ising_infinite = InfiniteMPOHamiltonian(infinite_chain, 1 => -h * S_z, (1, 2) => -J * S_x ⊗ S_x) +``` + ### Expert mode The `MPOHamiltonian` constructor is in fact an automated way of constructing the @@ -224,33 +249,14 @@ Vᵣ = [0, 0, 1] expand(Vₗ * prod(Ws) * Vᵣ) ``` -The `FiniteMPOHamiltonian` constructor can also be used to construct the operator from this most -general form, by supplying a 3-dimensional array $W$ to the constructor. Here, the first -dimension specifies the site in the unit cell, the second dimension specifies the row of the -matrix, and the third dimension specifies the column of the matrix. - - - -```julia -data = Array{Any,3}(missing, 1, 3, 3) # missing is interpreted as zero -data[1, 1, 1] = id(ComplexF64, ℂ^2) -data[1, 3, 3] = 1 # regular numbers are interpreted as identity operators -data[1, 1, 2] = -J * S_x -data[1, 2, 3] = S_x -data[1, 1, 3] = -h * S_z -data_range = repeat(data, 4, 1, 1) # make 4 sites long -H_ising″ = FiniteMPOHamiltonian(data_range) -``` +The [`FiniteMPOHamiltonian`](@ref) constructor can also be used to construct the operator from this most +general form, by supplying a vector of [`BlockTensorMap`](@extref BlockTensorKit.BlockTensorMap) objects +to the constructor. Here, the vector specifies the sites in the unit cell, while the blocktensors contain +the rows and columns of the matrix. We can verify this explicitly: -MPSKit will then automatically attach the correct boundary vectors to the Hamiltonian whenever this is required. - -!!! 
note - While the above example can be constructed from building blocks that are strictly local - operators, i.e. TensorMaps with a single ingoing and outgoing index. - This is not always the case, especially when symmetries are involved. In - those cases, the elements of the matrix $W$ have additional virtual legs that are - contracted between different sites. As such, indexing an MPOHamiltonian object will result - in a TensorMap with four legs. +```@example operators +H_ising[2] # print the blocktensor +``` ### Working with `MPOHamiltonian` objects @@ -263,5 +269,6 @@ available, such as the virtual spaces, or the individual tensors. However, the b structure of the operator means that now the virtual spaces are not just a single space, but a collection (direct sum) of spaces, one for each row/column. - - +```@example operators +left_virtualspace(H_ising, 1), right_virtualspace(H_ising, 1), physicalspace(H_ising, 1) +``` diff --git a/docs/src/man/states.md b/docs/src/man/states.md index 97bb9ecb9..a22b110fd 100644 --- a/docs/src/man/states.md +++ b/docs/src/man/states.md @@ -1,67 +1,86 @@ # [States](@id um_states) +```@setup states +using MPSKit +using TensorKit +using LinearAlgebra: dot +``` ## FiniteMPS -A FiniteMPS is - at its core - a chain of mps tensors. +A [`FiniteMPS`](@ref) is - at its core - a chain of mps tensors. ![](finite_mps_definition.png) ### Usage -A [`FiniteMPS`](@ref) can be created by passing in a vector of tensormaps: +A `FiniteMPS` can be created by passing in a vector of tensormaps: -```julia +```@example states L = 10 data = [rand(ComplexF64, ℂ^1 ⊗ ℂ^2 ← ℂ^1) for _ in 1:L]; -FiniteMPS(data) +state = FiniteMPS(data) ``` Or alternatively by specifying its structure -```julia -max_bond_dimension = ℂ^10 + +```@example states +max_bond_dimension = ℂ^4 physical_space = ℂ^2 -FiniteMPS(rand, ComplexF64, L, physical_space, max_bond_dimension) +state = FiniteMPS(rand, ComplexF64, L, physical_space, max_bond_dimension) ``` You can take dot products, renormalize!, expectation values,.... -### Gauging +### Gauging and canonical forms -There is residual gauge freedom in such a finite mps : +An MPS representation is not unique: for every virtual bond we can insert $C \cdot C^{-1}$ without altering the state. +Then, by redefining the tensors on both sides of the bond to include one factor each, we can change the representation. ![](mps_gauge_freedom.png) -which is often exploited in mps algorithms. The gauging logic is handled behind the scenes, if you call +There are two particularly convenient choices for the gauge at a site, the so-called left and right canonical form. +For the left canonical form, all tensors to the left of a site are gauged such that they become left-isometries. +By convention, we call these tensors `AL`. -```julia -state.AL[3] +```@example states +al = state.AL[3] +al' * al ≈ id(right_virtualspace(al)) ``` -then the state will be gauged such that the third tensor is a left isometry (similarly for state.AR). +Similarly, the right canonical form turns the tensors into right-isometries. +By convention, these are called `AR`. -```julia -state.AC[3] +```@example states +ar = state.AR[3] +repartition(ar, 1, 2) * repartition(ar, 1, 2)' ≈ id(left_virtualspace(ar)) ``` -gauges the state in such a way that all tensors to the left are left isometries, and to the right will be right isometries. 
-As a result you should have -```julia -norm(state) == norm(state.AC[3]) +It is also possible to mix and match these two forms, where all tensors to the left of a given site are in the left gauge, while all tensors to the right are in the right gauge. +In this case, the final gauge transformation tensor can no longer be absorbed, since that would spoil the gauge either to the left or the right. +This center-gauged tensor is called `C`, which is also the gauge transformation to relate left- and right-gauged tensors. +Finally, for convenience it is also possible to leave a single MPS tensor in the center gauge, which we call `AC = AL * C` + +```@example states +c = state.C[3] # to the right of site 3 +c′ = state.C[2] # to the left of site 3 +al * c ≈ state.AC[3] ≈ repartition(c′ * repartition(ar, 1, 2), 2, 1) ``` -lastly there is also the CR field, with the following property: +These forms are often used throughout MPS algorithms, and the [`FiniteMPS`](@ref) object acts as an automatic manager for this. +It will automatically compute and cache the different forms, and detect when to recompute whenever needed. +For example, in order to compute the overlap of an MPS with itself, we can choose any site and bring that into the center gauge. +Since then both the left and right side simplify to the identity, this simply becomes the overlap of the gauge tensors: -```julia -@tensor a[-1 -2;-3] := state.AL[3][-1 -2;1]*state.C[3][1;-3] -@tensor b[-1 -2;-3] := state.C[2][-1;1]*state.AR[3][1 -2;-3] -a ≈ state.AC[3]; -b ≈ state.AC[3]; +```@example states +d = dot(state, state) +all(c -> dot(c, c) ≈ d, state.C) ``` ### Implementation details -Behind the scenes, a finite mps has 4 fields +Behind the scenes, a `FiniteMPS` has 4 fields + ```julia ALs::Vector{Union{Missing,A}} ARs::Vector{Union{Missing,A}} @@ -69,91 +88,83 @@ ACs::Vector{Union{Missing,A}} Cs::Vector{Union{Missing,B}} ``` -calling `state.AC` returns an "orthoview" instance, which is a very simple dummy object. -When you call get/setindex on an orthoview, it will move the gauge for the underlying state, and return the result. +and calling `AL`, `AR`, `C` or `AC` returns lazy views over these vectors that instantiate the tensors whenever they are requested. +Similarly, changing a tensor will poison the `ARs` to the left of that tensor, and the `ALs` to the right. The idea behind this construction is that one never has to worry about how the state is gauged, as this gets handled automagically. -The following bit of code shows the logic in action: +!!! warning + While a `FiniteMPS` can automatically detect when to recompute the different gauges, this requires that one of the tensors is set using an indexing operation. + In particular, in-place changes to the different tensors will not trigger the recomputation. -```julia -state = FiniteMPS(10, ℂ^2, ℂ^10); # a random initial state -@show ismissing.(state.ALs) # all AL fields are already calculated -@show ismissing.(state.ARs) # all AR fields are missing +## InfiniteMPS -#if we now query state.AC[2], it should calculate and store all AR fields left of position 2 -state.AC[2]; -@show ismissing.(state.ARs) -``` +An [`InfiniteMPS`](@ref) can be thought of as being very similar to a finite mps, where the set of tensors is repeated periodically. 
-## InfiniteMPS +It can also be created by passing in a vector of `TensorMap`s: + +```@example states +data = [rand(ComplexF64, ℂ^4 ⊗ ℂ^2 ← ℂ^4) for _ in 1:2] +state = InfiniteMPS(data) +``` -An infinite mps can be thought of as a finite mps, where the set of tensors is repeated periodically. +or by initializing it from given spaces -It can be created by passing in a vector of tensormaps: -```julia -data = fill(TensorMap(rand,ComplexF64,ℂ^10*ℂ^2,ℂ^10),2); -InfiniteMPS(data); +```@example states +phys_spaces = fill(ℂ^2, 2) +virt_spaces = [ℂ^4, ℂ^5] # by convention to the right of a site +state = InfiniteMPS(phys_spaces, virt_spaces) ``` -The above code would create an infinite mps with an A-B structure (a 2 site unit cell). +Note that the code above creates an `InfiniteMPS` with a two-site unit cell, where the given virtual spaces are located to the right of their respective sites. -much like a finite mps, we can again query the fields state.AL, state.AR, state.AC and state.C. The implementation is much easier, as they are now just plain fields in the struct +### Gauging and canonical forms + +Much like for `FiniteMPS`, we can again query the gauged tensors `AL`, `AR`, `C` and `AC`. +Here however, the implementation is much easier, since they all have to be recomputed whenever a single tensor changes. +This is a result of periodically repeating the tensors, every `AL` is to the right of the changed site, and every `AR` is to the left. +As a result, the fields are simply ```julia AL::PeriodicArray{A,1} AR::PeriodicArray{A,1} -CR::PeriodicArray{B,1} +C::PeriodicArray{B,1} AC::PeriodicArray{A,1} ``` -The periodic array is an array-like type where all indices are repeated periodically. - ## WindowMPS -WindowMPS is a bit of a mix between an infinite mps and a finite mps. It represents a window of mutable tensors embedded in an infinite mps. +A [`WindowMPS`](@ref) or segment MPS can be seen as a mix between an [`InfiniteMPS`](@ref) and a [`FiniteMPS`](@ref). +It represents a window of mutable tensors (a finite MPS), embedded in an infinite environment (two infinite MPSs). +It can therefore be created accordingly, ensuring that the edges match: -It can be created using: -```julia -mpco = WindowMPS(left_infinite_mps,window_of_tensors,right_infinite_mps) +```@example states +infinite_state = InfiniteMPS(ℂ^2, ℂ^4) +finite_state = FiniteMPS(5, ℂ^2, ℂ^4; left=ℂ^4, right=ℂ^4) +window = WindowMPS(infinite_state, finite_state, infinite_state) ``` -Algorithms will then act on this window of tensors, while leaving the left and right infinite mps's invariant. - -Behind the scenes it uses the same orthoview logic as finitemps. - -## Multiline - -Statistical physics partition functions can be represented by an infinite tensor network which then needs to be contracted. -This is done by finding approximate fixpoint infinite matrix product states. -However, there is no good reason why a single mps should suffice and indeed we find in practice that this can require a nontrivial unit cell in both dimensions. +Algorithms will then act on this window of tensors, while leaving the left and right infinite states invariant. -In other words, the fixpoints can be well described by a set of matrix product states. +## MultilineMPS -Such a set can be created by +A two-dimensional classical partition function can often be represented by an infinite tensor network. +There are many ways to evaluate such a network, but here we focus on the so-called boundary MPS methods. 
+These first reduce the problem from contracting a two-dimensional network to the contraction of a one-dimensional MPS, by finding the fixed point of the row-to-row (or column-to-column) transfer matrix. +In these cases however, there might be a non-trivial periodicity in both the horizontal as well as vertical direction. +Therefore, in MPSKit they are represented by [`MultilineMPS`](@ref), which are simply a repeating set of [`InfiniteMPS`](@ref). -```julia -data = fill(TensorMap(rand,ComplexF64,ℂ^10*ℂ^2,ℂ^10),2,2); -MultilineMPS(data); +```@example states +state = MultilineMPS(fill(infinite_state, 2)) ``` -MultilineMPS is also used extensively in as of yet unreleased peps code. + +They offer some convenience functionality for using cartesian indexing (row - column): You can access properties by calling -```julia -state.AL[row,collumn] -state.AC[row,collumn] -state.AR[row,collumn] -state.C[row,collumn] +```@example states +row = 2 +col = 2 +al = state.AL[row, col]; ``` -Behind the scenes, we have a type called Multiline, defined as: +These objects are also used extensively in the context of [PEPSKit.jl](https://github.com/QuantumKitHub/PEPSKit.jl). -```julia -struct Multiline{T} - data::PeriodicArray{T,1} -end -``` - -MultilineMPS/MultilineMPO are then defined as -```julia -const MultilineMPS = Multiline{<:InfiniteMPS} -const MultilineMPO = Multiline{<:DenseMPO} diff --git a/src/MPSKit.jl b/src/MPSKit.jl index a83c9e740..1fe569db2 100644 --- a/src/MPSKit.jl +++ b/src/MPSKit.jl @@ -30,7 +30,7 @@ export leftenv, rightenv export find_groundstate, find_groundstate! export leading_boundary export approximate, approximate! -export VUMPS, VOMPS, DMRG, DMRG2, IDMRG1, IDMRG2, GradientGrassmann +export VUMPS, VOMPS, DMRG, DMRG2, IDMRG, IDMRG2, GradientGrassmann export excitations export FiniteExcited, QuasiparticleAnsatz, ChepigaAnsatz, ChepigaAnsatz2 export time_evolve, timestep, timestep!, make_time_mpo diff --git a/src/algorithms/ED.jl b/src/algorithms/ED.jl index 0588fc7c2..585181a23 100644 --- a/src/algorithms/ED.jl +++ b/src/algorithms/ED.jl @@ -1,5 +1,13 @@ """ -Use krylovkit to perform exact diagonalization + exact_diagonalization(opp::FiniteMPOHamiltonian; + sector=first(sectors(oneunit(physicalspace(opp, 1)))), + len::Int=length(opp), num::Int=1, which::Symbol=:SR, + alg=Defaults.alg_eigsolve(; dynamic_tols=false)) + -> vals, state_vecs, convhist + +Use [`KrylovKit.eigsolve`](@extref) to perform exact diagonalization on a +`FiniteMPOHamiltonian` to find its eigenvectors as `FiniteMPS` of maximal rank, essentially +equivalent to dense eigenvectors. """ function exact_diagonalization(opp::FiniteMPOHamiltonian; sector=first(sectors(oneunit(physicalspace(opp, 1)))), diff --git a/src/algorithms/approximate/approximate.jl b/src/algorithms/approximate/approximate.jl index 2ce627d07..e2bd6ee5f 100644 --- a/src/algorithms/approximate/approximate.jl +++ b/src/algorithms/approximate/approximate.jl @@ -1,6 +1,6 @@ @doc """ - approximate(ψ₀, (O, ψ), algorithm, [environments]; kwargs...) - approximate!(ψ₀, (O, ψ), algorithm, [environments]; kwargs...) + approximate(ψ₀, (O, ψ), algorithm, [environments]; kwargs...) -> (ψ, environments) + approximate!(ψ₀, (O, ψ), algorithm, [environments]; kwargs...) -> (ψ, environments) Compute an approximation to the application of an operator `O` to the state `ψ` in the form of an MPS `ψ₀`. @@ -20,7 +20,7 @@ of an MPS `ψ₀`. - `DMRG`: Alternating least square method for maximizing the fidelity with a single-site scheme. 
- `DMRG2`: Alternating least square method for maximizing the fidelity with a two-site scheme. -- `IDMRG1`: Variant of `DMRG` for maximizing fidelity density in the thermodynamic limit. +- `IDMRG`: Variant of `DMRG` for maximizing fidelity density in the thermodynamic limit. - `IDMRG2`: Variant of `DMRG2` for maximizing fidelity density in the thermodynamic limit. - `VOMPS`: Tangent space method for truncating uniform MPS. """ @@ -40,7 +40,7 @@ function approximate(ψ::InfiniteMPS, end # dispatch to in-place method -function approximate(ψ, toapprox, alg::Union{DMRG,DMRG2,IDMRG1,IDMRG2}, +function approximate(ψ, toapprox, alg::Union{DMRG,DMRG2,IDMRG,IDMRG2}, envs=environments(ψ, toapprox)) return approximate!(copy(ψ), toapprox, alg, envs) end @@ -48,7 +48,7 @@ end # disambiguate function approximate(ψ::InfiniteMPS, toapprox::Tuple{<:InfiniteMPO,<:InfiniteMPS}, - algorithm::Union{IDMRG1,IDMRG2}, + algorithm::Union{IDMRG,IDMRG2}, envs=environments(ψ, toapprox)) envs′ = Multiline([envs]) multi, envs = approximate(convert(MultilineMPS, ψ), diff --git a/src/algorithms/approximate/idmrg.jl b/src/algorithms/approximate/idmrg.jl index ba63230b2..472215629 100644 --- a/src/algorithms/approximate/idmrg.jl +++ b/src/algorithms/approximate/idmrg.jl @@ -1,5 +1,5 @@ function approximate!(ψ::MultilineMPS, toapprox::Tuple{<:MultilineMPO,<:MultilineMPS}, - alg::IDMRG1, envs=environments(ψ, toapprox)) + alg::IDMRG, envs=environments(ψ, toapprox)) log = IterLog("IDMRG") ϵ::Float64 = 2 * alg.tol local iter diff --git a/src/algorithms/excitation/excitations.jl b/src/algorithms/excitation/excitations.jl index 77c1f09a5..2e8855519 100644 --- a/src/algorithms/excitation/excitations.jl +++ b/src/algorithms/excitation/excitations.jl @@ -1,14 +1,14 @@ """ excitations(H, algorithm::QuasiparticleAnsatz, ψ::FiniteQP, [left_environments], - [right_environments]; num=1) + [right_environments]; num=1) -> (energies, states) excitations(H, algorithm::QuasiparticleAnsatz, ψ::InfiniteQP, [left_environments], - [right_environments]; num=1, solver=Defaults.solver) + [right_environments]; num=1, solver=Defaults.solver) -> (energies, states) excitations(H, algorithm::FiniteExcited, ψs::NTuple{<:Any, <:FiniteMPS}; - num=1, init=copy(first(ψs))) + num=1, init=copy(first(ψs))) -> (energies, states) excitations(H, algorithm::ChepigaAnsatz, ψ::FiniteMPS, [envs]; - num=1, pos=length(ψ)÷2) + num=1, pos=length(ψ)÷2) -> (energies, states) excitations(H, algorithm::ChepigaAnsatz2, ψ::FiniteMPS, [envs]; - num=1, pos=length(ψ)÷2) + num=1, pos=length(ψ)÷2) -> (energies, states) Compute the first excited states and their energy gap above a groundstate. diff --git a/src/algorithms/fidelity_susceptibility.jl b/src/algorithms/fidelity_susceptibility.jl index fb8c3b38c..783da61e9 100644 --- a/src/algorithms/fidelity_susceptibility.jl +++ b/src/algorithms/fidelity_susceptibility.jl @@ -1,6 +1,17 @@ -#= -I don't know if I should rescale by system size / unit cell -=# +""" + fidelity_susceptibility(state::Union{FiniteMPS,InfiniteMPS}, H₀::T, + Vs::AbstractVector{T}, [henvs=environments(state, H₀)]; + maxiter=Defaults.maxiter, + tol=Defaults.tol) where {T<:MPOHamiltonian} + +Computes the fidelity susceptibility of a the ground state `state` of a base Hamiltonian +`H₀` with respect to a set of perturbing Hamiltonians `Vs`. Each of the perturbing +Hamiltonians can be interpreted as corresponding to a tuning parameter ``aᵢ`` in a 'total' +Hamiltonian ``H = H₀ + ∑ᵢ aᵢ Vᵢ``. 
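+
+A minimal usage sketch, with `H₀` and the perturbations given as compatible
+`MPOHamiltonian`s and `ψ` a groundstate of `H₀` obtained via [`find_groundstate`](@ref):
+
+```julia
+χ = fidelity_susceptibility(ψ, H₀, [V₁, V₂])  # one row/column per perturbing Hamiltonian
+```
+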
+ +Returns a matrix containing the overlaps of the elementary excitations on top of `state` +corresponding to each of the perturbing Hamiltonians. +""" function fidelity_susceptibility(state::Union{FiniteMPS,InfiniteMPS}, H₀::T, Vs::AbstractVector{T}, henvs=environments(state, H₀); maxiter=Defaults.maxiter, diff --git a/src/algorithms/groundstate/find_groundstate.jl b/src/algorithms/groundstate/find_groundstate.jl index 5fd3312c4..de56ddeaa 100644 --- a/src/algorithms/groundstate/find_groundstate.jl +++ b/src/algorithms/groundstate/find_groundstate.jl @@ -1,12 +1,12 @@ """ - find_groundstate(ψ, H, [environments]; kwargs...) - find_groundstate(ψ, H, algorithm, environments) + find_groundstate(ψ₀, H, [environments]; kwargs...) -> (ψ, environments, ϵ) + find_groundstate(ψ₀, H, algorithm, environments) -> (ψ, environments, ϵ) Compute the groundstate for Hamiltonian `H` with initial guess `ψ`. If not specified, an optimization algorithm will be attempted based on the supplied keywords. ## Arguments -- `ψ::AbstractMPS`: initial guess +- `ψ₀::AbstractMPS`: initial guess - `H::AbstractMPO`: operator for which to find the groundstate - `[environments]`: MPS environment manager - `algorithm`: optimization algorithm @@ -15,6 +15,11 @@ optimization algorithm will be attempted based on the supplied keywords. - `tol::Float64`: tolerance for convergence criterium - `maxiter::Int`: maximum amount of iterations - `verbosity::Int`: display progress information + +## Returns +- `ψ::AbstractMPS`: converged groundstate +- `environments`: environments corresponding to the converged state +- `ϵ::Float64`: final convergence error upon terminating the algorithm """ function find_groundstate(ψ::AbstractMPS, H, envs::AbstractMPSEnvironments=environments(ψ, H); diff --git a/src/algorithms/groundstate/idmrg.jl b/src/algorithms/groundstate/idmrg.jl index 4f5eb0933..b60adc9b0 100644 --- a/src/algorithms/groundstate/idmrg.jl +++ b/src/algorithms/groundstate/idmrg.jl @@ -7,7 +7,7 @@ Single site infinite DMRG algorithm for finding the dominant eigenvector. $(TYPEDFIELDS) """ -@kwdef struct IDMRG1{A} <: Algorithm +@kwdef struct IDMRG{A} <: Algorithm "tolerance for convergence criterium" tol::Float64 = Defaults.tol @@ -24,7 +24,7 @@ $(TYPEDFIELDS) alg_eigsolve::A = Defaults.alg_eigsolve() end -function find_groundstate(ost::InfiniteMPS, H, alg::IDMRG1, envs=environments(ost, H)) +function find_groundstate(ost::InfiniteMPS, H, alg::IDMRG, envs=environments(ost, H)) ϵ::Float64 = calc_galerkin(ost, H, ost, envs) ψ = copy(ost) log = IterLog("IDMRG") diff --git a/src/algorithms/propagator/corvector.jl b/src/algorithms/propagator/corvector.jl index 0b778b765..6a0ca6575 100644 --- a/src/algorithms/propagator/corvector.jl +++ b/src/algorithms/propagator/corvector.jl @@ -35,7 +35,7 @@ end """ propagator(ψ₀::AbstractFiniteMPS, z::Number, H::MPOHamiltonian, alg::DynamicalDMRG; init=copy(ψ₀)) -Calculate the propagator ``\\frac{1}{E₀ + z - H}|ψ₀>`` using the dynamical DMRG +Calculate the propagator ``\\frac{1}{E₀ + z - H}|ψ₀⟩`` using the dynamical DMRG algorithm. """ function propagator end @@ -47,11 +47,11 @@ An alternative approach to the dynamical DMRG algorithm, without quadratic terms less controlled approximation. This algorithm minimizes the following cost function ```math -<ψ|(H - E)|ψ> - <ψ|ψ₀> - <ψ₀|ψ> +⟨ψ|(H - E)|ψ⟩ - ⟨ψ|ψ₀⟩ - ⟨ψ₀|ψ⟩ ``` which is equivalent to the original approach if ```math -|ψ₀> = (H - E)|ψ> +|ψ₀⟩ = (H - E)|ψ⟩ ``` See also [`Jeckelmann`](@ref) for the original approach. 
@@ -105,7 +105,7 @@ $(TYPEDEF) The original flavour of dynamical DMRG, which minimizes the following (quadratic) cost function: ```math -|| (H - E) |ψ₀> - |ψ> || +|| (H - E) |ψ₀⟩ - |ψ⟩ || ``` See also [`NaiveInvert`](@ref) for a less costly but less accurate alternative. diff --git a/src/algorithms/statmech/leading_boundary.jl b/src/algorithms/statmech/leading_boundary.jl index 1fc759dd5..9ca7ebd13 100644 --- a/src/algorithms/statmech/leading_boundary.jl +++ b/src/algorithms/statmech/leading_boundary.jl @@ -1,13 +1,13 @@ @doc """ - leading_boundary(ψ, O, [environments]; kwargs...) - leading_boundary(ψ, O, algorithm, environments) + leading_boundary(ψ₀, O, [environments]; kwargs...) -> (ψ, environments, ϵ) + leading_boundary(ψ₀, O, algorithm, environments) -> (ψ, environments, ϵ) Compute the leading boundary MPS for operator `O` with initial guess `ψ`. If not specified, an optimization algorithm will be attempted based on the supplied keywords. ## Arguments -- `ψ::AbstractMPS`: initial guess +- `ψ₀::AbstractMPS`: initial guess - `O::AbstractMPO`: operator for which to find the leading_boundary - `[environments]`: MPS environment manager - `algorithm`: optimization algorithm @@ -16,6 +16,11 @@ optimization algorithm will be attempted based on the supplied keywords. - `tol::Float64`: tolerance for convergence criterium - `maxiter::Int`: maximum amount of iterations - `verbosity::Int`: display progress information + +## Returns +- `ψ::AbstractMPS`: converged leading boundary MPS +- `environments`: environments corresponding to the converged boundary +- `ϵ::Float64`: final convergence error upon terminating the algorithm """ leading_boundary # TODO: alg selector diff --git a/src/algorithms/timestep/time_evolve.jl b/src/algorithms/timestep/time_evolve.jl index 35d884667..5c0753510 100644 --- a/src/algorithms/timestep/time_evolve.jl +++ b/src/algorithms/timestep/time_evolve.jl @@ -1,6 +1,6 @@ """ - time_evolve(ψ₀, H, t_span, [alg], [envs]; kwargs...) - time_evolve!(ψ₀, H, t_span, [alg], [envs]; kwargs...) + time_evolve(ψ₀, H, t_span, [alg], [envs]; kwargs...) -> (ψ, envs) + time_evolve!(ψ₀, H, t_span, [alg], [envs]; kwargs...) -> (ψ₀, envs) Time-evolve the initial state `ψ₀` with Hamiltonian `H` over a given time span by stepping through each of the time points obtained by iterating t_span. @@ -37,8 +37,8 @@ for (timestep, time_evolve) in zip((:timestep, :timestep!), (:time_evolve, :time end """ - timestep(ψ₀, H, t, dt, [alg], [envs]; kwargs...) - timestep!(ψ₀, H, t, dt, [alg], [envs]; kwargs...) + timestep(ψ₀, H, t, dt, [alg], [envs]; kwargs...) -> (ψ, envs) + timestep!(ψ₀, H, t, dt, [alg], [envs]; kwargs...) -> (ψ₀, envs) Time-step the state `ψ₀` with Hamiltonian `H` over a given time step `dt` at time `t`, solving the Schroedinger equation: ``i ∂ψ/∂t = H ψ``. diff --git a/src/algorithms/timestep/timeevmpo.jl b/src/algorithms/timestep/timeevmpo.jl index 477fc4d41..0467fdc82 100644 --- a/src/algorithms/timestep/timeevmpo.jl +++ b/src/algorithms/timestep/timeevmpo.jl @@ -41,8 +41,19 @@ $(TYPEDFIELDS) compression::Bool = false end +""" + const WI = TaylorCluster(; N=1, extension=false, compression=false) + +First order Taylor expansion for a time-evolution MPO. +""" const WI = TaylorCluster(; N=1, extension=false, compression=false) +@doc """ + make_time_mpo(H::MPOHamiltonian, dt::Number, alg) + +Construct an MPO that approximates ``\\exp(-iHdt)``. 
+""" make_time_mpo + function make_time_mpo(H::MPOHamiltonian, dt::Number, alg::TaylorCluster; tol=eps(real(scalartype(H)))^(3 / 4)) N = alg.N diff --git a/src/environments/infinite_envs.jl b/src/environments/infinite_envs.jl index f0b0de501..d6ab383e4 100644 --- a/src/environments/infinite_envs.jl +++ b/src/environments/infinite_envs.jl @@ -68,8 +68,8 @@ end function initialize_environments(above::InfiniteMPS, operator::InfiniteMPO, below::InfiniteMPS=above) L = check_length(above, operator, below) - GLs = PeriodicVector([randomize!(allocate_GL(above, operator, below, i)) for i in 1:L]) - GRs = PeriodicVector([randomize!(allocate_GR(above, operator, below, i)) for i in 1:L]) + GLs = PeriodicVector([randomize!(allocate_GL(below, operator, above, i)) for i in 1:L]) + GRs = PeriodicVector([randomize!(allocate_GR(below, operator, above, i)) for i in 1:L]) return GLs, GRs end @@ -113,8 +113,8 @@ end function initialize_environments(above::InfiniteMPS, operator::InfiniteMPOHamiltonian, below::InfiniteMPS=above) L = check_length(above, operator, below) - GLs = PeriodicVector([allocate_GL(above, operator, below, i) for i in 1:L]) - GRs = PeriodicVector([allocate_GR(above, operator, below, i) for i in 1:L]) + GLs = PeriodicVector([allocate_GL(below, operator, above, i) for i in 1:L]) + GRs = PeriodicVector([allocate_GR(below, operator, above, i) for i in 1:L]) # GL = (1, 0, 0) GL = first(GLs) diff --git a/src/operators/mpo.jl b/src/operators/mpo.jl index b27fbcc92..7b4951df6 100644 --- a/src/operators/mpo.jl +++ b/src/operators/mpo.jl @@ -58,6 +58,7 @@ function Base.similar(mpo::MPO, ::Type{O}, L::Int) where {O<:MPOTensor} end Base.repeat(mpo::MPO, n::Int) = MPO(repeat(parent(mpo), n)) +Base.repeat(mpo::MPO, rows::Int, cols::Int) = MultilineMPO(fill(repeat(mpo, cols), rows)) function remove_orphans!(mpo::InfiniteMPO; tol=eps(real(scalartype(mpo)))^(3 / 4)) droptol!.(mpo, tol) diff --git a/src/operators/mpohamiltonian.jl b/src/operators/mpohamiltonian.jl index 564da5b7d..b1747b060 100644 --- a/src/operators/mpohamiltonian.jl +++ b/src/operators/mpohamiltonian.jl @@ -108,7 +108,7 @@ function _find_channel(nonzero_keys; init=2) end function FiniteMPOHamiltonian(lattice::AbstractArray{<:VectorSpace}, - local_operators::Pair...) + local_operators) # initialize vectors for storing the data # TODO: generalize to weird lattice types # nonzero_keys = similar(lattice, Vector{NTuple{2,Int}}) @@ -191,7 +191,7 @@ function FiniteMPOHamiltonian(lattice::AbstractArray{<:VectorSpace}, end function InfiniteMPOHamiltonian(lattice′::AbstractArray{<:VectorSpace}, - local_operators::Pair...) + local_operators) lattice = PeriodicVector(lattice′) # initialize vectors for storing the data # TODO: generalize to weird lattice types @@ -309,13 +309,13 @@ function InfiniteMPOHamiltonian(lattice′::AbstractArray{<:VectorSpace}, end function FiniteMPOHamiltonian(lattice::AbstractArray{<:VectorSpace}, - local_operators::Union{Base.Generator,AbstractDict}) - return FiniteMPOHamiltonian(lattice, local_operators...) + local_operators::Pair...) + return FiniteMPOHamiltonian(lattice, local_operators) end function InfiniteMPOHamiltonian(lattice::AbstractArray{<:VectorSpace}, - local_operators::Union{Base.Generator,AbstractDict}) - return InfiniteMPOHamiltonian(lattice, local_operators...) + local_operators::Pair...) 
+ return InfiniteMPOHamiltonian(lattice, local_operators) end function InfiniteMPOHamiltonian(local_operator::TensorMap{E,S,N,N}) where {E,S,N} diff --git a/src/utility/multiline.jl b/src/utility/multiline.jl index 2b645176f..239c4b03c 100644 --- a/src/utility/multiline.jl +++ b/src/utility/multiline.jl @@ -45,3 +45,9 @@ function Base.circshift(A::Multiline, shifts::Tuple{Int,Int}) end Base.reverse(A::Multiline) = Multiline(reverse(parent(A))) Base.only(A::Multiline) = only(parent(A)) + +function Base.repeat(A::Multiline, rows::Int, cols::Int) + inner = map(Base.Fix2(repeat, cols), A.data) + outer = repeat(inner, rows) + return Multiline(outer) +end diff --git a/test/algorithms.jl b/test/algorithms.jl index b96752383..8d8ad1514 100644 --- a/test/algorithms.jl +++ b/test/algorithms.jl @@ -106,15 +106,15 @@ end @test v < 1e-2 end - @testset "IDMRG1" for unit_cell_size in [1, 3] + @testset "IDMRG" for unit_cell_size in [1, 3] ψ = unit_cell_size == 1 ? InfiniteMPS(ℙ^2, ℙ^D) : repeat(ψ, unit_cell_size) H = repeat(H_ref, unit_cell_size) # test logging ψ, envs, δ = find_groundstate(ψ, H, - IDMRG1(; tol, verbosity=verbosity_full, maxiter=2)) + IDMRG(; tol, verbosity=verbosity_full, maxiter=2)) - ψ, envs, δ = find_groundstate(ψ, H, IDMRG1(; tol, verbosity=verbosity_conv)) + ψ, envs, δ = find_groundstate(ψ, H, IDMRG(; tol, verbosity=verbosity_conv)) v = variance(ψ, H, envs) # test using low variance @@ -275,13 +275,13 @@ end @test abs(dot(ψ₀, ψ)) ≈ 1 atol = atol end - @testset "IDMRG1" begin + @testset "IDMRG" begin # test logging passes ψ, envs, δ = find_groundstate(ψ₀, H_lazy, - IDMRG1(; tol, verbosity=verbosity_full, maxiter=2)) + IDMRG(; tol, verbosity=verbosity_full, maxiter=2)) # compare states - alg = IDMRG1(; tol, verbosity=verbosity_conv, maxiter=300) + alg = IDMRG(; tol, verbosity=verbosity_conv, maxiter=300) ψ, envs, δ = find_groundstate(ψ, H_lazy, alg) @test abs(dot(ψ₀, ψ)) ≈ 1 atol = atol @@ -703,7 +703,7 @@ end ψ1, _ = approximate(ψ, (sW1, ψ), VOMPS(; verbosity)) ψ2, _ = approximate(ψ, (W2, ψ), VOMPS(; verbosity)) - ψ3, _ = approximate(ψ, (W1, ψ), IDMRG1(; verbosity)) + ψ3, _ = approximate(ψ, (W1, ψ), IDMRG(; verbosity)) ψ4, _ = approximate(ψ, (sW2, ψ), IDMRG2(; trscheme=truncdim(20), verbosity)) ψ5, _ = timestep(ψ, H, 0.0, dt, TDVP()) ψ6 = changebonds(W1 * ψ, SvdCut(; trscheme=truncdim(10)))
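Taken together, the user-facing changes above boil down to the usage pattern sketched below. This is a minimal sketch rather than part of the test suite: `transverse_field_ising` is assumed to be available from MPSKitModels (added to the docs environment in this patch), and the coupling, spaces, and time step are illustrative values only.

```julia
using MPSKit, TensorKit, MPSKitModels

# Hamiltonian and initial state; `transverse_field_ising` (MPSKitModels) and
# the chosen spaces are assumptions made for the sake of the example.
H = transverse_field_ising(; g=0.5)
ψ₀ = InfiniteMPS(ℂ^2, ℂ^20)

# `IDMRG1` is now `IDMRG`; the documented return signature is (ψ, environments, ϵ).
ψ, envs, ϵ = find_groundstate(ψ₀, H, IDMRG(; tol=1e-8, maxiter=100))

# `make_time_mpo` constructs an MPO approximating exp(-iH dt); `WI` is the
# first-order Taylor expansion (a `TaylorCluster` with N = 1).
U = make_time_mpo(H, 0.05, WI)

# `timestep` returns (ψ, envs), matching the updated docstring.
ψt, envs_t = timestep(ψ, H, 0.0, 0.05, TDVP())

# Applying the time-evolution MPO directly, followed by a truncation
# (cf. the `changebonds` line in the test above).
ψ_dt = changebonds(U * ψ, SvdCut(; trscheme=truncdim(20)))
```

Note that `find_groundstate`, `leading_boundary`, and `timestep` now all document their return tuples explicitly, so destructuring as above is the intended calling pattern.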