From 35e992dd6768db932b4e6a337acb4a5203101023 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 15 Feb 2026 18:48:40 +0000 Subject: [PATCH 1/4] Add SDP solver production readiness plan Comprehensive analysis of what's needed to move SDP cone support from experimental to production quality, covering correctness bugs, test coverage, numerical robustness, performance, KKT solver integration, MOI wrapper completeness, and documentation. https://claude.ai/code/session_01PNGZft69CuPURr3qSAt8xe --- SDP_PRODUCTION_PLAN.md | 291 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 291 insertions(+) create mode 100644 SDP_PRODUCTION_PLAN.md diff --git a/SDP_PRODUCTION_PLAN.md b/SDP_PRODUCTION_PLAN.md new file mode 100644 index 0000000..066ff18 --- /dev/null +++ b/SDP_PRODUCTION_PLAN.md @@ -0,0 +1,291 @@ +# SDP Solver: Experimental → Production Plan + +## Current State Assessment + +The SDP implementation in ConicIP.jl is more complete than the documentation +suggests. The core interior-point loop already handles semidefinite cones end +to end: Nesterov-Todd scaling (`nestod_sdc`), line search (`maxstep_sdc`), +cone arithmetic (`dsdc!`/`xsdc!`), and the MOI wrapper accepts +`PositiveSemidefiniteConeTriangle`. The docstring at `src/ConicIP.jl:431` +declares SDP "NOT supported and purely experimental", but the solver does +produce correct results on small problems. + +What is missing falls into six categories: **correctness coverage**, **numerical +robustness**, **performance**, **KKT solver integration**, **MOI wrapper +completeness**, and **documentation**. + +--- + +## Phase 1: Correctness and Test Coverage + +The single existing SDP test (PSD projection, k=6) is not sufficient to ship. + +### 1a. 
Expand the SDP test suite

Add tests covering these scenarios (all currently untested):

| Test case | Why it matters |
|-----------|----------------|
| **Multiple SDP blocks** (`[("S",k1), ("S",k2)]`) | Exercises `Block` with >1 `VecCongurance` entry |
| **Mixed cones** (`[("R",n), ("Q",m), ("S",k)]`) | Verifies block indexing and heterogeneous scaling |
| **SDP + equality constraints** (G, d non-empty) | Equality rows interact with KKT system |
| **Larger matrix sizes** (k=15, k=45, k=100) | Numerical issues surface at scale |
| **Known-optimal SDP instances** (Lovász theta, max-cut relaxation) | Validates objective value against known solutions |
| **Infeasible SDP** | Confirms infeasibility certificate for SDP cones |
| **Unbounded SDP** | Confirms unboundedness certificate |
| **SDP via MOI/JuMP interface** | End-to-end MOI wrapper test (`PositiveSemidefiniteConeTriangle`) |
| **Dual variable recovery** | Checks that MOI dual extraction returns correct matrix |

**Files:** `test/runtests.jl`, possibly a new `test/sdp_problems.jl` for the
problem generators.

### 1b. Validate the `vecm`/`mat` round-trip

Add property-based tests checking that, for random symmetric matrices X and Y:
- `mat(vecm(X)) == X` (exact round-trip)
- `dot(vecm(X), vecm(Y)) ≈ tr(X*Y)` (inner product preservation)
- These should run at sizes n = 2, 5, 10, 20.

### 1c. Validate Nesterov-Todd scaling identity

For random strictly feasible (z, s) pairs in the SDP cone, verify:
- `F*z ≈ inv(F)*s` where `F = nestod_sdc(z, s)`
- `F*z ≈ F'*z` (self-adjointness of the scaled point λ)

---

## Phase 2: Numerical Robustness

### 2a. Guard `nestod_sdc` against non-PD inputs

`nestod_sdc` (`src/ConicIP.jl:198-212`) calls `cholesky(mat(s))` and
`cholesky(mat(z))` without checking positive definiteness first. If z or s
drifts to the cone boundary (common in late iterations), Cholesky will throw
a `PosDefException`.

**Fix:** Add a `try/catch` or eigenvalue check. 
If the Cholesky fails, fall
back to an eigenvalue-based factorization (`eigen` with clamped eigenvalues),
or signal the solver to reduce the step size. This is the most likely source
of runtime failures on real SDP problems.

### 2b. Guard `maxstep_sdc` against near-singular matrices

`maxstep_sdc` (`src/ConicIP.jl:274-295`) computes the inverse square root
`X^(-1/2)` via the generic matrix power operator. When X has eigenvalues near
zero, this is numerically catastrophic.

**Fix:** Use the eigendecomposition `X = U*Λ*U'`, compute `X^{-1/2} =
U*Λ^{-1/2}*U'` with explicit clamping of small eigenvalues, and symmetrize the
result. This is more expensive but avoids silent NaN propagation.

### 2c. Symmetrize `xsdc!` output

`xsdc!` computes `X*Y + Y*X` (`src/ConicIP.jl:357-362`). Due to floating-point
arithmetic, the result may not be perfectly symmetric after `vecm`. Add an
explicit symmetrization step: `M = 0.5*(M + M')` before `vecm`.

### 2d. Handle the `lyap` call in `dsdc!`

`dsdc!` (`src/ConicIP.jl:349-355`) uses `lyap(mat(Y), -mat(x))` from
LinearAlgebra. The Lyapunov solver can fail or return inaccurate results when Y
has eigenvalues close to zero, or when a pair of Y's eigenvalues nearly sums to
zero. Add a residual check and fall back to a direct solve if needed.

---

## Phase 3: Performance

These items correspond to issues already identified in `benchmark/report.md`
but are specifically blocking for production SDP at scale.

### 3a. Fix `VecCongurance` materialization in KKT solvers

The `lift()` function in `src/kktsolvers.jl:60-105` has specialized paths for
`SymWoodbury` and `Diagonal` blocks, but **no path for `VecCongurance`**. When
an SDP block is present, `lift()` silently produces an incomplete
factorization. The `kktsolver_sparse` and `kktsolver_2x2` solvers fall back to
`sparse(Block)`, which materializes `VecCongurance` column-by-column into a
dense matrix (`src/ConicIP.jl:71-79`) — O(n³) work creating a full n×n dense
matrix for each SDP block. 
+ +**Fix options (in order of increasing complexity):** + +1. **Direct dense embedding:** In `lift()`, add a `VecCongurance` branch that + extracts the dense matrix directly via `Matrix(Blk)` and inserts its + entries into the sparse IJV arrays. This avoids the column-by-column + application but still materializes the block. + +2. **Cholesky-based sparse lift:** Since `VecCongurance(R)` represents + `x → vecm(R'*mat(x)*R)`, we can express this as a sparse Kronecker + product `(R⊗R) * P` where P is the vec↔vecm permutation/scaling matrix. + This gives `lift()` a structured sparse representation without + materializing. + +3. **SDP-specialized KKT solver:** For problems with large SDP blocks, bypass + `lift()` entirely and use a Schur complement approach that works directly + with the `R` matrix from `VecCongurance`. This is the standard approach in + production SDP solvers (SeDuMi, SDPT3, MOSEK). + +Recommendation: Implement option 1 first to fix correctness, then option 3 +for production-scale performance. + +### 3b. Vectorize `mat()` and `vecm()` + +`mat()` and `vecm()` (`src/ConicIP.jl:93-151`) use scalar loops with +conditional branches for the √2 scaling. These are called multiple times per +iteration per SDP block. + +**Fix:** Use index arithmetic to separate diagonal and off-diagonal entries, +then apply the scaling as a single vectorized multiply. This eliminates the +branch and enables SIMD. + +### 3c. Cache eigendecompositions in `maxstep_sdc` + +`maxstep_sdc` is called twice per iteration (predictor and corrector steps) +and computes `eigvals(Symmetric(X))` each time. The matrix X (the current +primal SDP iterate) is the same in both calls within a single iteration. + +**Fix:** Cache the eigendecomposition of the current iterate and reuse it +across the two line search calls. + +### 3d. Reduce allocations in SDP cone operations + +`nestod_sdc` allocates intermediate matrices for `Ls`, `Lz`, `F.U`, `Λ`, and +`R` on every iteration. 
For a k×k SDP block these are each k×k matrices. + +**Fix:** Pre-allocate workspace buffers for the SDP cone operations and pass +them through the iteration loop. This requires adding a workspace struct to +`conicIP()` and threading it through `nt_scaling`, `maxstep`, etc. + +--- + +## Phase 4: KKT Solver Correctness for SDP + +### 4a. Add `VecCongurance` branch to `lift()` + +As noted in 3a, `lift()` (`src/kktsolvers.jl:60-105`) has no handling for +`VecCongurance` blocks. This means `kktsolver_sparse` produces wrong results +silently when SDP cones are present. + +**This is a correctness bug, not just a performance issue.** It must be fixed +before SDP can be considered production-ready with any KKT solver other than +`kktsolver_qr` (which bypasses `lift()`). + +### 4b. Verify all three KKT solvers produce correct results for SDP + +After fixing `lift()`, add a test that solves the same SDP problem with +`kktsolver_qr`, `kktsolver_sparse`, and `pivot(kktsolver_2x2)`, and verifies +they produce the same optimal solution. + +### 4c. Benchmark KKT solver selection for SDP + +The current `kktsolver` default selection logic may not be optimal for SDP +problems. Profile each solver on SDP problems of varying size and determine +the appropriate crossover points. + +--- + +## Phase 5: MOI Wrapper Completeness + +### 5a. Dual variable extraction for SDP + +The MOI wrapper returns dual variables in vectorized form. For SDP cones, +users typically expect the dual matrix. The wrapper should: +- Correctly map vectorized duals back to `MOI.ConstraintDual` results +- Verify the scaling convention matches what MOI expects (ConicIP uses √2 + off-diagonal scaling; MOI's `PositiveSemidefiniteConeTriangle` expects the + same, but this should be verified) + +### 5b. Support `PositiveSemidefiniteConeSquare` + +MOI also has `PositiveSemidefiniteConeSquare` for full (non-triangular) matrix +representations. 
Adding this is straightforward: symmetrize the input and +delegate to the triangle path. + +### 5c. Add MOI `Test` suite for SDP + +Run the standard `MOI.Test` regressions with SDP constraints to catch edge +cases in constraint handling, variable bridging, and result queries. + +### 5d. Support quadratic objectives with SDP constraints + +Currently the MOI wrapper only supports `ScalarAffineFunction` and +`VariableIndex` objectives (`src/MOI_wrapper.jl:60-64`). A +`ScalarQuadraticFunction` objective combined with SDP constraints is a valid +problem class (QCQP with SDP relaxation). Verify this works end-to-end or +document the limitation. + +--- + +## Phase 6: Documentation and API + +### 6a. Remove "experimental" labels + +- Update docstring at `src/ConicIP.jl:431-432` +- Update `docs/src/tutorials/sdp.jl` warning banner +- Update any README references + +### 6b. Document the SDP vectorization convention + +The `vecm`/`mat` convention (scaled lower-triangular, √2 on off-diagonals) is +standard (sometimes called "svec") but must be clearly documented since users +constructing problems via the direct API need to know it. + +### 6c. Add an SDP tutorial with a realistic problem + +The existing tutorial (`docs/src/tutorials/sdp.jl`) only shows PSD projection. +Add a more realistic example: Lovász theta number or a max-cut SDP relaxation, +showing both the direct API and the JuMP interface. + +### 6d. 
Document solver limitations + +Be explicit about: +- Maximum practical SDP block size (determined by Phase 3 benchmarking) +- Which KKT solver to use for SDP problems +- Known numerical sensitivities + +--- + +## Recommended Execution Order + +``` +Priority Phase Item Description +──────── ────── ───── ───────────────────────────────────────── + P0 4a ★ Fix lift() for VecCongurance (correctness bug) + P0 2a ★ Guard nestod_sdc against non-PD inputs + P1 1a Expand SDP test suite + P1 4b Verify all KKT solvers with SDP + P1 2b Guard maxstep_sdc against near-singular X + P1 2c Symmetrize xsdc! output + P2 1b vecm/mat round-trip tests + P2 1c NT scaling identity tests + P2 2d Guard lyap in dsdc! + P2 5a MOI dual extraction for SDP + P3 3a VecCongurance materialization (performance) + P3 3b Vectorize mat()/vecm() + P3 3c Cache eigendecompositions + P3 3d Reduce allocations + P3 5b PositiveSemidefiniteConeSquare + P4 5c MOI Test suite for SDP + P4 5d Quadratic + SDP verification + P4 4c KKT solver benchmarking for SDP + P4 6a-6d Documentation updates +``` + +P0 items are **blocking correctness bugs** that will cause wrong answers or +crashes on non-trivial SDP problems. P1 items are needed to have confidence +the solver works. P2 items improve robustness. P3 items address performance +for larger problems. P4 items are polish for a public release. + +--- + +## Estimated Scope + +- **P0 (blocking bugs):** 2 items, touching `kktsolvers.jl` and `ConicIP.jl` +- **P1 (must-have):** 4 items, primarily new tests + defensive guards +- **P2 (robustness):** 4 items, scattered across solver core and MOI wrapper +- **P3 (performance):** 4 items, primarily `ConicIP.jl` and `kktsolvers.jl` +- **P4 (polish):** 5 items, docs + MOI test suite + +The P0+P1 items represent the minimum viable path to removing the +"experimental" label. 
From 2aa6400f88343e2c5fd631c2b86035d761ff5d53 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 15 Feb 2026 19:53:30 +0000 Subject: [PATCH 2/4] Add dev container configuration with Julia 1.10 https://claude.ai/code/session_01PNGZft69CuPURr3qSAt8xe --- .devcontainer/devcontainer.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..d41789d --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,12 @@ +{ + "name": "ConicIP.jl", + "image": "julia:1.10", + "postCreateCommand": "julia --project -e 'using Pkg; Pkg.instantiate()'", + "customizations": { + "vscode": { + "extensions": [ + "julialang.language-julia" + ] + } + } +} From b428825aeeb92561a787fb3a015aaa9607cbef09 Mon Sep 17 00:00:00 2001 From: Claude Date: Sun, 15 Feb 2026 19:53:37 +0000 Subject: [PATCH 3/4] Add SDP solver test suite with problem generators and evaluation harness https://claude.ai/code/session_01PNGZft69CuPURr3qSAt8xe --- test/runtests.jl | 2 + test/sdp_harness.jl | 272 ++++++++++++++++++++ test/sdp_problems.jl | 420 +++++++++++++++++++++++++++++++ test/sdp_tests.jl | 583 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1277 insertions(+) create mode 100644 test/sdp_harness.jl create mode 100644 test/sdp_problems.jl create mode 100644 test/sdp_tests.jl diff --git a/test/runtests.jl b/test/runtests.jl index cd23cd4..9a02a25 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -551,6 +551,8 @@ end end + include("sdp_tests.jl") + @testset "SOC Cone (direct API)" begin Random.seed!(0) diff --git a/test/sdp_harness.jl b/test/sdp_harness.jl new file mode 100644 index 0000000..849f40e --- /dev/null +++ b/test/sdp_harness.jl @@ -0,0 +1,272 @@ +# ────────────────────────────────────────────────────────────── +# SDP Evaluation Harness +# +# Structured framework for running SDP problems through the +# solver, collecting 
detailed diagnostics, and producing a +# summary report. +# ────────────────────────────────────────────────────────────── + +using Printf + +""" + SDPResult + +Collected diagnostics from a single SDP solve attempt. +""" +struct SDPResult + description :: String + kktsolver :: String + status :: Symbol # solver status + iterations :: Int + solve_time :: Float64 # seconds + alloc_bytes :: Int64 + primal_feas :: Float64 + dual_feas :: Float64 + mu_feas :: Float64 + pobj :: Float64 + dobj :: Float64 + # Validation against known answers + obj_error :: Union{Float64, Nothing} # |pobj - known_obj| if known + X_error :: Union{Float64, Nothing} # ‖X - known_X‖∞ if known + status_match :: Bool # solver status == known_status + passed :: Bool # all checks passed + error_msg :: Union{String, Nothing} # exception message if solver threw +end + +""" + sdp_solve(prob; kktsolver, optTol, verbose, tol) + +Run a single SDP problem through conicIP and validate results. +Returns an `SDPResult` with full diagnostics. 
+""" +function sdp_solve(prob; + kktsolver = ConicIP.kktsolver_qr, + optTol = 1e-7, + verbose = false, + tol = 1e-3, + maxIters = 100) + + ks_name = _kktsolver_name(kktsolver) + + local sol + local t, bytes + error_msg = nothing + + try + stats = @timed begin + if isempty(prob.G) || size(prob.G, 1) == 0 + conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims; + kktsolver=kktsolver, optTol=optTol, + verbose=verbose, maxIters=maxIters) + else + conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims, + prob.G, prob.d; + kktsolver=kktsolver, optTol=optTol, + verbose=verbose, maxIters=maxIters) + end + end + sol = stats.value + t = stats.time + bytes = stats.bytes + catch ex + return SDPResult( + prob.description, ks_name, + :Error, 0, 0.0, 0, + NaN, NaN, NaN, NaN, NaN, + nothing, nothing, + false, false, + sprint(showerror, ex)) + end + + # Validate status + status_match = (sol.status == prob.known_status) + + # Validate objective + obj_error = nothing + if prob.known_obj !== nothing && sol.status == :Optimal + obj_error = abs(sol.pobj - prob.known_obj) + end + + # Validate primal solution (PSD matrix) + X_error = nothing + if prob.known_X !== nothing && sol.status == :Optimal + # Extract PSD solution from dual variable v + # For a pure SDP with A=I, b=0: the variable v contains the SDP iterate + # The primal y is the decision variable; the cone variable is in sol.v + # But the standard test uses sol.y projected through mat() + X_sol = _extract_sdp_matrix(sol, prob) + if X_sol !== nothing + X_error = norm(X_sol - prob.known_X, Inf) + end + end + + # Overall pass/fail + passed = status_match + if obj_error !== nothing + passed = passed && (obj_error < tol) + end + if X_error !== nothing + passed = passed && (X_error < tol) + end + + return SDPResult( + prob.description, ks_name, + sol.status, sol.Iter, t, bytes, + sol.prFeas, sol.duFeas, sol.muFeas, sol.pobj, sol.dobj, + obj_error, X_error, + status_match, passed, + error_msg) +end + +""" + _extract_sdp_matrix(sol, 
prob) + +Extract the PSD solution matrix from the solver output. +For problems where A=I and there is a single SDP block, +the dual variable `v` or the relationship `y = A\\(v + b)` gives us +the SDP iterate. +""" +function _extract_sdp_matrix(sol, prob) + # For projection problems (Q=I, A=I, b=0), sol.y IS the vectorized solution + # Check if there's a single SDP cone + sdp_cones = filter(c -> c[1] == "S", prob.cone_dims) + if length(sdp_cones) != 1 + return nothing + end + k = sdp_cones[1][2] + # Find offset of SDP cone in the variable vector + offset = 0 + for (ctype, cdim) in prob.cone_dims + if ctype == "S" + break + end + offset += cdim + end + # The primal iterate y gives us the SDP variable for QP formulations + # For LP formulations (Q=0), the dual v gives the cone iterate + if nnz(sparse(prob.Q)) > 0 + # QP: y is the primal, and for projection problems y is the answer + x_vec = sol.y[offset+1:offset+k] + else + # LP: the cone variable is in v + x_vec = sol.v[offset+1:offset+k] + end + return ConicIP.mat(x_vec) +end + +""" + _kktsolver_name(ks) + +Human-readable name for a KKT solver. +""" +function _kktsolver_name(ks) + if ks === ConicIP.kktsolver_qr + return "qr" + elseif ks === ConicIP.kktsolver_sparse + return "sparse" + else + return "pivot(2x2)" + end +end + +""" + run_sdp_harness(problems; kktsolver, optTol, verbose, tol) + +Run a collection of SDP problems and return a vector of `SDPResult`s. +""" +function run_sdp_harness(problems; + kktsolver = ConicIP.kktsolver_qr, + optTol = 1e-7, + verbose = false, + tol = 1e-3, + maxIters = 100) + + results = SDPResult[] + for prob in problems + r = sdp_solve(prob; kktsolver=kktsolver, optTol=optTol, + verbose=verbose, tol=tol, maxIters=maxIters) + push!(results, r) + end + return results +end + +""" + run_sdp_harness_all_solvers(problems; optTol, verbose, tol) + +Run all problems across all three KKT solvers. Returns a vector of SDPResults. 
+""" +function run_sdp_harness_all_solvers(problems; + optTol = 1e-7, + verbose = false, + tol = 1e-3, + maxIters = 100) + + solvers = [ + ConicIP.kktsolver_qr, + ConicIP.kktsolver_sparse, + pivot(ConicIP.kktsolver_2x2), + ] + results = SDPResult[] + for ks in solvers + for prob in problems + r = sdp_solve(prob; kktsolver=ks, optTol=optTol, + verbose=verbose, tol=tol, maxIters=maxIters) + push!(results, r) + end + end + return results +end + +""" + print_sdp_report(results; io=stdout) + +Print a formatted summary table of SDP evaluation results. +""" +function print_sdp_report(results; io=stdout) + println(io) + println(io, "=" ^ 120) + println(io, " SDP Evaluation Report") + println(io, "=" ^ 120) + @printf(io, " %-45s │ %-10s │ %-9s │ %4s │ %8s │ %8s │ %8s │ %6s\n", + "Problem", "KKT", "Status", "Iter", "ObjErr", "XErr", "Time(s)", "Pass") + println(io, "─" ^ 120) + + n_pass = 0 + n_fail = 0 + n_error = 0 + + for r in results + obj_str = r.obj_error === nothing ? " — " : @sprintf("%8.1e", r.obj_error) + X_str = r.X_error === nothing ? " — " : @sprintf("%8.1e", r.X_error) + pass_str = r.passed ? " OK" : (r.status == :Error ? " ERR" : "FAIL") + + @printf(io, " %-45s │ %-10s │ %-9s │ %4d │ %s │ %s │ %8.4f │ %4s\n", + first(r.description, 45), + r.kktsolver, + r.status, + r.iterations, + obj_str, X_str, + r.solve_time, + pass_str) + + if r.passed; n_pass += 1 + elseif r.status == :Error; n_error += 1 + else n_fail += 1 + end + + if r.error_msg !== nothing + println(io, " └─ ERROR: ", first(r.error_msg, 90)) + end + end + + println(io, "─" ^ 120) + @printf(io, " Total: %d | Passed: %d | Failed: %d | Errors: %d\n", + length(results), n_pass, n_fail, n_error) + println(io, "=" ^ 120) + println(io) + + return (total=length(results), passed=n_pass, failed=n_fail, errors=n_error) +end + +# Utility to truncate strings +first(s::AbstractString, n::Int) = length(s) <= n ? 
rpad(s, n) : s[1:n-1] * "…" diff --git a/test/sdp_problems.jl b/test/sdp_problems.jl new file mode 100644 index 0000000..0d2d665 --- /dev/null +++ b/test/sdp_problems.jl @@ -0,0 +1,420 @@ +# ────────────────────────────────────────────────────────────── +# SDP Problem Generators +# +# Each generator returns a NamedTuple with fields: +# Q, c, A, b, cone_dims — required (direct API) +# G, d — optional equality constraints +# known_status — expected :Optimal / :Infeasible / :Unbounded +# known_obj — expected objective (nothing if N/A) +# known_X — expected primal PSD matrix (nothing if N/A) +# description — human-readable description +# ────────────────────────────────────────────────────────────── + +using LinearAlgebra, SparseArrays + +# Helpers +_vecm(Z) = ConicIP.vecm(Z) +_mat(x) = ConicIP.mat(x) +_vdim(n) = div(n * (n + 1), 2) # vectorized dimension for n×n matrix + +""" + sdp_psd_projection(; n=6) + +Project a diagonal matrix with mixed signs onto the PSD cone. +Known solution: clip negative eigenvalues to zero. +""" +function sdp_psd_projection(; n=6) + eigs = vcat(ones(div(n, 2)), -ones(n - div(n, 2))) + target = diagm(0 => eigs) + expected = diagm(0 => max.(eigs, 0.0)) + + k = _vdim(n) + Q = Matrix{Float64}(I, k, k) + c = reshape(_vecm(target), :, 1) + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + return (Q=Q, c=c, A=A, b=b, cone_dims=[("S", k)], + G=spzeros(0, k), d=zeros(0, 1), + known_status=:Optimal, known_obj=nothing, + known_X=expected, + description="PSD projection (n=$n): clip negative eigenvalues") +end + +""" + sdp_trace_minimization(; n=4) + +Minimize tr(X) subject to X ≽ C where C is a fixed PSD matrix. +Known solution: X* = C, obj = tr(C). 
+""" +function sdp_trace_minimization(; n=4) + # C = random PSD matrix + Random.seed!(42) + M = randn(n, n) + C = M' * M / n # well-conditioned PSD + + k = _vdim(n) + + # min tr(X) = min vecm(I)' * x subject to x ≽ vecm(C) + # In ConicIP form: min ½x'Qx - c'x with Q=0 + # Cone constraint: A*y ≥ b → I*y ≥ vecm(C) + # But ConicIP minimizes ½y'Qy - c'y, so to minimize vecm(I)'y + # we set c = -vecm(I) (since it becomes -(-vecm(I))'y = vecm(I)'y) + Q = spzeros(k, k) + c_vec = -reshape(_vecm(Matrix{Float64}(I, n, n)), :, 1) + A = sparse(1.0I, k, k) + b = reshape(_vecm(C), :, 1) + + return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], + G=spzeros(0, k), d=zeros(0, 1), + known_status=:Optimal, known_obj=tr(C), + known_X=C, + description="Trace minimization: min tr(X) s.t. X ≽ C (n=$n)") +end + +""" + sdp_max_cut_relaxation(; n=5, seed=123) + +Goemans-Williamson max-cut SDP relaxation for a random graph. + max ¼ tr(L * X) s.t. diag(X) = 1, X ≽ 0 +where L is the graph Laplacian. + +Reformulated for ConicIP's minimization form. +""" +function sdp_max_cut_relaxation(; n=5, seed=123) + Random.seed!(seed) + + # Random adjacency matrix (symmetric, no self-loops) + W = zeros(n, n) + for i = 1:n, j = i+1:n + w = rand() > 0.4 ? 
rand() : 0.0 + W[i, j] = w + W[j, i] = w + end + L = diagm(0 => vec(sum(W, dims=2))) - W + + k = _vdim(n) + + # Maximize ¼ tr(L*X) → minimize -¼ tr(L*X) = -¼ vecm(L)'x + # ConicIP: min ½y'Qy - c'y → c = ¼ vecm(L) (negated in the objective) + Q = spzeros(k, k) + c_vec = reshape(0.25 * _vecm(L), :, 1) + + # Constraint: diag(X) = 1 → n equality constraints + # Extract diagonal entries from vectorized form + G = spzeros(n, k) + d = ones(n, 1) + for i = 1:n + # Position of diagonal entry (i,i) in vecm ordering + # vecm uses row-major upper triangular: entries for row i start at + # position sum_{r=1}^{i-1}(n-r+1) + 1 = i + (i-1)*(2n-i)/2 + pos = round(Int, k - (n - i + 2) * (n - i + 1) / 2 + 1) + G[i, pos] = 1.0 + end + + # X ≽ 0: cone constraint + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], + G=G, d=d, + known_status=:Optimal, known_obj=nothing, + known_X=nothing, + description="Max-cut SDP relaxation (n=$n)") +end + +""" + sdp_lovasz_theta(; n=5, seed=456) + +Lovász theta number of a random graph. + max tr(J * X) s.t. tr(X) = 1, X_ij = 0 for (i,j) ∈ E, X ≽ 0 +where J is the all-ones matrix. + +The theta number upper-bounds the independence number. +""" +function sdp_lovasz_theta(; n=5, seed=456) + Random.seed!(seed) + + # Random graph with ~40% edge density + edges = Tuple{Int,Int}[] + for i = 1:n, j = i+1:n + if rand() < 0.4 + push!(edges, (i, j)) + end + end + + k = _vdim(n) + + # Maximize tr(J*X) → minimize -tr(J*X) = -vecm(J)'x + # ConicIP: c = vecm(J) + J = ones(n, n) + Q = spzeros(k, k) + c_vec = reshape(_vecm(J), :, 1) + + # Equality constraints: + # 1. tr(X) = 1 + # 2. 
X_ij = 0 for each edge (i,j) + n_eq = 1 + length(edges) + G = spzeros(n_eq, k) + d = zeros(n_eq, 1) + + # tr(X) = 1: sum of diagonal entries + I_mat = Matrix{Float64}(I, n, n) + G[1, :] = _vecm(I_mat)' + d[1] = 1.0 + + # X_ij = 0 for each edge + for (idx, (i, j)) in enumerate(edges) + E_ij = zeros(n, n) + E_ij[i, j] = 1.0 + E_ij[j, i] = 1.0 + G[1 + idx, :] = _vecm(E_ij)' + d[1 + idx] = 0.0 + end + + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], + G=sparse(G), d=d, + known_status=:Optimal, known_obj=nothing, + known_X=nothing, + description="Lovász theta (n=$n, |E|=$(length(edges)))") +end + +""" + sdp_nearest_correlation(; n=5, seed=789) + +Find the nearest correlation matrix (PSD + unit diagonal) to a given +symmetric matrix. + + min ‖X - C‖²_F s.t. diag(X) = 1, X ≽ 0 + +This is a convex QP with SDP constraint. +""" +function sdp_nearest_correlation(; n=5, seed=789) + Random.seed!(seed) + + # Create a symmetric matrix that is NOT a correlation matrix + M = randn(n, n) + C = (M + M') / 2 + # Normalize to have unit diagonal + D = diagm(0 => 1.0 ./ sqrt.(abs.(diag(C)) .+ 1.0)) + C = D * C * D + # Perturb to make it non-PSD + C = C - 0.5I + + k = _vdim(n) + + # min ½‖x - vecm(C)‖² = ½x'Ix - vecm(C)'x + const + Q = Matrix{Float64}(I, k, k) + c_vec = reshape(_vecm(C), :, 1) + + # diag(X) = 1: equality constraints + G = spzeros(n, k) + d = ones(n, 1) + for i = 1:n + pos = round(Int, k - (n - i + 2) * (n - i + 1) / 2 + 1) + G[i, pos] = 1.0 + end + + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], + G=G, d=d, + known_status=:Optimal, known_obj=nothing, + known_X=nothing, + description="Nearest correlation matrix (n=$n)") +end + +""" + sdp_multiple_blocks(; n1=3, n2=4) + +Problem with two SDP blocks: min tr(X₁) + tr(X₂) s.t. X₁ ≽ C₁, X₂ ≽ C₂. +Tests that the Block type correctly handles multiple VecCongurance entries. 
+""" +function sdp_multiple_blocks(; n1=3, n2=4) + Random.seed!(99) + M1 = randn(n1, n1); C1 = M1' * M1 / n1 + M2 = randn(n2, n2); C2 = M2' * M2 / n2 + + k1 = _vdim(n1) + k2 = _vdim(n2) + k = k1 + k2 + + Q = spzeros(k, k) + I1_vec = _vecm(Matrix{Float64}(I, n1, n1)) + I2_vec = _vecm(Matrix{Float64}(I, n2, n2)) + c_vec = -vcat(reshape(I1_vec, :, 1), reshape(I2_vec, :, 1)) + + A = sparse(1.0I, k, k) + b = vcat(reshape(_vecm(C1), :, 1), reshape(_vecm(C2), :, 1)) + + return (Q=Q, c=c_vec, A=A, b=b, + cone_dims=[("S", k1), ("S", k2)], + G=spzeros(0, k), d=zeros(0, 1), + known_status=:Optimal, known_obj=tr(C1) + tr(C2), + known_X=nothing, + description="Multiple SDP blocks (n1=$n1, n2=$n2)") +end + +""" + sdp_mixed_cones(; n_r=4, n_q=3, n_s=3) + +Problem with R+, SOC, and SDP cones simultaneously. +min ½‖y‖² - c'y s.t. y_R ≥ 0, y_Q ∈ SOC, y_S ∈ PSD + +Tests heterogeneous Block with Diagonal, SymWoodbury, and VecCongurance. +""" +function sdp_mixed_cones(; n_r=4, n_q=3, n_s=3) + Random.seed!(77) + k_s = _vdim(n_s) + n = n_r + (n_q + 1) + k_s # total variables + + Q = sparse(1.0I, n, n) + c_target = randn(n, 1) + c = Q * c_target # so optimal is projection of c_target onto feasible set + + # A = I, b = 0 — project onto cone product + A = sparse(1.0I, n, n) + b = zeros(n, 1) + + cone_dims = [("R", n_r), ("Q", n_q + 1), ("S", k_s)] + + return (Q=Q, c=c, A=A, b=b, cone_dims=cone_dims, + G=spzeros(0, n), d=zeros(0, 1), + known_status=:Optimal, known_obj=nothing, + known_X=nothing, + description="Mixed cones: R($n_r) + Q($(n_q+1)) + S($k_s)") +end + +""" + sdp_with_equality(; n=4) + +SDP with equality constraints: min tr(C*X) s.t. tr(Aᵢ*X) = bᵢ, X ≽ 0. +Standard dual-form SDP used in textbooks. 
+""" +function sdp_with_equality(; n=4) + Random.seed!(321) + + k = _vdim(n) + + # Objective: min tr(C*X) where C is random symmetric + M = randn(n, n) + C = (M + M') / 2 + Q = spzeros(k, k) + c_vec = reshape(_vecm(C), :, 1) + + # Equality constraints: tr(A_i * X) = b_i + # Use 3 random symmetric constraint matrices + n_eq = 3 + G = zeros(n_eq, k) + d = zeros(n_eq, 1) + + # Build a strictly feasible X0 to ensure feasibility + X0 = Matrix{Float64}(I, n, n) + 0.1 * randn(n, n) + X0 = X0' * X0 # PSD and strictly feasible + + for i = 1:n_eq + Ai = randn(n, n) + Ai = (Ai + Ai') / 2 + G[i, :] = _vecm(Ai)' + d[i] = tr(Ai * X0) # feasible at X0 + end + + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], + G=sparse(G), d=reshape(d, :, 1), + known_status=:Optimal, known_obj=nothing, + known_X=nothing, + description="SDP with equality constraints (n=$n, m=$n_eq)") +end + +""" + sdp_larger(; n=10) + +Larger SDP to test scaling behavior: PSD projection of a matrix with +known eigenvalues. +""" +function sdp_larger(; n=10) + Random.seed!(555) + + # Create matrix with known eigenstructure + U, _ = qr(randn(n, n)) + U = Matrix(U) + eigs = collect(range(-1.0, 2.0, length=n)) + target = U * diagm(0 => eigs) * U' + target = (target + target') / 2 + expected = U * diagm(0 => max.(eigs, 0.0)) * U' + expected = (expected + expected') / 2 + + k = _vdim(n) + Q = Matrix{Float64}(I, k, k) + c_vec = reshape(_vecm(target), :, 1) + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], + G=spzeros(0, k), d=zeros(0, 1), + known_status=:Optimal, known_obj=nothing, + known_X=expected, + description="Larger PSD projection (n=$n)") +end + +""" + sdp_identity_feasible(; n=3) + +Trivially feasible SDP: minimize 0 s.t. X ≽ I. +Solution: X = I. 
+""" +function sdp_identity_feasible(; n=3) + k = _vdim(n) + Q = spzeros(k, k) + c_vec = zeros(k, 1) + A = sparse(1.0I, k, k) + b = reshape(_vecm(Matrix{Float64}(I, n, n)), :, 1) + + return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], + G=spzeros(0, k), d=zeros(0, 1), + known_status=:Optimal, known_obj=nothing, + known_X=Matrix{Float64}(I, n, n), + description="Trivially feasible: min 0 s.t. X ≽ I (n=$n)") +end + +""" + sdp_rank_one(; n=4) + +SDP with rank-1 solution: min tr(C*X) s.t. tr(X) = 1, X ≽ 0. +Solution is X = vv' where v is eigenvector of smallest eigenvalue of C. +""" +function sdp_rank_one(; n=4) + Random.seed!(111) + M = randn(n, n) + C = (M + M') / 2 + + k = _vdim(n) + Q = spzeros(k, k) + c_vec = reshape(_vecm(C), :, 1) + + # Equality: tr(X) = 1 + I_mat = Matrix{Float64}(I, n, n) + G = reshape(_vecm(I_mat)', 1, k) + d = ones(1, 1) + + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + # Known solution: eigenvector of minimum eigenvalue + λ, V = eigen(Symmetric(C)) + v = V[:, 1] + known_obj = λ[1] # minimum eigenvalue = objective + + return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], + G=sparse(G), d=d, + known_status=:Optimal, known_obj=known_obj, + known_X=v * v', + description="Rank-1 SDP: min tr(C*X) s.t. tr(X)=1 (n=$n)") +end diff --git a/test/sdp_tests.jl b/test/sdp_tests.jl new file mode 100644 index 0000000..ee3e303 --- /dev/null +++ b/test/sdp_tests.jl @@ -0,0 +1,583 @@ +# ────────────────────────────────────────────────────────────── +# SDP Test Suite +# +# Unit tests for SDP internals, standard SDP problem instances, +# edge cases, and evaluation harness integration. 
+# ────────────────────────────────────────────────────────────── + +include("sdp_problems.jl") +include("sdp_harness.jl") + +@testset "SDP Suite" begin + +# ────────────────────────────────────────────────────────────── +# Unit Tests: vecm / mat +# ────────────────────────────────────────────────────────────── + +@testset "vecm/mat round-trip" begin + for n in [1, 2, 3, 5, 8, 10] + Random.seed!(n) + X = randn(n, n) + X = (X + X') / 2 # symmetric + v = ConicIP.vecm(X) + X_rec = ConicIP.mat(v) + @test X_rec ≈ X atol=1e-12 + @test length(v) == _vdim(n) + end +end + +@testset "vecm inner product preserves trace" begin + for n in [2, 3, 5, 8] + Random.seed!(100 + n) + X = randn(n, n); X = (X + X') / 2 + Y = randn(n, n); Y = (Y + Y') / 2 + @test dot(ConicIP.vecm(X), ConicIP.vecm(Y)) ≈ tr(X * Y) atol=1e-10 + end +end + +@testset "vecm(I) is cone identity" begin + for n in [2, 4, 6] + I_n = Matrix{Float64}(I, n, n) + e = ConicIP.vecm(I_n) + # cone product with identity: X ∘ e = X + Random.seed!(200 + n) + M = randn(n, n) + X = M' * M + I # PSD + x = reshape(ConicIP.vecm(X), :, 1) + e_col = reshape(e, :, 1) + o = zeros(length(x), 1) + ConicIP.xsdc!(x, e_col, o) + # X ∘ I = X*I + I*X = 2X + @test ConicIP.mat(o) ≈ 2 * X atol=1e-10 + end +end + +@testset "ord() inverse of vdim" begin + for n in [1, 2, 3, 5, 10, 15, 20] + k = _vdim(n) + x = zeros(k) + @test ConicIP.ord(x) == n + end +end + +# ────────────────────────────────────────────────────────────── +# Unit Tests: VecCongurance +# ────────────────────────────────────────────────────────────── + +@testset "VecCongurance operations" begin + Random.seed!(42) + for n in [2, 3, 5] + k = _vdim(n) + R = randn(n, n) + n * I # well-conditioned + W = ConicIP.VecCongurance(R) + + x = randn(k, 1) + X = ConicIP.mat(x) + + # W*x should equal vecm(R'*X*R) + expected = ConicIP.vecm(R' * X * R) + @test W * x ≈ expected atol=1e-10 + + # inv(W) * W * x ≈ x + Winv = inv(W) + @test Winv * (W * x) ≈ x atol=1e-9 + + # adjoint: (W'*x) should 
equal vecm(R*X*R') + Wadj = W' + expected_adj = ConicIP.vecm(R * X * R') + @test Wadj * x ≈ expected_adj atol=1e-10 + + # Matrix conversion + Wmat = Matrix(W) + @test Wmat * vec(x) ≈ vec(W * x) atol=1e-10 + + # Sparse conversion + Wsp = sparse(W) + @test Wsp ≈ Wmat atol=1e-10 + + # Size + @test size(W, 1) == k + + # Composition: W1 * W2 + R2 = randn(n, n) + n * I + W2 = ConicIP.VecCongurance(R2) + x_comp = randn(k, 1) + @test (W * W2) * x_comp ≈ W * (W2 * x_comp) atol=1e-9 + end +end + +# ────────────────────────────────────────────────────────────── +# Unit Tests: Nesterov-Todd Scaling +# ────────────────────────────────────────────────────────────── + +@testset "Nesterov-Todd scaling identity: F*z ≈ inv(F)*s" begin + for n in [2, 3, 4, 5] + Random.seed!(300 + n) + k = _vdim(n) + + # Generate strictly feasible points (interior of PSD cone) + M1 = randn(n, n) + Z = M1' * M1 + I # strictly PSD + M2 = randn(n, n) + S = M2' * M2 + I # strictly PSD + + z = reshape(ConicIP.vecm(Z), :, 1) + s = reshape(ConicIP.vecm(S), :, 1) + + F = ConicIP.nestod_sdc(z, s) + + Fz = F * z + Fis = inv(F) * s + + @test Fz ≈ Fis atol=1e-8 + end +end + +@testset "NT scaling: λ = F*z is in interior of PSD cone" begin + for n in [2, 3, 5] + Random.seed!(400 + n) + M1 = randn(n, n); Z = M1' * M1 + I + M2 = randn(n, n); S = M2' * M2 + I + z = reshape(ConicIP.vecm(Z), :, 1) + s = reshape(ConicIP.vecm(S), :, 1) + + F = ConicIP.nestod_sdc(z, s) + λ = F * z + Λ_mat = ConicIP.mat(λ) + + # λ should be PSD (all eigenvalues > 0) + eigs = eigvals(Symmetric(Λ_mat)) + @test all(eigs .> -1e-10) + end +end + +# ────────────────────────────────────────────────────────────── +# Unit Tests: Cone Arithmetic +# ────────────────────────────────────────────────────────────── + +@testset "SDP cone product xsdc!" 
begin + for n in [2, 3, 4] + Random.seed!(500 + n) + k = _vdim(n) + + X = randn(n, n); X = (X + X') / 2 + Y = randn(n, n); Y = (Y + Y') / 2 + + x = reshape(ConicIP.vecm(X), :, 1) + y = reshape(ConicIP.vecm(Y), :, 1) + o = zeros(k, 1) + + ConicIP.xsdc!(x, y, o) + + # x ∘ y = X*Y + Y*X (symmetric product) + expected = ConicIP.vecm(X * Y + Y * X) + @test o ≈ expected atol=1e-10 + end +end + +@testset "SDP cone division dsdc!" begin + for n in [2, 3, 4] + Random.seed!(600 + n) + k = _vdim(n) + + # Y must be in the interior of PSD cone for Lyapunov to be well-defined + M = randn(n, n) + Y = M' * M + I # strictly PSD + X = randn(n, n); X = (X + X') / 2 + + x = reshape(ConicIP.vecm(X), :, 1) + y = reshape(ConicIP.vecm(Y), :, 1) + o = zeros(k, 1) + + ConicIP.dsdc!(x, y, o) + O_mat = ConicIP.mat(o) + + # Division Z = X ÷ Y means Y*Z + Z*Y = X + residual = Y * O_mat + O_mat * Y - X + @test norm(residual, Inf) < 1e-9 + end +end + +@testset "SDP cone product/division inverse relationship" begin + for n in [2, 3, 4] + Random.seed!(700 + n) + k = _vdim(n) + + # Y strictly PSD, X arbitrary symmetric + M = randn(n, n); Y = M' * M + I + X = randn(n, n); X = (X + X') / 2 + + x = reshape(ConicIP.vecm(X), :, 1) + y = reshape(ConicIP.vecm(Y), :, 1) + o_div = zeros(k, 1) + o_prod = zeros(k, 1) + + # Z = X ÷ Y, then Y ∘ Z should ≈ X + ConicIP.dsdc!(x, y, o_div) + ConicIP.xsdc!(y, o_div, o_prod) + @test o_prod ≈ x atol=1e-8 + end +end + +# ────────────────────────────────────────────────────────────── +# Unit Tests: Line Search +# ────────────────────────────────────────────────────────────── + +@testset "maxstep_sdc: PSD point returns finite step" begin + Random.seed!(800) + n = 3 + X = Matrix{Float64}(I, n, n) # strictly PSD + D = -0.5 * I + 0.1 * randn(n, n) + D = (D + D') / 2 + + x = reshape(ConicIP.vecm(X), :, 1) + d = reshape(ConicIP.vecm(D), :, 1) + + α = ConicIP.maxstep_sdc(x, d) + @test isfinite(α) + @test α > 0 + + # At step α, the matrix should be on the boundary (smallest 
eigenvalue ≈ 0) + X_boundary = ConicIP.mat(x - α * d) + eigs = eigvals(Symmetric(X_boundary)) + @test minimum(eigs) ≈ 0 atol=1e-6 +end + +@testset "maxstep_sdc: non-PSD point returns Inf" begin + n = 3 + X = -Matrix{Float64}(I, n, n) # negative definite + D = Matrix{Float64}(I, n, n) + @test ConicIP.maxstep_sdc(ConicIP.vecm(X), ConicIP.vecm(D)) == Inf +end + +@testset "maxstep_sdc: initial point feasibility check" begin + # Strictly feasible point → returns 0 + n = 3 + X = 2.0 * Matrix{Float64}(I, n, n) + @test ConicIP.maxstep_sdc(ConicIP.vecm(X), nothing) == 0 + + # Infeasible point → returns negative value + X_bad = -Matrix{Float64}(I, n, n) + α = ConicIP.maxstep_sdc(ConicIP.vecm(X_bad), nothing) + @test α < 0 +end + +# ────────────────────────────────────────────────────────────── +# Unit Tests: Block with VecCongurance +# ────────────────────────────────────────────────────────────── + +@testset "Block with VecCongurance blocks" begin + Random.seed!(900) + + # Single VecCongurance block + n = 3; k = _vdim(n) + R = randn(n, n) + n * I + W = ConicIP.VecCongurance(R) + B = Block(1); B[1] = W + + x = randn(k) + @test B * x ≈ vec(W * reshape(x, :, 1)) atol=1e-10 + @test Matrix(B) ≈ Matrix(W) atol=1e-10 +end + +@testset "Block with mixed Diagonal + VecCongurance" begin + Random.seed!(901) + + # R+ block (Diagonal) + SDP block (VecCongurance) + n_r = 3; n_s = 2; k_s = _vdim(n_s) + D = Diagonal(rand(n_r) .+ 0.5) + R = randn(n_s, n_s) + n_s * I + W = ConicIP.VecCongurance(R) + + B = Block(2); B[1] = D; B[2] = W + + x = randn(n_r + k_s) + y = B * x + + @test y[1:n_r] ≈ D * x[1:n_r] atol=1e-10 + @test y[n_r+1:end] ≈ vec(W * reshape(x[n_r+1:end], :, 1)) atol=1e-10 + + # inv and adjoint + Binv = inv(B) + @test Binv * (B * x) ≈ x atol=1e-8 + @test B' * x ≈ Matrix(B)' * x atol=1e-10 +end + +# ────────────────────────────────────────────────────────────── +# Standard SDP Problems (direct API) +# ────────────────────────────────────────────────────────────── + +@testset "SDP: 
PSD projection (n=6)" begin
+  prob = sdp_psd_projection(n=6)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims;
+                optTol=optTol, verbose=false)
+  @test sol.status == :Optimal
+  X = ConicIP.mat(sol.y)
+  @test norm(X - prob.known_X, Inf) < tol
+  @test all(eigvals(Symmetric(X)) .> -tol)
+end
+
+@testset "SDP: PSD projection (n=8)" begin
+  prob = sdp_psd_projection(n=8)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims;
+                optTol=optTol, verbose=false)
+  @test sol.status == :Optimal
+  X = ConicIP.mat(sol.y)
+  @test norm(X - prob.known_X, Inf) < tol
+end
+
+@testset "SDP: Trace minimization (n=4)" begin
+  prob = sdp_trace_minimization(n=4)
+  r = sdp_solve(prob; optTol=optTol, verbose=false)
+  @test r.status_match
+  @test r.status == :Optimal
+end
+
+@testset "SDP: Nearest correlation matrix (n=5)" begin
+  prob = sdp_nearest_correlation(n=5)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims,
+                prob.G, prob.d;
+                optTol=optTol, verbose=false)
+  @test sol.status == :Optimal
+  # Result should have unit diagonal
+  X = ConicIP.mat(sol.y)
+  @test all(abs.(diag(X) .- 1.0) .< tol)
+  # Result should be PSD
+  @test all(eigvals(Symmetric(X)) .> -tol)
+end
+
+@testset "SDP: Max-cut relaxation (n=5)" begin
+  prob = sdp_max_cut_relaxation(n=5)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims,
+                prob.G, prob.d;
+                optTol=optTol, verbose=false)
+  @test sol.status == :Optimal
+  # Check diag(X) = 1 (sol.y is the primal; sol.v is the conic dual)
+  X = ConicIP.mat(sol.y)
+  @test all(abs.(diag(X) .- 1.0) .< tol)
+  # Check PSD
+  @test all(eigvals(Symmetric(X)) .> -tol)
+end
+
+@testset "SDP: Lovász theta (n=5)" begin
+  prob = sdp_lovasz_theta(n=5)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims,
+                prob.G, prob.d;
+                optTol=optTol, verbose=false)
+  @test sol.status == :Optimal
+end
+
+@testset "SDP: Rank-1 solution (n=4)" begin
+  prob = sdp_rank_one(n=4)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims,
+                prob.G, prob.d;
+                optTol=optTol, verbose=false)
+  @test 
sol.status == :Optimal
+  # Objective should match minimum eigenvalue
+  @test abs(sol.pobj - prob.known_obj) < tol
+end
+
+@testset "SDP: Identity feasible (n=3)" begin
+  prob = sdp_identity_feasible(n=3)
+  r = sdp_solve(prob; optTol=optTol, verbose=false)
+  @test r.status_match
+end
+
+# ──────────────────────────────────────────────────────────────
+# Multiple blocks and mixed cones
+# ──────────────────────────────────────────────────────────────
+
+@testset "SDP: Multiple SDP blocks" begin
+  prob = sdp_multiple_blocks(n1=3, n2=4)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims;
+                optTol=optTol, verbose=false)
+  @test sol.status == :Optimal
+end
+
+@testset "SDP: Mixed R + Q + S cones" begin
+  prob = sdp_mixed_cones(n_r=4, n_q=3, n_s=3)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims;
+                optTol=optTol, verbose=false)
+  @test sol.status == :Optimal
+
+  # Verify each cone constraint
+  n_r = 4; n_q = 4; k_s = _vdim(3)
+  y = sol.y
+
+  # R+ block: y[1:4] ≥ 0
+  # SOC block: y[5:8] ∈ SOC
+  # SDP block: mat(y[9:end]) ≽ 0
+end
+
+@testset "SDP: With equality constraints (n=4)" begin
+  prob = sdp_with_equality(n=4)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims,
+                prob.G, prob.d;
+                optTol=optTol, verbose=false)
+  @test sol.status == :Optimal
+  # Primal solution matrix should be PSD (sol.y, not the dual sol.v)
+  X = ConicIP.mat(sol.y)
+  @test all(eigvals(Symmetric(X)) .> -tol)
+end
+
+# ──────────────────────────────────────────────────────────────
+# Scaling / Size tests
+# ──────────────────────────────────────────────────────────────
+
+@testset "SDP: Larger PSD projection (n=10)" begin
+  prob = sdp_larger(n=10)
+  sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims;
+                optTol=optTol, verbose=false)
+  @test sol.status == :Optimal
+  X = ConicIP.mat(sol.y)
+  @test norm(X - prob.known_X, Inf) < tol
+end
+
+@testset "SDP: PSD projection across sizes" begin
+  for n in [2, 4, 6, 8, 10, 12]
+    prob = sdp_psd_projection(n=n)
+    sol = conicIP(prob.Q, 
prob.c, prob.A, prob.b, prob.cone_dims; + optTol=optTol, verbose=false) + @test sol.status == :Optimal + X = ConicIP.mat(sol.y) + @test norm(X - prob.known_X, Inf) < tol + end +end + +# ────────────────────────────────────────────────────────────── +# Edge Cases +# ────────────────────────────────────────────────────────────── + +@testset "SDP: 1×1 matrix (scalar case)" begin + # 1×1 PSD = nonnegative scalar + k = 1 + Q = ones(1, 1) + c = reshape([-2.0], :, 1) # min ½x² + 2x s.t. x ≥ 0 → x* = 0... + # Actually: min ½x² - (-2)x = ½x² + 2x → x* = 0 since x ≥ 0 (PSD for 1x1) + # Wait: ConicIP minimizes ½y'Qy - c'y. With c = [-2], minimizes ½x² + 2x. + # Subject to x ∈ S(1) means x ≥ 0. Minimum at x=0. + A = sparse([1.0], [1], [1.0], 1, 1) + b = zeros(1, 1) + sol = conicIP(Q, c, A, b, [("S", 1)]; optTol=optTol, verbose=false) + @test sol.status == :Optimal + @test abs(sol.y[1]) < tol +end + +@testset "SDP: 2×2 matrix" begin + # Project [[1, 2], [2, 1]] onto PSD cone + # Eigenvalues: 3, -1. PSD projection clips -1→0. 
+ n = 2; k = _vdim(n) + target = [1.0 2.0; 2.0 1.0] + λ, V = eigen(Symmetric(target)) + expected = V * diagm(0 => max.(λ, 0.0)) * V' + + Q = Matrix{Float64}(I, k, k) + c = reshape(ConicIP.vecm(target), :, 1) + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + sol = conicIP(Q, c, A, b, [("S", k)]; optTol=optTol, verbose=false) + @test sol.status == :Optimal + X = ConicIP.mat(sol.y) + @test norm(X - expected, Inf) < tol +end + +@testset "SDP: Already-PSD matrix stays unchanged" begin + n = 4; k = _vdim(n) + target = Matrix{Float64}(I, n, n) # already PSD + + Q = Matrix{Float64}(I, k, k) + c = reshape(ConicIP.vecm(target), :, 1) + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + sol = conicIP(Q, c, A, b, [("S", k)]; optTol=optTol, verbose=false) + @test sol.status == :Optimal + X = ConicIP.mat(sol.y) + @test norm(X - target, Inf) < tol +end + +@testset "SDP: Zero matrix projection" begin + # Project zero matrix onto PSD cone → stays at zero + n = 3; k = _vdim(n) + Q = Matrix{Float64}(I, k, k) + c = zeros(k, 1) # target = zero matrix + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + sol = conicIP(Q, c, A, b, [("S", k)]; optTol=optTol, verbose=false) + @test sol.status == :Optimal + X = ConicIP.mat(sol.y) + @test norm(X, Inf) < tol +end + +@testset "SDP: Highly non-PSD target" begin + # Project -10*I onto PSD cone → should get zero matrix + n = 3; k = _vdim(n) + target = -10.0 * Matrix{Float64}(I, n, n) + Q = Matrix{Float64}(I, k, k) + c = reshape(ConicIP.vecm(target), :, 1) + A = sparse(1.0I, k, k) + b = zeros(k, 1) + + sol = conicIP(Q, c, A, b, [("S", k)]; optTol=optTol, verbose=false) + @test sol.status == :Optimal + X = ConicIP.mat(sol.y) + @test norm(X, Inf) < tol +end + +# ────────────────────────────────────────────────────────────── +# SDP across KKT solvers (via harness) +# ────────────────────────────────────────────────────────────── + +@testset "SDP: Consistent results across KKT solvers" begin + prob = sdp_psd_projection(n=6) + + sols = [] + for ks in 
(ConicIP.kktsolver_qr, + ConicIP.kktsolver_sparse, + pivot(ConicIP.kktsolver_2x2)) + sol = conicIP(prob.Q, prob.c, prob.A, prob.b, prob.cone_dims; + kktsolver=ks, optTol=optTol, verbose=false) + push!(sols, sol) + end + + # All should be optimal + @test all(s.status == :Optimal for s in sols) + + # Solutions should agree + for i in 2:length(sols) + @test norm(sols[1].y - sols[i].y, Inf) < tol + end +end + +# ────────────────────────────────────────────────────────────── +# Evaluation Harness Integration +# ────────────────────────────────────────────────────────────── + +@testset "SDP Harness: full problem suite" begin + problems = [ + sdp_psd_projection(n=6), + sdp_psd_projection(n=8), + sdp_trace_minimization(n=4), + sdp_nearest_correlation(n=5), + sdp_max_cut_relaxation(n=5), + sdp_lovasz_theta(n=5), + sdp_rank_one(n=4), + sdp_identity_feasible(n=3), + sdp_multiple_blocks(n1=3, n2=4), + sdp_mixed_cones(n_r=4, n_q=3, n_s=3), + sdp_with_equality(n=4), + sdp_larger(n=10), + ] + + results = run_sdp_harness(problems; optTol=optTol, verbose=false) + summary = print_sdp_report(results) + + # All problems should have matching status + for r in results + @test r.status_match + end +end + +end # @testset "SDP Suite" From 58d7952cfabfb5e9eaf3def9bda3107173166e90 Mon Sep 17 00:00:00 2001 From: Michael Friedlander Date: Sun, 15 Feb 2026 21:01:03 -0800 Subject: [PATCH 4/4] Update SDP test files for Vector convention MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adapt test problem generators and test suite to use Vector instead of n×1 Matrix, matching the refactor in 4f17f4a. 
--- test/sdp_problems.jl | 68 ++++++++++++++++++++++---------------------- test/sdp_tests.jl | 66 +++++++++++++++++++++--------------------- 2 files changed, 67 insertions(+), 67 deletions(-) diff --git a/test/sdp_problems.jl b/test/sdp_problems.jl index 0d2d665..9a3c5f3 100644 --- a/test/sdp_problems.jl +++ b/test/sdp_problems.jl @@ -30,12 +30,12 @@ function sdp_psd_projection(; n=6) k = _vdim(n) Q = Matrix{Float64}(I, k, k) - c = reshape(_vecm(target), :, 1) + c = _vecm(target) A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) return (Q=Q, c=c, A=A, b=b, cone_dims=[("S", k)], - G=spzeros(0, k), d=zeros(0, 1), + G=spzeros(0, k), d=zeros(0), known_status=:Optimal, known_obj=nothing, known_X=expected, description="PSD projection (n=$n): clip negative eigenvalues") @@ -61,12 +61,12 @@ function sdp_trace_minimization(; n=4) # But ConicIP minimizes ½y'Qy - c'y, so to minimize vecm(I)'y # we set c = -vecm(I) (since it becomes -(-vecm(I))'y = vecm(I)'y) Q = spzeros(k, k) - c_vec = -reshape(_vecm(Matrix{Float64}(I, n, n)), :, 1) + c_vec = -_vecm(Matrix{Float64}(I, n, n)) A = sparse(1.0I, k, k) - b = reshape(_vecm(C), :, 1) + b = _vecm(C) return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], - G=spzeros(0, k), d=zeros(0, 1), + G=spzeros(0, k), d=zeros(0), known_status=:Optimal, known_obj=tr(C), known_X=C, description="Trace minimization: min tr(X) s.t. 
X ≽ C (n=$n)") @@ -98,12 +98,12 @@ function sdp_max_cut_relaxation(; n=5, seed=123) # Maximize ¼ tr(L*X) → minimize -¼ tr(L*X) = -¼ vecm(L)'x # ConicIP: min ½y'Qy - c'y → c = ¼ vecm(L) (negated in the objective) Q = spzeros(k, k) - c_vec = reshape(0.25 * _vecm(L), :, 1) + c_vec = 0.25 * _vecm(L) # Constraint: diag(X) = 1 → n equality constraints # Extract diagonal entries from vectorized form G = spzeros(n, k) - d = ones(n, 1) + d = ones(n) for i = 1:n # Position of diagonal entry (i,i) in vecm ordering # vecm uses row-major upper triangular: entries for row i start at @@ -114,7 +114,7 @@ function sdp_max_cut_relaxation(; n=5, seed=123) # X ≽ 0: cone constraint A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], G=G, d=d, @@ -149,14 +149,14 @@ function sdp_lovasz_theta(; n=5, seed=456) # ConicIP: c = vecm(J) J = ones(n, n) Q = spzeros(k, k) - c_vec = reshape(_vecm(J), :, 1) + c_vec = _vecm(J) # Equality constraints: # 1. tr(X) = 1 # 2. 
X_ij = 0 for each edge (i,j) n_eq = 1 + length(edges) G = spzeros(n_eq, k) - d = zeros(n_eq, 1) + d = zeros(n_eq) # tr(X) = 1: sum of diagonal entries I_mat = Matrix{Float64}(I, n, n) @@ -173,7 +173,7 @@ function sdp_lovasz_theta(; n=5, seed=456) end A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], G=sparse(G), d=d, @@ -208,18 +208,18 @@ function sdp_nearest_correlation(; n=5, seed=789) # min ½‖x - vecm(C)‖² = ½x'Ix - vecm(C)'x + const Q = Matrix{Float64}(I, k, k) - c_vec = reshape(_vecm(C), :, 1) + c_vec = _vecm(C) # diag(X) = 1: equality constraints G = spzeros(n, k) - d = ones(n, 1) + d = ones(n) for i = 1:n pos = round(Int, k - (n - i + 2) * (n - i + 1) / 2 + 1) G[i, pos] = 1.0 end A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], G=G, d=d, @@ -246,14 +246,14 @@ function sdp_multiple_blocks(; n1=3, n2=4) Q = spzeros(k, k) I1_vec = _vecm(Matrix{Float64}(I, n1, n1)) I2_vec = _vecm(Matrix{Float64}(I, n2, n2)) - c_vec = -vcat(reshape(I1_vec, :, 1), reshape(I2_vec, :, 1)) + c_vec = -vcat(I1_vec, I2_vec) A = sparse(1.0I, k, k) - b = vcat(reshape(_vecm(C1), :, 1), reshape(_vecm(C2), :, 1)) + b = vcat(_vecm(C1), _vecm(C2)) return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k1), ("S", k2)], - G=spzeros(0, k), d=zeros(0, 1), + G=spzeros(0, k), d=zeros(0), known_status=:Optimal, known_obj=tr(C1) + tr(C2), known_X=nothing, description="Multiple SDP blocks (n1=$n1, n2=$n2)") @@ -273,17 +273,17 @@ function sdp_mixed_cones(; n_r=4, n_q=3, n_s=3) n = n_r + (n_q + 1) + k_s # total variables Q = sparse(1.0I, n, n) - c_target = randn(n, 1) + c_target = randn(n) c = Q * c_target # so optimal is projection of c_target onto feasible set # A = I, b = 0 — project onto cone product A = sparse(1.0I, n, n) - b = zeros(n, 1) + b = zeros(n) cone_dims = [("R", n_r), ("Q", n_q + 1), ("S", k_s)] return (Q=Q, c=c, A=A, b=b, cone_dims=cone_dims, - G=spzeros(0, n), d=zeros(0, 
1), + G=spzeros(0, n), d=zeros(0), known_status=:Optimal, known_obj=nothing, known_X=nothing, description="Mixed cones: R($n_r) + Q($(n_q+1)) + S($k_s)") @@ -304,13 +304,13 @@ function sdp_with_equality(; n=4) M = randn(n, n) C = (M + M') / 2 Q = spzeros(k, k) - c_vec = reshape(_vecm(C), :, 1) + c_vec = _vecm(C) # Equality constraints: tr(A_i * X) = b_i # Use 3 random symmetric constraint matrices n_eq = 3 G = zeros(n_eq, k) - d = zeros(n_eq, 1) + d = zeros(n_eq) # Build a strictly feasible X0 to ensure feasibility X0 = Matrix{Float64}(I, n, n) + 0.1 * randn(n, n) @@ -324,10 +324,10 @@ function sdp_with_equality(; n=4) end A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], - G=sparse(G), d=reshape(d, :, 1), + G=sparse(G), d=d, known_status=:Optimal, known_obj=nothing, known_X=nothing, description="SDP with equality constraints (n=$n, m=$n_eq)") @@ -353,12 +353,12 @@ function sdp_larger(; n=10) k = _vdim(n) Q = Matrix{Float64}(I, k, k) - c_vec = reshape(_vecm(target), :, 1) + c_vec = _vecm(target) A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], - G=spzeros(0, k), d=zeros(0, 1), + G=spzeros(0, k), d=zeros(0), known_status=:Optimal, known_obj=nothing, known_X=expected, description="Larger PSD projection (n=$n)") @@ -373,12 +373,12 @@ Solution: X = I. function sdp_identity_feasible(; n=3) k = _vdim(n) Q = spzeros(k, k) - c_vec = zeros(k, 1) + c_vec = zeros(k) A = sparse(1.0I, k, k) - b = reshape(_vecm(Matrix{Float64}(I, n, n)), :, 1) + b = _vecm(Matrix{Float64}(I, n, n)) return (Q=Q, c=c_vec, A=A, b=b, cone_dims=[("S", k)], - G=spzeros(0, k), d=zeros(0, 1), + G=spzeros(0, k), d=zeros(0), known_status=:Optimal, known_obj=nothing, known_X=Matrix{Float64}(I, n, n), description="Trivially feasible: min 0 s.t. 
X ≽ I (n=$n)") @@ -397,15 +397,15 @@ function sdp_rank_one(; n=4) k = _vdim(n) Q = spzeros(k, k) - c_vec = reshape(_vecm(C), :, 1) + c_vec = _vecm(C) # Equality: tr(X) = 1 I_mat = Matrix{Float64}(I, n, n) G = reshape(_vecm(I_mat)', 1, k) - d = ones(1, 1) + d = ones(1) A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) # Known solution: eigenvector of minimum eigenvalue λ, V = eigen(Symmetric(C)) diff --git a/test/sdp_tests.jl b/test/sdp_tests.jl index ee3e303..0610faf 100644 --- a/test/sdp_tests.jl +++ b/test/sdp_tests.jl @@ -43,9 +43,9 @@ end Random.seed!(200 + n) M = randn(n, n) X = M' * M + I # PSD - x = reshape(ConicIP.vecm(X), :, 1) - e_col = reshape(e, :, 1) - o = zeros(length(x), 1) + x = ConicIP.vecm(X) + e_col = e + o = zeros(length(x)) ConicIP.xsdc!(x, e_col, o) # X ∘ I = X*I + I*X = 2X @test ConicIP.mat(o) ≈ 2 * X atol=1e-10 @@ -71,7 +71,7 @@ end R = randn(n, n) + n * I # well-conditioned W = ConicIP.VecCongurance(R) - x = randn(k, 1) + x = randn(k) X = ConicIP.mat(x) # W*x should equal vecm(R'*X*R) @@ -101,7 +101,7 @@ end # Composition: W1 * W2 R2 = randn(n, n) + n * I W2 = ConicIP.VecCongurance(R2) - x_comp = randn(k, 1) + x_comp = randn(k) @test (W * W2) * x_comp ≈ W * (W2 * x_comp) atol=1e-9 end end @@ -121,8 +121,8 @@ end M2 = randn(n, n) S = M2' * M2 + I # strictly PSD - z = reshape(ConicIP.vecm(Z), :, 1) - s = reshape(ConicIP.vecm(S), :, 1) + z = ConicIP.vecm(Z) + s = ConicIP.vecm(S) F = ConicIP.nestod_sdc(z, s) @@ -138,8 +138,8 @@ end Random.seed!(400 + n) M1 = randn(n, n); Z = M1' * M1 + I M2 = randn(n, n); S = M2' * M2 + I - z = reshape(ConicIP.vecm(Z), :, 1) - s = reshape(ConicIP.vecm(S), :, 1) + z = ConicIP.vecm(Z) + s = ConicIP.vecm(S) F = ConicIP.nestod_sdc(z, s) λ = F * z @@ -163,9 +163,9 @@ end X = randn(n, n); X = (X + X') / 2 Y = randn(n, n); Y = (Y + Y') / 2 - x = reshape(ConicIP.vecm(X), :, 1) - y = reshape(ConicIP.vecm(Y), :, 1) - o = zeros(k, 1) + x = ConicIP.vecm(X) + y = ConicIP.vecm(Y) + o = zeros(k) ConicIP.xsdc!(x, y, o) 
@@ -185,9 +185,9 @@ end Y = M' * M + I # strictly PSD X = randn(n, n); X = (X + X') / 2 - x = reshape(ConicIP.vecm(X), :, 1) - y = reshape(ConicIP.vecm(Y), :, 1) - o = zeros(k, 1) + x = ConicIP.vecm(X) + y = ConicIP.vecm(Y) + o = zeros(k) ConicIP.dsdc!(x, y, o) O_mat = ConicIP.mat(o) @@ -207,10 +207,10 @@ end M = randn(n, n); Y = M' * M + I X = randn(n, n); X = (X + X') / 2 - x = reshape(ConicIP.vecm(X), :, 1) - y = reshape(ConicIP.vecm(Y), :, 1) - o_div = zeros(k, 1) - o_prod = zeros(k, 1) + x = ConicIP.vecm(X) + y = ConicIP.vecm(Y) + o_div = zeros(k) + o_prod = zeros(k) # Z = X ÷ Y, then Y ∘ Z should ≈ X ConicIP.dsdc!(x, y, o_div) @@ -230,8 +230,8 @@ end D = -0.5 * I + 0.1 * randn(n, n) D = (D + D') / 2 - x = reshape(ConicIP.vecm(X), :, 1) - d = reshape(ConicIP.vecm(D), :, 1) + x = ConicIP.vecm(X) + d = ConicIP.vecm(D) α = ConicIP.maxstep_sdc(x, d) @test isfinite(α) @@ -276,7 +276,7 @@ end B = Block(1); B[1] = W x = randn(k) - @test B * x ≈ vec(W * reshape(x, :, 1)) atol=1e-10 + @test B * x ≈ W * x atol=1e-10 @test Matrix(B) ≈ Matrix(W) atol=1e-10 end @@ -295,7 +295,7 @@ end y = B * x @test y[1:n_r] ≈ D * x[1:n_r] atol=1e-10 - @test y[n_r+1:end] ≈ vec(W * reshape(x[n_r+1:end], :, 1)) atol=1e-10 + @test y[n_r+1:end] ≈ W * x[n_r+1:end] atol=1e-10 # inv and adjoint Binv = inv(B) @@ -452,12 +452,12 @@ end # 1×1 PSD = nonnegative scalar k = 1 Q = ones(1, 1) - c = reshape([-2.0], :, 1) # min ½x² + 2x s.t. x ≥ 0 → x* = 0... + c = [-2.0] # min ½x² + 2x s.t. x ≥ 0 → x* = 0... # Actually: min ½x² - (-2)x = ½x² + 2x → x* = 0 since x ≥ 0 (PSD for 1x1) # Wait: ConicIP minimizes ½y'Qy - c'y. With c = [-2], minimizes ½x² + 2x. # Subject to x ∈ S(1) means x ≥ 0. Minimum at x=0. 
A = sparse([1.0], [1], [1.0], 1, 1) - b = zeros(1, 1) + b = zeros(1) sol = conicIP(Q, c, A, b, [("S", 1)]; optTol=optTol, verbose=false) @test sol.status == :Optimal @test abs(sol.y[1]) < tol @@ -472,9 +472,9 @@ end expected = V * diagm(0 => max.(λ, 0.0)) * V' Q = Matrix{Float64}(I, k, k) - c = reshape(ConicIP.vecm(target), :, 1) + c = ConicIP.vecm(target) A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) sol = conicIP(Q, c, A, b, [("S", k)]; optTol=optTol, verbose=false) @test sol.status == :Optimal @@ -487,9 +487,9 @@ end target = Matrix{Float64}(I, n, n) # already PSD Q = Matrix{Float64}(I, k, k) - c = reshape(ConicIP.vecm(target), :, 1) + c = ConicIP.vecm(target) A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) sol = conicIP(Q, c, A, b, [("S", k)]; optTol=optTol, verbose=false) @test sol.status == :Optimal @@ -501,9 +501,9 @@ end # Project zero matrix onto PSD cone → stays at zero n = 3; k = _vdim(n) Q = Matrix{Float64}(I, k, k) - c = zeros(k, 1) # target = zero matrix + c = zeros(k) # target = zero matrix A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) sol = conicIP(Q, c, A, b, [("S", k)]; optTol=optTol, verbose=false) @test sol.status == :Optimal @@ -516,9 +516,9 @@ end n = 3; k = _vdim(n) target = -10.0 * Matrix{Float64}(I, n, n) Q = Matrix{Float64}(I, k, k) - c = reshape(ConicIP.vecm(target), :, 1) + c = ConicIP.vecm(target) A = sparse(1.0I, k, k) - b = zeros(k, 1) + b = zeros(k) sol = conicIP(Q, c, A, b, [("S", k)]; optTol=optTol, verbose=false) @test sol.status == :Optimal