From 276fb7533448b29e8ce853fcc0c860190f3e0735 Mon Sep 17 00:00:00 2001 From: ENCALADA Date: Tue, 22 Sep 2020 18:26:43 +0200 Subject: [PATCH] Update vendor This mainly includes: - tekton clientset deps - code-generator deps --- .../client/clientset/versioned/clientset.go | 111 + .../pkg/client/clientset/versioned/doc.go | 20 + .../client/clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 58 + .../typed/pipeline/v1alpha1/clustertask.go | 164 + .../typed/pipeline/v1alpha1/condition.go | 174 + .../versioned/typed/pipeline/v1alpha1/doc.go | 20 + .../pipeline/v1alpha1/generated_expansion.go | 31 + .../typed/pipeline/v1alpha1/pipeline.go | 174 + .../pipeline/v1alpha1/pipeline_client.go | 114 + .../typed/pipeline/v1alpha1/pipelinerun.go | 191 ++ .../versioned/typed/pipeline/v1alpha1/task.go | 174 + .../typed/pipeline/v1alpha1/taskrun.go | 191 ++ .../typed/pipeline/v1beta1/clustertask.go | 164 + .../versioned/typed/pipeline/v1beta1/doc.go | 20 + .../pipeline/v1beta1/generated_expansion.go | 29 + .../typed/pipeline/v1beta1/pipeline.go | 174 + .../typed/pipeline/v1beta1/pipeline_client.go | 109 + .../typed/pipeline/v1beta1/pipelinerun.go | 191 ++ .../versioned/typed/pipeline/v1beta1/task.go | 174 + .../typed/pipeline/v1beta1/taskrun.go | 191 ++ vendor/gonum.org/v1/gonum/AUTHORS | 89 + vendor/gonum.org/v1/gonum/CONTRIBUTORS | 91 + vendor/gonum.org/v1/gonum/LICENSE | 23 + vendor/gonum.org/v1/gonum/blas/README.md | 47 + vendor/gonum.org/v1/gonum/blas/blas.go | 283 ++ .../gonum.org/v1/gonum/blas/blas64/blas64.go | 469 +++ vendor/gonum.org/v1/gonum/blas/blas64/conv.go | 277 ++ .../v1/gonum/blas/blas64/conv_symmetric.go | 153 + vendor/gonum.org/v1/gonum/blas/blas64/doc.go | 6 + .../v1/gonum/blas/cblas128/cblas128.go | 508 +++ .../gonum.org/v1/gonum/blas/cblas128/conv.go | 279 ++ .../v1/gonum/blas/cblas128/conv_hermitian.go | 155 + .../v1/gonum/blas/cblas128/conv_symmetric.go | 155 + .../gonum.org/v1/gonum/blas/cblas128/doc.go | 6 + 
.../gonum.org/v1/gonum/blas/conversions.bash | 159 + vendor/gonum.org/v1/gonum/blas/doc.go | 108 + vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go | 314 ++ vendor/gonum.org/v1/gonum/blas/gonum/doc.go | 88 + .../gonum.org/v1/gonum/blas/gonum/errors.go | 35 + vendor/gonum.org/v1/gonum/blas/gonum/gemv.go | 190 ++ vendor/gonum.org/v1/gonum/blas/gonum/gonum.go | 58 + .../v1/gonum/blas/gonum/level1cmplx128.go | 445 +++ .../v1/gonum/blas/gonum/level1cmplx64.go | 467 +++ .../v1/gonum/blas/gonum/level1float32.go | 644 ++++ .../gonum/blas/gonum/level1float32_dsdot.go | 53 + .../v1/gonum/blas/gonum/level1float32_sdot.go | 53 + .../gonum/blas/gonum/level1float32_sdsdot.go | 53 + .../v1/gonum/blas/gonum/level1float64.go | 620 ++++ .../v1/gonum/blas/gonum/level1float64_ddot.go | 49 + .../v1/gonum/blas/gonum/level2cmplx128.go | 2906 ++++++++++++++++ .../v1/gonum/blas/gonum/level2cmplx64.go | 2942 +++++++++++++++++ .../v1/gonum/blas/gonum/level2float32.go | 2296 +++++++++++++ .../v1/gonum/blas/gonum/level2float64.go | 2264 +++++++++++++ .../v1/gonum/blas/gonum/level3cmplx128.go | 1715 ++++++++++ .../v1/gonum/blas/gonum/level3cmplx64.go | 1735 ++++++++++ .../v1/gonum/blas/gonum/level3float32.go | 876 +++++ .../v1/gonum/blas/gonum/level3float64.go | 864 +++++ vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go | 318 ++ .../v1/gonum/blas/gonum/single_precision.bash | 218 ++ vendor/gonum.org/v1/gonum/floats/README.md | 4 + vendor/gonum.org/v1/gonum/floats/doc.go | 11 + vendor/gonum.org/v1/gonum/floats/floats.go | 933 ++++++ vendor/gonum.org/v1/gonum/graph/.gitignore | 1 + vendor/gonum.org/v1/gonum/graph/README.md | 3 + vendor/gonum.org/v1/gonum/graph/doc.go | 9 + vendor/gonum.org/v1/gonum/graph/graph.go | 282 ++ .../v1/gonum/graph/internal/linear/doc.go | 6 + .../v1/gonum/graph/internal/linear/linear.go | 73 + .../v1/gonum/graph/internal/ordered/doc.go | 6 + .../v1/gonum/graph/internal/ordered/sort.go | 93 + .../v1/gonum/graph/internal/set/doc.go | 6 + .../v1/gonum/graph/internal/set/same.go 
| 36 + .../graph/internal/set/same_appengine.go | 36 + .../v1/gonum/graph/internal/set/set.go | 228 ++ .../v1/gonum/graph/internal/uid/uid.go | 54 + .../gonum.org/v1/gonum/graph/iterator/doc.go | 9 + .../v1/gonum/graph/iterator/edges.go | 131 + .../v1/gonum/graph/iterator/lines.go | 131 + .../v1/gonum/graph/iterator/nodes.go | 125 + vendor/gonum.org/v1/gonum/graph/multigraph.go | 198 ++ .../gonum.org/v1/gonum/graph/nodes_edges.go | 300 ++ .../graph/simple/dense_directed_matrix.go | 301 ++ .../graph/simple/dense_undirected_matrix.go | 268 ++ .../v1/gonum/graph/simple/directed.go | 235 ++ vendor/gonum.org/v1/gonum/graph/simple/doc.go | 9 + .../gonum.org/v1/gonum/graph/simple/simple.go | 72 + .../v1/gonum/graph/simple/undirected.go | 216 ++ .../gonum/graph/simple/weighted_directed.go | 279 ++ .../gonum/graph/simple/weighted_undirected.go | 273 ++ .../v1/gonum/graph/topo/bron_kerbosch.go | 250 ++ .../v1/gonum/graph/topo/clique_graph.go | 111 + vendor/gonum.org/v1/gonum/graph/topo/doc.go | 6 + .../v1/gonum/graph/topo/johnson_cycles.go | 285 ++ .../v1/gonum/graph/topo/non_tomita_choice.go | 9 + .../v1/gonum/graph/topo/paton_cycles.go | 83 + .../gonum.org/v1/gonum/graph/topo/tarjan.go | 199 ++ .../v1/gonum/graph/topo/tomita_choice.go | 9 + vendor/gonum.org/v1/gonum/graph/topo/topo.go | 68 + .../gonum.org/v1/gonum/graph/traverse/doc.go | 6 + .../v1/gonum/graph/traverse/traverse.go | 231 ++ vendor/gonum.org/v1/gonum/graph/undirect.go | 270 ++ .../gonum/internal/asm/c128/axpyinc_amd64.s | 134 + .../gonum/internal/asm/c128/axpyincto_amd64.s | 141 + .../internal/asm/c128/axpyunitary_amd64.s | 122 + .../internal/asm/c128/axpyunitaryto_amd64.s | 123 + .../v1/gonum/internal/asm/c128/doc.go | 6 + .../gonum/internal/asm/c128/dotcinc_amd64.s | 153 + .../internal/asm/c128/dotcunitary_amd64.s | 143 + .../gonum/internal/asm/c128/dotuinc_amd64.s | 141 + .../internal/asm/c128/dotuunitary_amd64.s | 130 + .../gonum/internal/asm/c128/dscalinc_amd64.s | 69 + 
.../internal/asm/c128/dscalunitary_amd64.s | 66 + .../v1/gonum/internal/asm/c128/scal.go | 31 + .../internal/asm/c128/scalUnitary_amd64.s | 116 + .../gonum/internal/asm/c128/scalinc_amd64.s | 121 + .../v1/gonum/internal/asm/c128/stubs_amd64.go | 96 + .../v1/gonum/internal/asm/c128/stubs_noasm.go | 163 + .../v1/gonum/internal/asm/c64/axpyinc_amd64.s | 151 + .../gonum/internal/asm/c64/axpyincto_amd64.s | 156 + .../internal/asm/c64/axpyunitary_amd64.s | 160 + .../internal/asm/c64/axpyunitaryto_amd64.s | 157 + .../v1/gonum/internal/asm/c64/conj.go | 7 + .../v1/gonum/internal/asm/c64/doc.go | 6 + .../v1/gonum/internal/asm/c64/dotcinc_amd64.s | 160 + .../internal/asm/c64/dotcunitary_amd64.s | 208 ++ .../v1/gonum/internal/asm/c64/dotuinc_amd64.s | 148 + .../internal/asm/c64/dotuunitary_amd64.s | 197 ++ .../v1/gonum/internal/asm/c64/scal.go | 79 + .../v1/gonum/internal/asm/c64/stubs_amd64.go | 68 + .../v1/gonum/internal/asm/c64/stubs_noasm.go | 113 + .../v1/gonum/internal/asm/f32/axpyinc_amd64.s | 73 + .../gonum/internal/asm/f32/axpyincto_amd64.s | 78 + .../internal/asm/f32/axpyunitary_amd64.s | 97 + .../internal/asm/f32/axpyunitaryto_amd64.s | 98 + .../v1/gonum/internal/asm/f32/ddotinc_amd64.s | 91 + .../internal/asm/f32/ddotunitary_amd64.s | 110 + .../v1/gonum/internal/asm/f32/doc.go | 6 + .../v1/gonum/internal/asm/f32/dotinc_amd64.s | 85 + .../gonum/internal/asm/f32/dotunitary_amd64.s | 106 + .../v1/gonum/internal/asm/f32/ge_amd64.go | 15 + .../v1/gonum/internal/asm/f32/ge_amd64.s | 757 +++++ .../v1/gonum/internal/asm/f32/ge_noasm.go | 36 + .../v1/gonum/internal/asm/f32/scal.go | 55 + .../v1/gonum/internal/asm/f32/stubs_amd64.go | 68 + .../v1/gonum/internal/asm/f32/stubs_noasm.go | 113 + .../v1/gonum/internal/asm/f64/abssum_amd64.s | 82 + .../gonum/internal/asm/f64/abssuminc_amd64.s | 90 + .../v1/gonum/internal/asm/f64/add_amd64.s | 66 + .../gonum/internal/asm/f64/addconst_amd64.s | 53 + .../v1/gonum/internal/asm/f64/axpy.go | 57 + 
.../v1/gonum/internal/asm/f64/axpyinc_amd64.s | 142 + .../gonum/internal/asm/f64/axpyincto_amd64.s | 148 + .../internal/asm/f64/axpyunitary_amd64.s | 134 + .../internal/asm/f64/axpyunitaryto_amd64.s | 140 + .../v1/gonum/internal/asm/f64/cumprod_amd64.s | 71 + .../v1/gonum/internal/asm/f64/cumsum_amd64.s | 64 + .../v1/gonum/internal/asm/f64/div_amd64.s | 67 + .../v1/gonum/internal/asm/f64/divto_amd64.s | 73 + .../v1/gonum/internal/asm/f64/doc.go | 6 + .../v1/gonum/internal/asm/f64/dot.go | 35 + .../v1/gonum/internal/asm/f64/dot_amd64.s | 145 + .../v1/gonum/internal/asm/f64/ge_amd64.go | 22 + .../v1/gonum/internal/asm/f64/ge_noasm.go | 118 + .../v1/gonum/internal/asm/f64/gemvN_amd64.s | 685 ++++ .../v1/gonum/internal/asm/f64/gemvT_amd64.s | 745 +++++ .../v1/gonum/internal/asm/f64/ger_amd64.s | 591 ++++ .../v1/gonum/internal/asm/f64/l1norm_amd64.s | 58 + .../gonum/internal/asm/f64/linfnorm_amd64.s | 57 + .../v1/gonum/internal/asm/f64/scal.go | 57 + .../v1/gonum/internal/asm/f64/scalinc_amd64.s | 113 + .../gonum/internal/asm/f64/scalincto_amd64.s | 122 + .../internal/asm/f64/scalunitary_amd64.s | 112 + .../internal/asm/f64/scalunitaryto_amd64.s | 113 + .../v1/gonum/internal/asm/f64/stubs_amd64.go | 172 + .../v1/gonum/internal/asm/f64/stubs_noasm.go | 170 + .../v1/gonum/internal/asm/f64/sum_amd64.s | 100 + .../v1/gonum/internal/cmplx64/abs.go | 14 + .../v1/gonum/internal/cmplx64/conj.go | 12 + .../v1/gonum/internal/cmplx64/doc.go | 7 + .../v1/gonum/internal/cmplx64/isinf.go | 25 + .../v1/gonum/internal/cmplx64/isnan.go | 29 + .../v1/gonum/internal/cmplx64/sqrt.go | 108 + .../gonum.org/v1/gonum/internal/math32/doc.go | 7 + .../v1/gonum/internal/math32/math.go | 111 + .../v1/gonum/internal/math32/signbit.go | 16 + .../v1/gonum/internal/math32/sqrt.go | 25 + .../v1/gonum/internal/math32/sqrt_amd64.go | 20 + .../v1/gonum/internal/math32/sqrt_amd64.s | 20 + vendor/gonum.org/v1/gonum/lapack/.gitignore | 0 vendor/gonum.org/v1/gonum/lapack/README.md | 28 + 
vendor/gonum.org/v1/gonum/lapack/doc.go | 6 + .../gonum.org/v1/gonum/lapack/gonum/dbdsqr.go | 505 +++ .../gonum.org/v1/gonum/lapack/gonum/dgebak.go | 89 + .../gonum.org/v1/gonum/lapack/gonum/dgebal.go | 239 ++ .../gonum.org/v1/gonum/lapack/gonum/dgebd2.go | 86 + .../gonum.org/v1/gonum/lapack/gonum/dgebrd.go | 161 + .../gonum.org/v1/gonum/lapack/gonum/dgecon.go | 92 + .../gonum.org/v1/gonum/lapack/gonum/dgeev.go | 279 ++ .../gonum.org/v1/gonum/lapack/gonum/dgehd2.go | 97 + .../gonum.org/v1/gonum/lapack/gonum/dgehrd.go | 194 ++ .../gonum.org/v1/gonum/lapack/gonum/dgelq2.go | 65 + .../gonum.org/v1/gonum/lapack/gonum/dgelqf.go | 97 + .../gonum.org/v1/gonum/lapack/gonum/dgels.go | 219 ++ .../gonum.org/v1/gonum/lapack/gonum/dgeql2.go | 61 + .../gonum.org/v1/gonum/lapack/gonum/dgeqp3.go | 186 ++ .../gonum.org/v1/gonum/lapack/gonum/dgeqr2.go | 76 + .../gonum.org/v1/gonum/lapack/gonum/dgeqrf.go | 108 + .../gonum.org/v1/gonum/lapack/gonum/dgerq2.go | 68 + .../gonum.org/v1/gonum/lapack/gonum/dgerqf.go | 129 + .../gonum.org/v1/gonum/lapack/gonum/dgesvd.go | 1374 ++++++++ .../gonum.org/v1/gonum/lapack/gonum/dgetf2.go | 84 + .../gonum.org/v1/gonum/lapack/gonum/dgetrf.go | 85 + .../gonum.org/v1/gonum/lapack/gonum/dgetri.go | 116 + .../gonum.org/v1/gonum/lapack/gonum/dgetrs.go | 72 + .../v1/gonum/lapack/gonum/dggsvd3.go | 242 ++ .../v1/gonum/lapack/gonum/dggsvp3.go | 281 ++ .../gonum.org/v1/gonum/lapack/gonum/dhseqr.go | 252 ++ .../gonum.org/v1/gonum/lapack/gonum/dlabrd.go | 173 + .../gonum.org/v1/gonum/lapack/gonum/dlacn2.go | 134 + .../gonum.org/v1/gonum/lapack/gonum/dlacpy.go | 59 + .../gonum.org/v1/gonum/lapack/gonum/dlae2.go | 49 + .../gonum.org/v1/gonum/lapack/gonum/dlaev2.go | 82 + .../gonum.org/v1/gonum/lapack/gonum/dlaexc.go | 269 ++ .../gonum.org/v1/gonum/lapack/gonum/dlags2.go | 182 + .../gonum.org/v1/gonum/lapack/gonum/dlahqr.go | 431 +++ .../gonum.org/v1/gonum/lapack/gonum/dlahr2.go | 195 ++ .../gonum.org/v1/gonum/lapack/gonum/dlaln2.go | 405 +++ 
.../gonum.org/v1/gonum/lapack/gonum/dlange.go | 89 + .../gonum.org/v1/gonum/lapack/gonum/dlanst.go | 75 + .../gonum.org/v1/gonum/lapack/gonum/dlansy.go | 132 + .../gonum.org/v1/gonum/lapack/gonum/dlantr.go | 260 ++ .../gonum.org/v1/gonum/lapack/gonum/dlanv2.go | 132 + .../gonum.org/v1/gonum/lapack/gonum/dlapll.go | 55 + .../gonum.org/v1/gonum/lapack/gonum/dlapmt.go | 89 + .../gonum.org/v1/gonum/lapack/gonum/dlapy2.go | 14 + .../gonum.org/v1/gonum/lapack/gonum/dlaqp2.go | 127 + .../gonum.org/v1/gonum/lapack/gonum/dlaqps.go | 244 ++ .../v1/gonum/lapack/gonum/dlaqr04.go | 478 +++ .../gonum.org/v1/gonum/lapack/gonum/dlaqr1.go | 59 + .../v1/gonum/lapack/gonum/dlaqr23.go | 415 +++ .../gonum.org/v1/gonum/lapack/gonum/dlaqr5.go | 644 ++++ .../gonum.org/v1/gonum/lapack/gonum/dlarf.go | 101 + .../gonum.org/v1/gonum/lapack/gonum/dlarfb.go | 449 +++ .../gonum.org/v1/gonum/lapack/gonum/dlarfg.go | 71 + .../gonum.org/v1/gonum/lapack/gonum/dlarft.go | 166 + .../gonum.org/v1/gonum/lapack/gonum/dlarfx.go | 550 +++ .../gonum.org/v1/gonum/lapack/gonum/dlartg.go | 80 + .../gonum.org/v1/gonum/lapack/gonum/dlas2.go | 43 + .../gonum.org/v1/gonum/lapack/gonum/dlascl.go | 111 + .../gonum.org/v1/gonum/lapack/gonum/dlaset.go | 57 + .../gonum.org/v1/gonum/lapack/gonum/dlasq1.go | 100 + .../gonum.org/v1/gonum/lapack/gonum/dlasq2.go | 369 +++ .../gonum.org/v1/gonum/lapack/gonum/dlasq3.go | 172 + .../gonum.org/v1/gonum/lapack/gonum/dlasq4.go | 249 ++ .../gonum.org/v1/gonum/lapack/gonum/dlasq5.go | 140 + .../gonum.org/v1/gonum/lapack/gonum/dlasq6.go | 118 + .../gonum.org/v1/gonum/lapack/gonum/dlasr.go | 279 ++ .../gonum.org/v1/gonum/lapack/gonum/dlasrt.go | 36 + .../gonum.org/v1/gonum/lapack/gonum/dlassq.go | 41 + .../gonum.org/v1/gonum/lapack/gonum/dlasv2.go | 115 + .../gonum.org/v1/gonum/lapack/gonum/dlaswp.go | 52 + .../gonum.org/v1/gonum/lapack/gonum/dlasy2.go | 290 ++ .../gonum.org/v1/gonum/lapack/gonum/dlatrd.go | 165 + .../gonum.org/v1/gonum/lapack/gonum/dlatrs.go | 359 ++ 
.../gonum.org/v1/gonum/lapack/gonum/dlauu2.go | 64 + .../gonum.org/v1/gonum/lapack/gonum/dlauum.go | 81 + vendor/gonum.org/v1/gonum/lapack/gonum/doc.go | 28 + .../gonum.org/v1/gonum/lapack/gonum/dorg2l.go | 76 + .../gonum.org/v1/gonum/lapack/gonum/dorg2r.go | 75 + .../gonum.org/v1/gonum/lapack/gonum/dorgbr.go | 138 + .../gonum.org/v1/gonum/lapack/gonum/dorghr.go | 101 + .../gonum.org/v1/gonum/lapack/gonum/dorgl2.go | 71 + .../gonum.org/v1/gonum/lapack/gonum/dorglq.go | 123 + .../gonum.org/v1/gonum/lapack/gonum/dorgql.go | 136 + .../gonum.org/v1/gonum/lapack/gonum/dorgqr.go | 134 + .../gonum.org/v1/gonum/lapack/gonum/dorgtr.go | 104 + .../gonum.org/v1/gonum/lapack/gonum/dorm2r.go | 101 + .../gonum.org/v1/gonum/lapack/gonum/dormbr.go | 178 + .../gonum.org/v1/gonum/lapack/gonum/dormhr.go | 129 + .../gonum.org/v1/gonum/lapack/gonum/dorml2.go | 102 + .../gonum.org/v1/gonum/lapack/gonum/dormlq.go | 174 + .../gonum.org/v1/gonum/lapack/gonum/dormqr.go | 177 + .../gonum.org/v1/gonum/lapack/gonum/dormr2.go | 103 + .../gonum.org/v1/gonum/lapack/gonum/dpbtf2.go | 110 + .../gonum.org/v1/gonum/lapack/gonum/dpocon.go | 90 + .../gonum.org/v1/gonum/lapack/gonum/dpotf2.go | 82 + .../gonum.org/v1/gonum/lapack/gonum/dpotrf.go | 81 + .../gonum.org/v1/gonum/lapack/gonum/dpotri.go | 44 + .../gonum.org/v1/gonum/lapack/gonum/dpotrs.go | 62 + .../gonum.org/v1/gonum/lapack/gonum/drscl.go | 63 + .../gonum.org/v1/gonum/lapack/gonum/dsteqr.go | 376 +++ .../gonum.org/v1/gonum/lapack/gonum/dsterf.go | 285 ++ .../gonum.org/v1/gonum/lapack/gonum/dsyev.go | 130 + .../gonum.org/v1/gonum/lapack/gonum/dsytd2.go | 136 + .../gonum.org/v1/gonum/lapack/gonum/dsytrd.go | 172 + .../gonum.org/v1/gonum/lapack/gonum/dtgsja.go | 373 +++ .../gonum.org/v1/gonum/lapack/gonum/dtrcon.go | 90 + .../v1/gonum/lapack/gonum/dtrevc3.go | 885 +++++ .../gonum.org/v1/gonum/lapack/gonum/dtrexc.go | 230 ++ .../gonum.org/v1/gonum/lapack/gonum/dtrti2.go | 69 + .../gonum.org/v1/gonum/lapack/gonum/dtrtri.go | 72 + 
.../gonum.org/v1/gonum/lapack/gonum/dtrtrs.go | 55 + .../gonum.org/v1/gonum/lapack/gonum/errors.go | 174 + .../gonum.org/v1/gonum/lapack/gonum/iladlc.go | 45 + .../gonum.org/v1/gonum/lapack/gonum/iladlr.go | 41 + .../gonum.org/v1/gonum/lapack/gonum/ilaenv.go | 387 +++ .../gonum.org/v1/gonum/lapack/gonum/iparmq.go | 115 + .../gonum.org/v1/gonum/lapack/gonum/lapack.go | 51 + vendor/gonum.org/v1/gonum/lapack/lapack.go | 213 ++ .../gonum.org/v1/gonum/lapack/lapack64/doc.go | 20 + .../v1/gonum/lapack/lapack64/lapack64.go | 581 ++++ vendor/gonum.org/v1/gonum/mat/README.md | 3 + vendor/gonum.org/v1/gonum/mat/band.go | 263 ++ vendor/gonum.org/v1/gonum/mat/cdense.go | 168 + vendor/gonum.org/v1/gonum/mat/cholesky.go | 673 ++++ vendor/gonum.org/v1/gonum/mat/cmatrix.go | 210 ++ vendor/gonum.org/v1/gonum/mat/consts.go | 15 + vendor/gonum.org/v1/gonum/mat/dense.go | 558 ++++ .../v1/gonum/mat/dense_arithmetic.go | 886 +++++ vendor/gonum.org/v1/gonum/mat/diagonal.go | 311 ++ vendor/gonum.org/v1/gonum/mat/doc.go | 169 + vendor/gonum.org/v1/gonum/mat/eigen.go | 350 ++ vendor/gonum.org/v1/gonum/mat/errors.go | 149 + vendor/gonum.org/v1/gonum/mat/format.go | 238 ++ vendor/gonum.org/v1/gonum/mat/gsvd.go | 415 +++ vendor/gonum.org/v1/gonum/mat/hogsvd.go | 233 ++ .../v1/gonum/mat/index_bound_checks.go | 348 ++ .../v1/gonum/mat/index_no_bound_checks.go | 359 ++ vendor/gonum.org/v1/gonum/mat/inner.go | 121 + vendor/gonum.org/v1/gonum/mat/io.go | 492 +++ vendor/gonum.org/v1/gonum/mat/lq.go | 262 ++ vendor/gonum.org/v1/gonum/mat/lu.go | 422 +++ vendor/gonum.org/v1/gonum/mat/matrix.go | 946 ++++++ vendor/gonum.org/v1/gonum/mat/offset.go | 20 + .../v1/gonum/mat/offset_appengine.go | 24 + vendor/gonum.org/v1/gonum/mat/pool.go | 236 ++ vendor/gonum.org/v1/gonum/mat/product.go | 193 ++ vendor/gonum.org/v1/gonum/mat/qr.go | 260 ++ vendor/gonum.org/v1/gonum/mat/shadow.go | 226 ++ vendor/gonum.org/v1/gonum/mat/solve.go | 140 + vendor/gonum.org/v1/gonum/mat/svd.go | 247 ++ 
vendor/gonum.org/v1/gonum/mat/symband.go | 221 ++ vendor/gonum.org/v1/gonum/mat/symmetric.go | 602 ++++ vendor/gonum.org/v1/gonum/mat/triangular.go | 659 ++++ vendor/gonum.org/v1/gonum/mat/triband.go | 353 ++ vendor/gonum.org/v1/gonum/mat/vector.go | 741 +++++ .../client-go/discovery/fake/discovery.go | 160 + vendor/k8s.io/client-go/testing/actions.go | 671 ++++ vendor/k8s.io/client-go/testing/fake.go | 216 ++ vendor/k8s.io/client-go/testing/fixture.go | 577 ++++ vendor/k8s.io/code-generator/CONTRIBUTING.md | 7 + vendor/k8s.io/code-generator/LICENSE | 202 ++ vendor/k8s.io/code-generator/OWNERS | 13 + vendor/k8s.io/code-generator/README.md | 24 + .../k8s.io/code-generator/SECURITY_CONTACTS | 17 + .../code-generator/cmd/client-gen/OWNERS | 10 + .../code-generator/cmd/client-gen/README.md | 4 + .../cmd/client-gen/args/args.go | 120 + .../cmd/client-gen/args/gvpackages.go | 183 + .../cmd/client-gen/args/gvtype.go | 110 + .../client-gen/generators/client_generator.go | 403 +++ .../generators/fake/fake_client_generator.go | 130 + .../fake/generator_fake_for_clientset.go | 167 + .../fake/generator_fake_for_group.go | 130 + .../fake/generator_fake_for_type.go | 479 +++ .../generators/generator_for_clientset.go | 183 + .../generators/generator_for_expansion.go | 54 + .../generators/generator_for_group.go | 246 ++ .../generators/generator_for_type.go | 599 ++++ .../generators/scheme/generator_for_scheme.go | 186 ++ .../cmd/client-gen/generators/util/tags.go | 341 ++ .../code-generator/cmd/client-gen/main.go | 66 + .../cmd/client-gen/path/path.go | 31 + .../cmd/client-gen/types/helpers.go | 121 + .../cmd/client-gen/types/types.go | 75 + .../cmd/conversion-gen/args/args.go | 83 + .../conversion-gen/generators/conversion.go | 1195 +++++++ .../code-generator/cmd/conversion-gen/main.go | 125 + .../cmd/deepcopy-gen/args/args.go | 54 + .../code-generator/cmd/deepcopy-gen/main.go | 85 + .../cmd/defaulter-gen/args/args.go | 54 + .../code-generator/cmd/defaulter-gen/main.go | 84 + 
.../cmd/go-to-protobuf/.gitignore | 1 + .../code-generator/cmd/go-to-protobuf/OWNERS | 6 + .../code-generator/cmd/go-to-protobuf/main.go | 39 + .../cmd/go-to-protobuf/protobuf/cmd.go | 428 +++ .../cmd/go-to-protobuf/protobuf/generator.go | 773 +++++ .../go-to-protobuf/protobuf/import_tracker.go | 50 + .../cmd/go-to-protobuf/protobuf/namer.go | 208 ++ .../cmd/go-to-protobuf/protobuf/package.go | 215 ++ .../cmd/go-to-protobuf/protobuf/parser.go | 452 +++ .../cmd/go-to-protobuf/protobuf/tags.go | 33 + .../code-generator/cmd/import-boss/.gitignore | 1 + .../code-generator/cmd/import-boss/main.go | 96 + .../cmd/informer-gen/args/args.go | 77 + .../cmd/informer-gen/generators/factory.go | 258 ++ .../generators/factoryinterface.go | 90 + .../cmd/informer-gen/generators/generic.go | 184 ++ .../informer-gen/generators/groupinterface.go | 118 + .../cmd/informer-gen/generators/informer.go | 186 ++ .../cmd/informer-gen/generators/packages.go | 352 ++ .../cmd/informer-gen/generators/types.go | 42 + .../generators/versioninterface.go | 109 + .../code-generator/cmd/informer-gen/main.go | 63 + .../cmd/lister-gen/.import-restrictions | 1 + .../cmd/lister-gen/args/args.go | 56 + .../cmd/lister-gen/generators/expansion.go | 67 + .../cmd/lister-gen/generators/lister.go | 371 +++ .../code-generator/cmd/lister-gen/main.go | 60 + .../code-generator/cmd/openapi-gen/main.go | 57 + .../cmd/register-gen/args/args.go | 39 + .../cmd/register-gen/generators/packages.go | 137 + .../generators/register_external.go | 117 + .../code-generator/cmd/register-gen/main.go | 53 + .../code-generator/cmd/set-gen/.gitignore | 1 + .../k8s.io/code-generator/cmd/set-gen/main.go | 56 + .../k8s.io/code-generator/code-of-conduct.md | 3 + .../k8s.io/code-generator/generate-groups.sh | 92 + .../generate-internal-groups.sh | 122 + vendor/k8s.io/code-generator/go.mod | 30 + vendor/k8s.io/code-generator/go.sum | 134 + .../code-generator/pkg/namer/tag-override.go | 58 + .../k8s.io/code-generator/pkg/util/build.go | 61 
+ .../third_party/forked/golang/reflect/type.go | 91 + vendor/k8s.io/code-generator/tools.go | 35 + vendor/k8s.io/gengo/LICENSE | 202 ++ vendor/k8s.io/gengo/args/args.go | 212 ++ .../deepcopy-gen/generators/deepcopy.go | 924 ++++++ .../defaulter-gen/generators/defaulter.go | 832 +++++ .../import-boss/generators/import_restrict.go | 419 +++ .../gengo/examples/set-gen/generators/sets.go | 362 ++ .../gengo/examples/set-gen/generators/tags.go | 33 + .../gengo/examples/set-gen/sets/byte.go | 205 ++ .../k8s.io/gengo/examples/set-gen/sets/doc.go | 20 + .../gengo/examples/set-gen/sets/empty.go | 23 + .../k8s.io/gengo/examples/set-gen/sets/int.go | 205 ++ .../gengo/examples/set-gen/sets/int64.go | 205 ++ .../gengo/examples/set-gen/sets/string.go | 205 ++ .../gengo/generator/default_generator.go | 62 + .../k8s.io/gengo/generator/default_package.go | 75 + vendor/k8s.io/gengo/generator/doc.go | 31 + .../k8s.io/gengo/generator/error_tracker.go | 50 + vendor/k8s.io/gengo/generator/execute.go | 314 ++ vendor/k8s.io/gengo/generator/generator.go | 256 ++ .../k8s.io/gengo/generator/import_tracker.go | 70 + .../k8s.io/gengo/generator/snippet_writer.go | 154 + .../gengo/generator/transitive_closure.go | 65 + vendor/k8s.io/gengo/namer/doc.go | 31 + vendor/k8s.io/gengo/namer/import_tracker.go | 112 + vendor/k8s.io/gengo/namer/namer.go | 383 +++ vendor/k8s.io/gengo/namer/order.go | 72 + vendor/k8s.io/gengo/namer/plural_namer.go | 120 + vendor/k8s.io/gengo/parser/doc.go | 19 + vendor/k8s.io/gengo/parser/parse.go | 859 +++++ vendor/k8s.io/gengo/types/comments.go | 82 + vendor/k8s.io/gengo/types/doc.go | 19 + vendor/k8s.io/gengo/types/flatten.go | 57 + vendor/k8s.io/gengo/types/types.go | 526 +++ .../kube-openapi/cmd/openapi-gen/args/args.go | 76 + .../kube-openapi/pkg/generators/README.md | 49 + .../kube-openapi/pkg/generators/api_linter.go | 220 ++ .../kube-openapi/pkg/generators/config.go | 91 + .../kube-openapi/pkg/generators/extension.go | 188 ++ 
.../kube-openapi/pkg/generators/openapi.go | 692 ++++ .../kube-openapi/pkg/generators/rules/OWNERS | 4 + .../kube-openapi/pkg/generators/rules/doc.go | 23 + .../pkg/generators/rules/idl_tag.go | 36 + .../pkg/generators/rules/names_match.go | 172 + .../generators/rules/omitempty_match_case.go | 64 + .../kube-openapi/pkg/generators/union.go | 207 ++ .../kube-openapi/pkg/util/sets/empty.go | 27 + .../kube-openapi/pkg/util/sets/string.go | 207 ++ vendor/modules.txt | 2 + 474 files changed, 95240 insertions(+) create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go create mode 100644 
vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/clustertask.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/doc.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipelinerun.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/task.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/taskrun.go create mode 100644 vendor/gonum.org/v1/gonum/AUTHORS create mode 100644 vendor/gonum.org/v1/gonum/CONTRIBUTORS create mode 100644 vendor/gonum.org/v1/gonum/LICENSE create mode 100644 vendor/gonum.org/v1/gonum/blas/README.md create mode 100644 vendor/gonum.org/v1/gonum/blas/blas.go create mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/blas64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/conv.go create mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go create mode 100644 vendor/gonum.org/v1/gonum/blas/blas64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go create mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/conv.go create mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go create mode 100644 vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go create mode 100644 
vendor/gonum.org/v1/gonum/blas/cblas128/doc.go create mode 100644 vendor/gonum.org/v1/gonum/blas/conversions.bash create mode 100644 vendor/gonum.org/v1/gonum/blas/doc.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/doc.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/errors.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/gemv.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/gonum.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go create mode 100644 vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash create mode 100644 vendor/gonum.org/v1/gonum/floats/README.md create mode 100644 vendor/gonum.org/v1/gonum/floats/doc.go create mode 100644 
vendor/gonum.org/v1/gonum/floats/floats.go create mode 100644 vendor/gonum.org/v1/gonum/graph/.gitignore create mode 100644 vendor/gonum.org/v1/gonum/graph/README.md create mode 100644 vendor/gonum.org/v1/gonum/graph/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/graph.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/same.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/set/set.go create mode 100644 vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go create mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/edges.go create mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/lines.go create mode 100644 vendor/gonum.org/v1/gonum/graph/iterator/nodes.go create mode 100644 vendor/gonum.org/v1/gonum/graph/multigraph.go create mode 100644 vendor/gonum.org/v1/gonum/graph/nodes_edges.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/directed.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/simple.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/undirected.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go create mode 100644 vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go create mode 100644 
vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/tarjan.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go create mode 100644 vendor/gonum.org/v1/gonum/graph/topo/topo.go create mode 100644 vendor/gonum.org/v1/gonum/graph/traverse/doc.go create mode 100644 vendor/gonum.org/v1/gonum/graph/traverse/traverse.go create mode 100644 vendor/gonum.org/v1/gonum/graph/undirect.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s create mode 100644 
vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go create mode 100644 
vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s create mode 100644 
vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go create mode 100644 vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go create mode 100644 vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/math.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/signbit.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/sqrt.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go create mode 100644 vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s create mode 100644 vendor/gonum.org/v1/gonum/lapack/.gitignore create mode 100644 vendor/gonum.org/v1/gonum/lapack/README.md create mode 100644 vendor/gonum.org/v1/gonum/lapack/doc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go 
create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dggsvp3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go create mode 100644 
vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go create mode 100644 
vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/doc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go create mode 100644 
vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/errors.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/lapack.go create mode 100644 vendor/gonum.org/v1/gonum/lapack/lapack64/doc.go create mode 100644 
vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go create mode 100644 vendor/gonum.org/v1/gonum/mat/README.md create mode 100644 vendor/gonum.org/v1/gonum/mat/band.go create mode 100644 vendor/gonum.org/v1/gonum/mat/cdense.go create mode 100644 vendor/gonum.org/v1/gonum/mat/cholesky.go create mode 100644 vendor/gonum.org/v1/gonum/mat/cmatrix.go create mode 100644 vendor/gonum.org/v1/gonum/mat/consts.go create mode 100644 vendor/gonum.org/v1/gonum/mat/dense.go create mode 100644 vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go create mode 100644 vendor/gonum.org/v1/gonum/mat/diagonal.go create mode 100644 vendor/gonum.org/v1/gonum/mat/doc.go create mode 100644 vendor/gonum.org/v1/gonum/mat/eigen.go create mode 100644 vendor/gonum.org/v1/gonum/mat/errors.go create mode 100644 vendor/gonum.org/v1/gonum/mat/format.go create mode 100644 vendor/gonum.org/v1/gonum/mat/gsvd.go create mode 100644 vendor/gonum.org/v1/gonum/mat/hogsvd.go create mode 100644 vendor/gonum.org/v1/gonum/mat/index_bound_checks.go create mode 100644 vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go create mode 100644 vendor/gonum.org/v1/gonum/mat/inner.go create mode 100644 vendor/gonum.org/v1/gonum/mat/io.go create mode 100644 vendor/gonum.org/v1/gonum/mat/lq.go create mode 100644 vendor/gonum.org/v1/gonum/mat/lu.go create mode 100644 vendor/gonum.org/v1/gonum/mat/matrix.go create mode 100644 vendor/gonum.org/v1/gonum/mat/offset.go create mode 100644 vendor/gonum.org/v1/gonum/mat/offset_appengine.go create mode 100644 vendor/gonum.org/v1/gonum/mat/pool.go create mode 100644 vendor/gonum.org/v1/gonum/mat/product.go create mode 100644 vendor/gonum.org/v1/gonum/mat/qr.go create mode 100644 vendor/gonum.org/v1/gonum/mat/shadow.go create mode 100644 vendor/gonum.org/v1/gonum/mat/solve.go create mode 100644 vendor/gonum.org/v1/gonum/mat/svd.go create mode 100644 vendor/gonum.org/v1/gonum/mat/symband.go create mode 100644 vendor/gonum.org/v1/gonum/mat/symmetric.go create mode 100644 
vendor/gonum.org/v1/gonum/mat/triangular.go create mode 100644 vendor/gonum.org/v1/gonum/mat/triband.go create mode 100644 vendor/gonum.org/v1/gonum/mat/vector.go create mode 100644 vendor/k8s.io/client-go/discovery/fake/discovery.go create mode 100644 vendor/k8s.io/client-go/testing/actions.go create mode 100644 vendor/k8s.io/client-go/testing/fake.go create mode 100644 vendor/k8s.io/client-go/testing/fixture.go create mode 100644 vendor/k8s.io/code-generator/CONTRIBUTING.md create mode 100644 vendor/k8s.io/code-generator/LICENSE create mode 100644 vendor/k8s.io/code-generator/OWNERS create mode 100644 vendor/k8s.io/code-generator/README.md create mode 100644 vendor/k8s.io/code-generator/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/README.md create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/args/gvtype.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go create mode 100644 
vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/path/path.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/types/types.go create mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go create mode 100644 vendor/k8s.io/code-generator/cmd/conversion-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go create mode 100644 
vendor/k8s.io/code-generator/cmd/import-boss/.gitignore create mode 100644 vendor/k8s.io/code-generator/cmd/import-boss/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go create mode 100644 vendor/k8s.io/code-generator/cmd/informer-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/.import-restrictions create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go create mode 100644 vendor/k8s.io/code-generator/cmd/lister-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/openapi-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go create mode 100644 vendor/k8s.io/code-generator/cmd/register-gen/main.go create mode 100644 vendor/k8s.io/code-generator/cmd/set-gen/.gitignore create mode 100644 vendor/k8s.io/code-generator/cmd/set-gen/main.go create mode 100644 
vendor/k8s.io/code-generator/code-of-conduct.md create mode 100644 vendor/k8s.io/code-generator/generate-groups.sh create mode 100644 vendor/k8s.io/code-generator/generate-internal-groups.sh create mode 100644 vendor/k8s.io/code-generator/go.mod create mode 100644 vendor/k8s.io/code-generator/go.sum create mode 100644 vendor/k8s.io/code-generator/pkg/namer/tag-override.go create mode 100644 vendor/k8s.io/code-generator/pkg/util/build.go create mode 100644 vendor/k8s.io/code-generator/third_party/forked/golang/reflect/type.go create mode 100644 vendor/k8s.io/code-generator/tools.go create mode 100644 vendor/k8s.io/gengo/LICENSE create mode 100644 vendor/k8s.io/gengo/args/args.go create mode 100644 vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go create mode 100644 vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go create mode 100644 vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/generators/sets.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/generators/tags.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/byte.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/doc.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/empty.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/int.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/int64.go create mode 100644 vendor/k8s.io/gengo/examples/set-gen/sets/string.go create mode 100644 vendor/k8s.io/gengo/generator/default_generator.go create mode 100644 vendor/k8s.io/gengo/generator/default_package.go create mode 100644 vendor/k8s.io/gengo/generator/doc.go create mode 100644 vendor/k8s.io/gengo/generator/error_tracker.go create mode 100644 vendor/k8s.io/gengo/generator/execute.go create mode 100644 vendor/k8s.io/gengo/generator/generator.go create mode 100644 vendor/k8s.io/gengo/generator/import_tracker.go create mode 100644 
vendor/k8s.io/gengo/generator/snippet_writer.go create mode 100644 vendor/k8s.io/gengo/generator/transitive_closure.go create mode 100644 vendor/k8s.io/gengo/namer/doc.go create mode 100644 vendor/k8s.io/gengo/namer/import_tracker.go create mode 100644 vendor/k8s.io/gengo/namer/namer.go create mode 100644 vendor/k8s.io/gengo/namer/order.go create mode 100644 vendor/k8s.io/gengo/namer/plural_namer.go create mode 100644 vendor/k8s.io/gengo/parser/doc.go create mode 100644 vendor/k8s.io/gengo/parser/parse.go create mode 100644 vendor/k8s.io/gengo/types/comments.go create mode 100644 vendor/k8s.io/gengo/types/doc.go create mode 100644 vendor/k8s.io/gengo/types/flatten.go create mode 100644 vendor/k8s.io/gengo/types/types.go create mode 100644 vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/README.md create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/config.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/extension.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/openapi.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/union.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/sets/string.go diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 
0000000000..cea4669136 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,111 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" + tektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface + TektonV1beta1() tektonv1beta1.TektonV1beta1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. 
+type Clientset struct { + *discovery.DiscoveryClient + tektonV1alpha1 *tektonv1alpha1.TektonV1alpha1Client + tektonV1beta1 *tektonv1beta1.TektonV1beta1Client +} + +// TektonV1alpha1 retrieves the TektonV1alpha1Client +func (c *Clientset) TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface { + return c.tektonV1alpha1 +} + +// TektonV1beta1 retrieves the TektonV1beta1Client +func (c *Clientset) TektonV1beta1() tektonv1beta1.TektonV1beta1Interface { + return c.tektonV1beta1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.tektonV1alpha1, err = tektonv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.tektonV1beta1, err = tektonv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.tektonV1alpha1 = tektonv1alpha1.NewForConfigOrDie(c) + cs.tektonV1beta1 = tektonv1beta1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.tektonV1alpha1 = tektonv1alpha1.New(c) + cs.tektonV1beta1 = tektonv1beta1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go new file mode 100644 index 0000000000..0d13552ae2 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. 
+package versioned diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..0fb16cc056 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..757e6eb815 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + tektonv1alpha1.AddToScheme, + tektonv1beta1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go new file mode 100644 index 0000000000..72c6514102 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go @@ -0,0 +1,164 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterTasksGetter has a method to return a ClusterTaskInterface. +// A group's client should implement this interface. +type ClusterTasksGetter interface { + ClusterTasks() ClusterTaskInterface +} + +// ClusterTaskInterface has methods to work with ClusterTask resources. 
+type ClusterTaskInterface interface { + Create(*v1alpha1.ClusterTask) (*v1alpha1.ClusterTask, error) + Update(*v1alpha1.ClusterTask) (*v1alpha1.ClusterTask, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterTask, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterTaskList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterTask, err error) + ClusterTaskExpansion +} + +// clusterTasks implements ClusterTaskInterface +type clusterTasks struct { + client rest.Interface +} + +// newClusterTasks returns a ClusterTasks +func newClusterTasks(c *TektonV1alpha1Client) *clusterTasks { + return &clusterTasks{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterTask, and returns the corresponding clusterTask object, and an error if there is any. +func (c *clusterTasks) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterTask, err error) { + result = &v1alpha1.ClusterTask{} + err = c.client.Get(). + Resource("clustertasks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterTasks that match those selectors. +func (c *clusterTasks) List(opts v1.ListOptions) (result *v1alpha1.ClusterTaskList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterTaskList{} + err = c.client.Get(). + Resource("clustertasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterTasks. 
+func (c *clusterTasks) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustertasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterTask and creates it. Returns the server's representation of the clusterTask, and an error, if there is any. +func (c *clusterTasks) Create(clusterTask *v1alpha1.ClusterTask) (result *v1alpha1.ClusterTask, err error) { + result = &v1alpha1.ClusterTask{} + err = c.client.Post(). + Resource("clustertasks"). + Body(clusterTask). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterTask and updates it. Returns the server's representation of the clusterTask, and an error, if there is any. +func (c *clusterTasks) Update(clusterTask *v1alpha1.ClusterTask) (result *v1alpha1.ClusterTask, err error) { + result = &v1alpha1.ClusterTask{} + err = c.client.Put(). + Resource("clustertasks"). + Name(clusterTask.Name). + Body(clusterTask). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterTask and deletes it. Returns an error if one occurs. +func (c *clusterTasks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustertasks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterTasks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustertasks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). 
+ Error() +} + +// Patch applies the patch and returns the patched clusterTask. +func (c *clusterTasks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterTask, err error) { + result = &v1alpha1.ClusterTask{} + err = c.client.Patch(pt). + Resource("clustertasks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go new file mode 100644 index 0000000000..6402ee1f1f --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ConditionsGetter has a method to return a ConditionInterface. +// A group's client should implement this interface. 
+type ConditionsGetter interface { + Conditions(namespace string) ConditionInterface +} + +// ConditionInterface has methods to work with Condition resources. +type ConditionInterface interface { + Create(*v1alpha1.Condition) (*v1alpha1.Condition, error) + Update(*v1alpha1.Condition) (*v1alpha1.Condition, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Condition, error) + List(opts v1.ListOptions) (*v1alpha1.ConditionList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Condition, err error) + ConditionExpansion +} + +// conditions implements ConditionInterface +type conditions struct { + client rest.Interface + ns string +} + +// newConditions returns a Conditions +func newConditions(c *TektonV1alpha1Client, namespace string) *conditions { + return &conditions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the condition, and returns the corresponding condition object, and an error if there is any. +func (c *conditions) Get(name string, options v1.GetOptions) (result *v1alpha1.Condition, err error) { + result = &v1alpha1.Condition{} + err = c.client.Get(). + Namespace(c.ns). + Resource("conditions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Conditions that match those selectors. +func (c *conditions) List(opts v1.ListOptions) (result *v1alpha1.ConditionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ConditionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("conditions"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested conditions. +func (c *conditions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("conditions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a condition and creates it. Returns the server's representation of the condition, and an error, if there is any. +func (c *conditions) Create(condition *v1alpha1.Condition) (result *v1alpha1.Condition, err error) { + result = &v1alpha1.Condition{} + err = c.client.Post(). + Namespace(c.ns). + Resource("conditions"). + Body(condition). + Do(). + Into(result) + return +} + +// Update takes the representation of a condition and updates it. Returns the server's representation of the condition, and an error, if there is any. +func (c *conditions) Update(condition *v1alpha1.Condition) (result *v1alpha1.Condition, err error) { + result = &v1alpha1.Condition{} + err = c.client.Put(). + Namespace(c.ns). + Resource("conditions"). + Name(condition.Name). + Body(condition). + Do(). + Into(result) + return +} + +// Delete takes name of the condition and deletes it. Returns an error if one occurs. +func (c *conditions) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("conditions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *conditions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("conditions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched condition. +func (c *conditions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Condition, err error) { + result = &v1alpha1.Condition{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("conditions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go new file mode 100644 index 0000000000..69ed294b82 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..a3b6b03b04 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ClusterTaskExpansion interface{} + +type ConditionExpansion interface{} + +type PipelineExpansion interface{} + +type PipelineRunExpansion interface{} + +type TaskExpansion interface{} + +type TaskRunExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go new file mode 100644 index 0000000000..46bcb55606 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PipelinesGetter has a method to return a PipelineInterface. +// A group's client should implement this interface. +type PipelinesGetter interface { + Pipelines(namespace string) PipelineInterface +} + +// PipelineInterface has methods to work with Pipeline resources. 
+type PipelineInterface interface { + Create(*v1alpha1.Pipeline) (*v1alpha1.Pipeline, error) + Update(*v1alpha1.Pipeline) (*v1alpha1.Pipeline, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Pipeline, error) + List(opts v1.ListOptions) (*v1alpha1.PipelineList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Pipeline, err error) + PipelineExpansion +} + +// pipelines implements PipelineInterface +type pipelines struct { + client rest.Interface + ns string +} + +// newPipelines returns a Pipelines +func newPipelines(c *TektonV1alpha1Client, namespace string) *pipelines { + return &pipelines{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pipeline, and returns the corresponding pipeline object, and an error if there is any. +func (c *pipelines) Get(name string, options v1.GetOptions) (result *v1alpha1.Pipeline, err error) { + result = &v1alpha1.Pipeline{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelines"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pipelines that match those selectors. +func (c *pipelines) List(opts v1.ListOptions) (result *v1alpha1.PipelineList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PipelineList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelines"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pipelines. 
+func (c *pipelines) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pipelines"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a pipeline and creates it. Returns the server's representation of the pipeline, and an error, if there is any. +func (c *pipelines) Create(pipeline *v1alpha1.Pipeline) (result *v1alpha1.Pipeline, err error) { + result = &v1alpha1.Pipeline{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pipelines"). + Body(pipeline). + Do(). + Into(result) + return +} + +// Update takes the representation of a pipeline and updates it. Returns the server's representation of the pipeline, and an error, if there is any. +func (c *pipelines) Update(pipeline *v1alpha1.Pipeline) (result *v1alpha1.Pipeline, err error) { + result = &v1alpha1.Pipeline{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelines"). + Name(pipeline.Name). + Body(pipeline). + Do(). + Into(result) + return +} + +// Delete takes name of the pipeline and deletes it. Returns an error if one occurs. +func (c *pipelines) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelines"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pipelines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelines"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). 
+ Error() +} + +// Patch applies the patch and returns the patched pipeline. +func (c *pipelines) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Pipeline, err error) { + result = &v1alpha1.Pipeline{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pipelines"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go new file mode 100644 index 0000000000..f96f3e34db --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go @@ -0,0 +1,114 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type TektonV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterTasksGetter + ConditionsGetter + PipelinesGetter + PipelineRunsGetter + TasksGetter + TaskRunsGetter +} + +// TektonV1alpha1Client is used to interact with features provided by the tekton.dev group. 
+type TektonV1alpha1Client struct { + restClient rest.Interface +} + +func (c *TektonV1alpha1Client) ClusterTasks() ClusterTaskInterface { + return newClusterTasks(c) +} + +func (c *TektonV1alpha1Client) Conditions(namespace string) ConditionInterface { + return newConditions(c, namespace) +} + +func (c *TektonV1alpha1Client) Pipelines(namespace string) PipelineInterface { + return newPipelines(c, namespace) +} + +func (c *TektonV1alpha1Client) PipelineRuns(namespace string) PipelineRunInterface { + return newPipelineRuns(c, namespace) +} + +func (c *TektonV1alpha1Client) Tasks(namespace string) TaskInterface { + return newTasks(c, namespace) +} + +func (c *TektonV1alpha1Client) TaskRuns(namespace string) TaskRunInterface { + return newTaskRuns(c, namespace) +} + +// NewForConfig creates a new TektonV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*TektonV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &TektonV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new TektonV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *TektonV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new TektonV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *TektonV1alpha1Client { + return &TektonV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *TektonV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go new file mode 100644 index 0000000000..e76542c45a --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PipelineRunsGetter has a method to return a PipelineRunInterface. +// A group's client should implement this interface. +type PipelineRunsGetter interface { + PipelineRuns(namespace string) PipelineRunInterface +} + +// PipelineRunInterface has methods to work with PipelineRun resources. +type PipelineRunInterface interface { + Create(*v1alpha1.PipelineRun) (*v1alpha1.PipelineRun, error) + Update(*v1alpha1.PipelineRun) (*v1alpha1.PipelineRun, error) + UpdateStatus(*v1alpha1.PipelineRun) (*v1alpha1.PipelineRun, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PipelineRun, error) + List(opts v1.ListOptions) (*v1alpha1.PipelineRunList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PipelineRun, err error) + PipelineRunExpansion +} + +// pipelineRuns implements PipelineRunInterface +type pipelineRuns struct { + client rest.Interface + ns string +} + +// newPipelineRuns returns a PipelineRuns +func newPipelineRuns(c *TektonV1alpha1Client, namespace string) *pipelineRuns { + return &pipelineRuns{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pipelineRun, and returns the corresponding pipelineRun object, and an error if there is any. +func (c *pipelineRuns) Get(name string, options v1.GetOptions) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("pipelineruns"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PipelineRuns that match those selectors. +func (c *pipelineRuns) List(opts v1.ListOptions) (result *v1alpha1.PipelineRunList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PipelineRunList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pipelineRuns. +func (c *pipelineRuns) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a pipelineRun and creates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *pipelineRuns) Create(pipelineRun *v1alpha1.PipelineRun) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pipelineruns"). + Body(pipelineRun). + Do(). + Into(result) + return +} + +// Update takes the representation of a pipelineRun and updates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *pipelineRuns) Update(pipelineRun *v1alpha1.PipelineRun) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(pipelineRun.Name). + Body(pipelineRun). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *pipelineRuns) UpdateStatus(pipelineRun *v1alpha1.PipelineRun) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(pipelineRun.Name). + SubResource("status"). + Body(pipelineRun). + Do(). + Into(result) + return +} + +// Delete takes name of the pipelineRun and deletes it. Returns an error if one occurs. +func (c *pipelineRuns) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pipelineRuns) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched pipelineRun. +func (c *pipelineRuns) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PipelineRun, err error) { + result = &v1alpha1.PipelineRun{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pipelineruns"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go new file mode 100644 index 0000000000..4009628782 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// TasksGetter has a method to return a TaskInterface. +// A group's client should implement this interface. +type TasksGetter interface { + Tasks(namespace string) TaskInterface +} + +// TaskInterface has methods to work with Task resources. 
+type TaskInterface interface { + Create(*v1alpha1.Task) (*v1alpha1.Task, error) + Update(*v1alpha1.Task) (*v1alpha1.Task, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Task, error) + List(opts v1.ListOptions) (*v1alpha1.TaskList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Task, err error) + TaskExpansion +} + +// tasks implements TaskInterface +type tasks struct { + client rest.Interface + ns string +} + +// newTasks returns a Tasks +func newTasks(c *TektonV1alpha1Client, namespace string) *tasks { + return &tasks{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the task, and returns the corresponding task object, and an error if there is any. +func (c *tasks) Get(name string, options v1.GetOptions) (result *v1alpha1.Task, err error) { + result = &v1alpha1.Task{} + err = c.client.Get(). + Namespace(c.ns). + Resource("tasks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Tasks that match those selectors. +func (c *tasks) List(opts v1.ListOptions) (result *v1alpha1.TaskList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.TaskList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("tasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested tasks. 
+func (c *tasks) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("tasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a task and creates it. Returns the server's representation of the task, and an error, if there is any. +func (c *tasks) Create(task *v1alpha1.Task) (result *v1alpha1.Task, err error) { + result = &v1alpha1.Task{} + err = c.client.Post(). + Namespace(c.ns). + Resource("tasks"). + Body(task). + Do(). + Into(result) + return +} + +// Update takes the representation of a task and updates it. Returns the server's representation of the task, and an error, if there is any. +func (c *tasks) Update(task *v1alpha1.Task) (result *v1alpha1.Task, err error) { + result = &v1alpha1.Task{} + err = c.client.Put(). + Namespace(c.ns). + Resource("tasks"). + Name(task.Name). + Body(task). + Do(). + Into(result) + return +} + +// Delete takes name of the task and deletes it. Returns an error if one occurs. +func (c *tasks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("tasks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *tasks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("tasks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched task. 
+func (c *tasks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Task, err error) { + result = &v1alpha1.Task{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("tasks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go new file mode 100644 index 0000000000..3335adb489 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// TaskRunsGetter has a method to return a TaskRunInterface. +// A group's client should implement this interface. +type TaskRunsGetter interface { + TaskRuns(namespace string) TaskRunInterface +} + +// TaskRunInterface has methods to work with TaskRun resources. 
+type TaskRunInterface interface { + Create(*v1alpha1.TaskRun) (*v1alpha1.TaskRun, error) + Update(*v1alpha1.TaskRun) (*v1alpha1.TaskRun, error) + UpdateStatus(*v1alpha1.TaskRun) (*v1alpha1.TaskRun, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.TaskRun, error) + List(opts v1.ListOptions) (*v1alpha1.TaskRunList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TaskRun, err error) + TaskRunExpansion +} + +// taskRuns implements TaskRunInterface +type taskRuns struct { + client rest.Interface + ns string +} + +// newTaskRuns returns a TaskRuns +func newTaskRuns(c *TektonV1alpha1Client, namespace string) *taskRuns { + return &taskRuns{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the taskRun, and returns the corresponding taskRun object, and an error if there is any. +func (c *taskRuns) Get(name string, options v1.GetOptions) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of TaskRuns that match those selectors. +func (c *taskRuns) List(opts v1.ListOptions) (result *v1alpha1.TaskRunList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.TaskRunList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested taskRuns. 
+func (c *taskRuns) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a taskRun and creates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *taskRuns) Create(taskRun *v1alpha1.TaskRun) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Post(). + Namespace(c.ns). + Resource("taskruns"). + Body(taskRun). + Do(). + Into(result) + return +} + +// Update takes the representation of a taskRun and updates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *taskRuns) Update(taskRun *v1alpha1.TaskRun) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("taskruns"). + Name(taskRun.Name). + Body(taskRun). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *taskRuns) UpdateStatus(taskRun *v1alpha1.TaskRun) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("taskruns"). + Name(taskRun.Name). + SubResource("status"). + Body(taskRun). + Do(). + Into(result) + return +} + +// Delete takes name of the taskRun and deletes it. Returns an error if one occurs. +func (c *taskRuns) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("taskruns"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *taskRuns) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched taskRun. +func (c *taskRuns) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TaskRun, err error) { + result = &v1alpha1.TaskRun{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("taskruns"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/clustertask.go new file mode 100644 index 0000000000..8961ca389e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/clustertask.go @@ -0,0 +1,164 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "time" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterTasksGetter has a method to return a ClusterTaskInterface. +// A group's client should implement this interface. +type ClusterTasksGetter interface { + ClusterTasks() ClusterTaskInterface +} + +// ClusterTaskInterface has methods to work with ClusterTask resources. +type ClusterTaskInterface interface { + Create(*v1beta1.ClusterTask) (*v1beta1.ClusterTask, error) + Update(*v1beta1.ClusterTask) (*v1beta1.ClusterTask, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ClusterTask, error) + List(opts v1.ListOptions) (*v1beta1.ClusterTaskList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterTask, err error) + ClusterTaskExpansion +} + +// clusterTasks implements ClusterTaskInterface +type clusterTasks struct { + client rest.Interface +} + +// newClusterTasks returns a ClusterTasks +func newClusterTasks(c *TektonV1beta1Client) *clusterTasks { + return &clusterTasks{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterTask, and returns the corresponding clusterTask object, and an error if there is any. +func (c *clusterTasks) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterTask, err error) { + result = &v1beta1.ClusterTask{} + err = c.client.Get(). + Resource("clustertasks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterTasks that match those selectors. +func (c *clusterTasks) List(opts v1.ListOptions) (result *v1beta1.ClusterTaskList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ClusterTaskList{} + err = c.client.Get(). + Resource("clustertasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterTasks. +func (c *clusterTasks) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustertasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterTask and creates it. Returns the server's representation of the clusterTask, and an error, if there is any. +func (c *clusterTasks) Create(clusterTask *v1beta1.ClusterTask) (result *v1beta1.ClusterTask, err error) { + result = &v1beta1.ClusterTask{} + err = c.client.Post(). + Resource("clustertasks"). + Body(clusterTask). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterTask and updates it. Returns the server's representation of the clusterTask, and an error, if there is any. +func (c *clusterTasks) Update(clusterTask *v1beta1.ClusterTask) (result *v1beta1.ClusterTask, err error) { + result = &v1beta1.ClusterTask{} + err = c.client.Put(). + Resource("clustertasks"). + Name(clusterTask.Name). + Body(clusterTask). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterTask and deletes it. Returns an error if one occurs. 
+func (c *clusterTasks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustertasks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterTasks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustertasks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterTask. +func (c *clusterTasks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterTask, err error) { + result = &v1beta1.ClusterTask{} + err = c.client.Patch(pt). + Resource("clustertasks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/doc.go new file mode 100644 index 0000000000..acfb8c0b67 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..83951f9851 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type ClusterTaskExpansion interface{} + +type PipelineExpansion interface{} + +type PipelineRunExpansion interface{} + +type TaskExpansion interface{} + +type TaskRunExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline.go new file mode 100644 index 0000000000..faea5452f7 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PipelinesGetter has a method to return a PipelineInterface. +// A group's client should implement this interface. +type PipelinesGetter interface { + Pipelines(namespace string) PipelineInterface +} + +// PipelineInterface has methods to work with Pipeline resources. 
+type PipelineInterface interface { + Create(*v1beta1.Pipeline) (*v1beta1.Pipeline, error) + Update(*v1beta1.Pipeline) (*v1beta1.Pipeline, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Pipeline, error) + List(opts v1.ListOptions) (*v1beta1.PipelineList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Pipeline, err error) + PipelineExpansion +} + +// pipelines implements PipelineInterface +type pipelines struct { + client rest.Interface + ns string +} + +// newPipelines returns a Pipelines +func newPipelines(c *TektonV1beta1Client, namespace string) *pipelines { + return &pipelines{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pipeline, and returns the corresponding pipeline object, and an error if there is any. +func (c *pipelines) Get(name string, options v1.GetOptions) (result *v1beta1.Pipeline, err error) { + result = &v1beta1.Pipeline{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelines"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pipelines that match those selectors. +func (c *pipelines) List(opts v1.ListOptions) (result *v1beta1.PipelineList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PipelineList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelines"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pipelines. 
+func (c *pipelines) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pipelines"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a pipeline and creates it. Returns the server's representation of the pipeline, and an error, if there is any. +func (c *pipelines) Create(pipeline *v1beta1.Pipeline) (result *v1beta1.Pipeline, err error) { + result = &v1beta1.Pipeline{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pipelines"). + Body(pipeline). + Do(). + Into(result) + return +} + +// Update takes the representation of a pipeline and updates it. Returns the server's representation of the pipeline, and an error, if there is any. +func (c *pipelines) Update(pipeline *v1beta1.Pipeline) (result *v1beta1.Pipeline, err error) { + result = &v1beta1.Pipeline{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelines"). + Name(pipeline.Name). + Body(pipeline). + Do(). + Into(result) + return +} + +// Delete takes name of the pipeline and deletes it. Returns an error if one occurs. +func (c *pipelines) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelines"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pipelines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelines"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). 
+ Error() +} + +// Patch applies the patch and returns the patched pipeline. +func (c *pipelines) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Pipeline, err error) { + result = &v1beta1.Pipeline{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pipelines"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go new file mode 100644 index 0000000000..9c3335eb47 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go @@ -0,0 +1,109 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type TektonV1beta1Interface interface { + RESTClient() rest.Interface + ClusterTasksGetter + PipelinesGetter + PipelineRunsGetter + TasksGetter + TaskRunsGetter +} + +// TektonV1beta1Client is used to interact with features provided by the tekton.dev group. 
+type TektonV1beta1Client struct { + restClient rest.Interface +} + +func (c *TektonV1beta1Client) ClusterTasks() ClusterTaskInterface { + return newClusterTasks(c) +} + +func (c *TektonV1beta1Client) Pipelines(namespace string) PipelineInterface { + return newPipelines(c, namespace) +} + +func (c *TektonV1beta1Client) PipelineRuns(namespace string) PipelineRunInterface { + return newPipelineRuns(c, namespace) +} + +func (c *TektonV1beta1Client) Tasks(namespace string) TaskInterface { + return newTasks(c, namespace) +} + +func (c *TektonV1beta1Client) TaskRuns(namespace string) TaskRunInterface { + return newTaskRuns(c, namespace) +} + +// NewForConfig creates a new TektonV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*TektonV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &TektonV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new TektonV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *TektonV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new TektonV1beta1Client for the given RESTClient. +func New(c rest.Interface) *TektonV1beta1Client { + return &TektonV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *TektonV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipelinerun.go new file mode 100644 index 0000000000..015c63c05d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipelinerun.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PipelineRunsGetter has a method to return a PipelineRunInterface. +// A group's client should implement this interface. +type PipelineRunsGetter interface { + PipelineRuns(namespace string) PipelineRunInterface +} + +// PipelineRunInterface has methods to work with PipelineRun resources. 
+type PipelineRunInterface interface { + Create(*v1beta1.PipelineRun) (*v1beta1.PipelineRun, error) + Update(*v1beta1.PipelineRun) (*v1beta1.PipelineRun, error) + UpdateStatus(*v1beta1.PipelineRun) (*v1beta1.PipelineRun, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PipelineRun, error) + List(opts v1.ListOptions) (*v1beta1.PipelineRunList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PipelineRun, err error) + PipelineRunExpansion +} + +// pipelineRuns implements PipelineRunInterface +type pipelineRuns struct { + client rest.Interface + ns string +} + +// newPipelineRuns returns a PipelineRuns +func newPipelineRuns(c *TektonV1beta1Client, namespace string) *pipelineRuns { + return &pipelineRuns{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pipelineRun, and returns the corresponding pipelineRun object, and an error if there is any. +func (c *pipelineRuns) Get(name string, options v1.GetOptions) (result *v1beta1.PipelineRun, err error) { + result = &v1beta1.PipelineRun{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PipelineRuns that match those selectors. +func (c *pipelineRuns) List(opts v1.ListOptions) (result *v1beta1.PipelineRunList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PipelineRunList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pipelineRuns. +func (c *pipelineRuns) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a pipelineRun and creates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *pipelineRuns) Create(pipelineRun *v1beta1.PipelineRun) (result *v1beta1.PipelineRun, err error) { + result = &v1beta1.PipelineRun{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pipelineruns"). + Body(pipelineRun). + Do(). + Into(result) + return +} + +// Update takes the representation of a pipelineRun and updates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *pipelineRuns) Update(pipelineRun *v1beta1.PipelineRun) (result *v1beta1.PipelineRun, err error) { + result = &v1beta1.PipelineRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(pipelineRun.Name). + Body(pipelineRun). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *pipelineRuns) UpdateStatus(pipelineRun *v1beta1.PipelineRun) (result *v1beta1.PipelineRun, err error) { + result = &v1beta1.PipelineRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(pipelineRun.Name). + SubResource("status"). + Body(pipelineRun). + Do(). + Into(result) + return +} + +// Delete takes name of the pipelineRun and deletes it. Returns an error if one occurs. 
+func (c *pipelineRuns) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pipelineRuns) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched pipelineRun. +func (c *pipelineRuns) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PipelineRun, err error) { + result = &v1beta1.PipelineRun{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pipelineruns"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/task.go new file mode 100644 index 0000000000..998e6f85a4 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/task.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// TasksGetter has a method to return a TaskInterface. +// A group's client should implement this interface. +type TasksGetter interface { + Tasks(namespace string) TaskInterface +} + +// TaskInterface has methods to work with Task resources. +type TaskInterface interface { + Create(*v1beta1.Task) (*v1beta1.Task, error) + Update(*v1beta1.Task) (*v1beta1.Task, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Task, error) + List(opts v1.ListOptions) (*v1beta1.TaskList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Task, err error) + TaskExpansion +} + +// tasks implements TaskInterface +type tasks struct { + client rest.Interface + ns string +} + +// newTasks returns a Tasks +func newTasks(c *TektonV1beta1Client, namespace string) *tasks { + return &tasks{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the task, and returns the corresponding task object, and an error if there is any. +func (c *tasks) Get(name string, options v1.GetOptions) (result *v1beta1.Task, err error) { + result = &v1beta1.Task{} + err = c.client.Get(). + Namespace(c.ns). + Resource("tasks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of Tasks that match those selectors. +func (c *tasks) List(opts v1.ListOptions) (result *v1beta1.TaskList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.TaskList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("tasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested tasks. +func (c *tasks) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("tasks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a task and creates it. Returns the server's representation of the task, and an error, if there is any. +func (c *tasks) Create(task *v1beta1.Task) (result *v1beta1.Task, err error) { + result = &v1beta1.Task{} + err = c.client.Post(). + Namespace(c.ns). + Resource("tasks"). + Body(task). + Do(). + Into(result) + return +} + +// Update takes the representation of a task and updates it. Returns the server's representation of the task, and an error, if there is any. +func (c *tasks) Update(task *v1beta1.Task) (result *v1beta1.Task, err error) { + result = &v1beta1.Task{} + err = c.client.Put(). + Namespace(c.ns). + Resource("tasks"). + Name(task.Name). + Body(task). + Do(). + Into(result) + return +} + +// Delete takes name of the task and deletes it. Returns an error if one occurs. +func (c *tasks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("tasks"). + Name(name). + Body(options). + Do(). 
+ Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *tasks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("tasks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched task. +func (c *tasks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Task, err error) { + result = &v1beta1.Task{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("tasks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/taskrun.go new file mode 100644 index 0000000000..e48b23f3a9 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/taskrun.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "time" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// TaskRunsGetter has a method to return a TaskRunInterface. +// A group's client should implement this interface. +type TaskRunsGetter interface { + TaskRuns(namespace string) TaskRunInterface +} + +// TaskRunInterface has methods to work with TaskRun resources. +type TaskRunInterface interface { + Create(*v1beta1.TaskRun) (*v1beta1.TaskRun, error) + Update(*v1beta1.TaskRun) (*v1beta1.TaskRun, error) + UpdateStatus(*v1beta1.TaskRun) (*v1beta1.TaskRun, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.TaskRun, error) + List(opts v1.ListOptions) (*v1beta1.TaskRunList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.TaskRun, err error) + TaskRunExpansion +} + +// taskRuns implements TaskRunInterface +type taskRuns struct { + client rest.Interface + ns string +} + +// newTaskRuns returns a TaskRuns +func newTaskRuns(c *TektonV1beta1Client, namespace string) *taskRuns { + return &taskRuns{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the taskRun, and returns the corresponding taskRun object, and an error if there is any. +func (c *taskRuns) Get(name string, options v1.GetOptions) (result *v1beta1.TaskRun, err error) { + result = &v1beta1.TaskRun{} + err = c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of TaskRuns that match those selectors. +func (c *taskRuns) List(opts v1.ListOptions) (result *v1beta1.TaskRunList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.TaskRunList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested taskRuns. +func (c *taskRuns) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a taskRun and creates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *taskRuns) Create(taskRun *v1beta1.TaskRun) (result *v1beta1.TaskRun, err error) { + result = &v1beta1.TaskRun{} + err = c.client.Post(). + Namespace(c.ns). + Resource("taskruns"). + Body(taskRun). + Do(). + Into(result) + return +} + +// Update takes the representation of a taskRun and updates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *taskRuns) Update(taskRun *v1beta1.TaskRun) (result *v1beta1.TaskRun, err error) { + result = &v1beta1.TaskRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("taskruns"). + Name(taskRun.Name). + Body(taskRun). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *taskRuns) UpdateStatus(taskRun *v1beta1.TaskRun) (result *v1beta1.TaskRun, err error) { + result = &v1beta1.TaskRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("taskruns"). + Name(taskRun.Name). + SubResource("status"). + Body(taskRun). + Do(). + Into(result) + return +} + +// Delete takes name of the taskRun and deletes it. Returns an error if one occurs. +func (c *taskRuns) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("taskruns"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *taskRuns) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched taskRun. +func (c *taskRuns) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.TaskRun, err error) { + result = &v1beta1.TaskRun{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("taskruns"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/gonum.org/v1/gonum/AUTHORS b/vendor/gonum.org/v1/gonum/AUTHORS new file mode 100644 index 0000000000..417db9b890 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/AUTHORS @@ -0,0 +1,89 @@ +# This is the official list of gonum authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. 
+ +Alexander Egurnov +Bill Gray +Bill Noon +Brendan Tracey +Brent Pedersen +Chad Kunde +Chih-Wei Chang +Chris Tessum +Christophe Meessen +Clayton Northey +Dan Kortschak +Daniel Fireman +David Samborski +Davor Kapsa +DeepMind Technologies +Dezmond Goff +Egon Elbre +Ekaterina Efimova +Ethan Burns +Evert Lammerts +Facundo Gaich +Fazlul Shahriar +Francesc Campoy +Google Inc +Gustaf Johansson +Iakov Davydov +Igor Mikushkin +Iskander Sharipov +Jalem Raj Rohit +James Bell +James Bowman +James Holmes <32bitkid@gmail.com> +Janne Snabb +Jeff Juozapaitis +Jeremy Atkinson +Jonas Kahler +Jonas Schulze +Jonathan J Lawlor +Jonathan Schroeder +Joseph Watson +Josh Wilson +Julien Roland +Kai Trukenmüller +Kent English +Kevin C. Zimmerman +Kirill Motkov +Konstantin Shaposhnikov +Leonid Kneller +Lyron Winderbaum +Martin Diz +Matthieu Di Mercurio +Max Halford +MinJae Kwon +Nick Potts +Olivier Wulveryck +Or Rikon +Pontus Melke +Renée French +Rishi Desai +Robin Eklind +Sam Zaydel +Samuel Kelemen +Saran Ahluwalia +Scott Holden +Sebastien Binet +Shawn Smith +source{d} +Spencer Lyon +Steve McCoy +Taesu Pyo +Takeshi Yoneda +The University of Adelaide +The University of Minnesota +The University of Washington +Thomas Berg +Tobin Harding +Vincent Thiery +Vladimír Chalupecký +Yevgeniy Vahlis diff --git a/vendor/gonum.org/v1/gonum/CONTRIBUTORS b/vendor/gonum.org/v1/gonum/CONTRIBUTORS new file mode 100644 index 0000000000..0c7cc46a74 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/CONTRIBUTORS @@ -0,0 +1,91 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the gonum +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees would be listed here +# but not in AUTHORS, because Google would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. 
+# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Alexander Egurnov +Andrew Brampton +Bill Gray +Bill Noon +Brendan Tracey +Brent Pedersen +Chad Kunde +Chih-Wei Chang +Chris Tessum +Christophe Meessen +Clayton Northey +Dan Kortschak +Daniel Fireman +David Samborski +Davor Kapsa +Dezmond Goff +Egon Elbre +Ekaterina Efimova +Ethan Burns +Evert Lammerts +Facundo Gaich +Fazlul Shahriar +Francesc Campoy +Gustaf Johansson +Iakov Davydov +Igor Mikushkin +Iskander Sharipov +Jalem Raj Rohit +James Bell +James Bowman +James Holmes <32bitkid@gmail.com> +Janne Snabb +Jeff Juozapaitis +Jeremy Atkinson +Jonas Kahler +Jonas Schulze +Jonathan J Lawlor +Jonathan Schroeder +Joseph Watson +Josh Wilson +Julien Roland +Kai Trukenmüller +Kent English +Kevin C. Zimmerman +Kirill Motkov +Konstantin Shaposhnikov +Leonid Kneller +Lyron Winderbaum +Martin Diz +Matthieu Di Mercurio +Max Halford +MinJae Kwon +Nick Potts +Olivier Wulveryck +Or Rikon +Pontus Melke +Renée French +Rishi Desai +Robin Eklind +Sam Zaydel +Samuel Kelemen +Saran Ahluwalia +Scott Holden +Sebastien Binet +Shawn Smith +Spencer Lyon +Steve McCoy +Taesu Pyo +Takeshi Yoneda +Thomas Berg +Tobin Harding +Vincent Thiery +Vladimír Chalupecký +Yevgeniy Vahlis diff --git a/vendor/gonum.org/v1/gonum/LICENSE b/vendor/gonum.org/v1/gonum/LICENSE new file mode 100644 index 0000000000..5f1c3f9ccf --- /dev/null +++ b/vendor/gonum.org/v1/gonum/LICENSE @@ -0,0 +1,23 @@ +Copyright ©2013 The Gonum Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the gonum project nor the names of its authors and + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/blas/README.md b/vendor/gonum.org/v1/gonum/blas/README.md new file mode 100644 index 0000000000..e9d33eeeb3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/README.md @@ -0,0 +1,47 @@ +# Gonum BLAS [![GoDoc](https://godoc.org/gonum.org/v1/gonum/blas?status.svg)](https://godoc.org/gonum.org/v1/gonum/blas) + +A collection of packages to provide BLAS functionality for the [Go programming +language](http://golang.org) + +## Installation +```sh + go get gonum.org/v1/gonum/blas/... +``` + +## Packages + +### blas + +Defines [BLAS API](http://www.netlib.org/blas/blast-forum/cinterface.pdf) split in several +interfaces. + +### blas/gonum + +Go implementation of the BLAS API (incomplete, implements the `float32` and `float64` API). 
+ +### blas/blas64 and blas/blas32 + +Wrappers for an implementation of the double (i.e., `float64`) and single (`float32`) +precision real parts of the BLAS API. + +```Go +package main + +import ( + "fmt" + + "gonum.org/v1/gonum/blas/blas64" +) + +func main() { + v := blas64.Vector{Inc: 1, Data: []float64{1, 1, 1}} + fmt.Println("v has length:", blas64.Nrm2(len(v.Data), v)) +} +``` + +### blas/cblas128 and blas/cblas64 + +Wrappers for an implementation of the double (i.e., `complex128`) and single (`complex64`) +precision complex parts of the blas API. + +Currently blas/cblas64 and blas/cblas128 require gonum.org/v1/netlib/blas. diff --git a/vendor/gonum.org/v1/gonum/blas/blas.go b/vendor/gonum.org/v1/gonum/blas/blas.go new file mode 100644 index 0000000000..9b933e3fc5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas.go @@ -0,0 +1,283 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate ./conversions.bash + +package blas + +// Flag constants indicate Givens transformation H matrix state. +type Flag int + +const ( + Identity Flag = -2 // H is the identity matrix; no rotation is needed. + Rescaling Flag = -1 // H specifies rescaling. + OffDiagonal Flag = 0 // Off-diagonal elements of H are non-unit. + Diagonal Flag = 1 // Diagonal elements of H are non-unit. +) + +// SrotmParams contains Givens transformation parameters returned +// by the Float32 Srotm method. +type SrotmParams struct { + Flag + H [4]float32 // Column-major 2 by 2 matrix. +} + +// DrotmParams contains Givens transformation parameters returned +// by the Float64 Drotm method. +type DrotmParams struct { + Flag + H [4]float64 // Column-major 2 by 2 matrix. +} + +// Transpose specifies the transposition operation of a matrix. 
+type Transpose byte + +const ( + NoTrans Transpose = 'N' + Trans Transpose = 'T' + ConjTrans Transpose = 'C' +) + +// Uplo specifies whether a matrix is upper or lower triangular. +type Uplo byte + +const ( + Upper Uplo = 'U' + Lower Uplo = 'L' + All Uplo = 'A' +) + +// Diag specifies whether a matrix is unit triangular. +type Diag byte + +const ( + NonUnit Diag = 'N' + Unit Diag = 'U' +) + +// Side specifies from which side a multiplication operation is performed. +type Side byte + +const ( + Left Side = 'L' + Right Side = 'R' +) + +// Float32 implements the single precision real BLAS routines. +type Float32 interface { + Float32Level1 + Float32Level2 + Float32Level3 +} + +// Float32Level1 implements the single precision real BLAS Level 1 routines. +type Float32Level1 interface { + Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 + Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 + Sdot(n int, x []float32, incX int, y []float32, incY int) float32 + Snrm2(n int, x []float32, incX int) float32 + Sasum(n int, x []float32, incX int) float32 + Isamax(n int, x []float32, incX int) int + Sswap(n int, x []float32, incX int, y []float32, incY int) + Scopy(n int, x []float32, incX int, y []float32, incY int) + Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) + Srotg(a, b float32) (c, s, r, z float32) + Srotmg(d1, d2, b1, b2 float32) (p SrotmParams, rd1, rd2, rb1 float32) + Srot(n int, x []float32, incX int, y []float32, incY int, c, s float32) + Srotm(n int, x []float32, incX int, y []float32, incY int, p SrotmParams) + Sscal(n int, alpha float32, x []float32, incX int) +} + +// Float32Level2 implements the single precision real BLAS Level 2 routines. 
+type Float32Level2 interface { + Sgemv(tA Transpose, m, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Sgbmv(tA Transpose, m, n, kL, kU int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Strmv(ul Uplo, tA Transpose, d Diag, n int, a []float32, lda int, x []float32, incX int) + Stbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []float32, lda int, x []float32, incX int) + Stpmv(ul Uplo, tA Transpose, d Diag, n int, ap []float32, x []float32, incX int) + Strsv(ul Uplo, tA Transpose, d Diag, n int, a []float32, lda int, x []float32, incX int) + Stbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []float32, lda int, x []float32, incX int) + Stpsv(ul Uplo, tA Transpose, d Diag, n int, ap []float32, x []float32, incX int) + Ssymv(ul Uplo, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Ssbmv(ul Uplo, n, k int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) + Sspmv(ul Uplo, n int, alpha float32, ap []float32, x []float32, incX int, beta float32, y []float32, incY int) + Sger(m, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) + Ssyr(ul Uplo, n int, alpha float32, x []float32, incX int, a []float32, lda int) + Sspr(ul Uplo, n int, alpha float32, x []float32, incX int, ap []float32) + Ssyr2(ul Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) + Sspr2(ul Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32) +} + +// Float32Level3 implements the single precision real BLAS Level 3 routines. 
+type Float32Level3 interface { + Sgemm(tA, tB Transpose, m, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) + Ssymm(s Side, ul Uplo, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) + Ssyrk(ul Uplo, t Transpose, n, k int, alpha float32, a []float32, lda int, beta float32, c []float32, ldc int) + Ssyr2k(ul Uplo, t Transpose, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) + Strmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) + Strsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) +} + +// Float64 implements the single precision real BLAS routines. +type Float64 interface { + Float64Level1 + Float64Level2 + Float64Level3 +} + +// Float64Level1 implements the double precision real BLAS Level 1 routines. +type Float64Level1 interface { + Ddot(n int, x []float64, incX int, y []float64, incY int) float64 + Dnrm2(n int, x []float64, incX int) float64 + Dasum(n int, x []float64, incX int) float64 + Idamax(n int, x []float64, incX int) int + Dswap(n int, x []float64, incX int, y []float64, incY int) + Dcopy(n int, x []float64, incX int, y []float64, incY int) + Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) + Drotg(a, b float64) (c, s, r, z float64) + Drotmg(d1, d2, b1, b2 float64) (p DrotmParams, rd1, rd2, rb1 float64) + Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) + Drotm(n int, x []float64, incX int, y []float64, incY int, p DrotmParams) + Dscal(n int, alpha float64, x []float64, incX int) +} + +// Float64Level2 implements the double precision real BLAS Level 2 routines. 
+type Float64Level2 interface { + Dgemv(tA Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dgbmv(tA Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dtrmv(ul Uplo, tA Transpose, d Diag, n int, a []float64, lda int, x []float64, incX int) + Dtbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []float64, lda int, x []float64, incX int) + Dtpmv(ul Uplo, tA Transpose, d Diag, n int, ap []float64, x []float64, incX int) + Dtrsv(ul Uplo, tA Transpose, d Diag, n int, a []float64, lda int, x []float64, incX int) + Dtbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []float64, lda int, x []float64, incX int) + Dtpsv(ul Uplo, tA Transpose, d Diag, n int, ap []float64, x []float64, incX int) + Dsymv(ul Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dsbmv(ul Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) + Dspmv(ul Uplo, n int, alpha float64, ap []float64, x []float64, incX int, beta float64, y []float64, incY int) + Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) + Dsyr(ul Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) + Dspr(ul Uplo, n int, alpha float64, x []float64, incX int, ap []float64) + Dsyr2(ul Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) + Dspr2(ul Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64) +} + +// Float64Level3 implements the double precision real BLAS Level 3 routines. 
+type Float64Level3 interface { + Dgemm(tA, tB Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) + Dsymm(s Side, ul Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) + Dsyrk(ul Uplo, t Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) + Dsyr2k(ul Uplo, t Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) + Dtrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) + Dtrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) +} + +// Complex64 implements the single precision complex BLAS routines. +type Complex64 interface { + Complex64Level1 + Complex64Level2 + Complex64Level3 +} + +// Complex64Level1 implements the single precision complex BLAS Level 1 routines. +type Complex64Level1 interface { + Cdotu(n int, x []complex64, incX int, y []complex64, incY int) (dotu complex64) + Cdotc(n int, x []complex64, incX int, y []complex64, incY int) (dotc complex64) + Scnrm2(n int, x []complex64, incX int) float32 + Scasum(n int, x []complex64, incX int) float32 + Icamax(n int, x []complex64, incX int) int + Cswap(n int, x []complex64, incX int, y []complex64, incY int) + Ccopy(n int, x []complex64, incX int, y []complex64, incY int) + Caxpy(n int, alpha complex64, x []complex64, incX int, y []complex64, incY int) + Cscal(n int, alpha complex64, x []complex64, incX int) + Csscal(n int, alpha float32, x []complex64, incX int) +} + +// Complex64Level2 implements the single precision complex BLAS routines Level 2 routines. 
+type Complex64Level2 interface { + Cgemv(tA Transpose, m, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Cgbmv(tA Transpose, m, n, kL, kU int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Ctrmv(ul Uplo, tA Transpose, d Diag, n int, a []complex64, lda int, x []complex64, incX int) + Ctbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex64, lda int, x []complex64, incX int) + Ctpmv(ul Uplo, tA Transpose, d Diag, n int, ap []complex64, x []complex64, incX int) + Ctrsv(ul Uplo, tA Transpose, d Diag, n int, a []complex64, lda int, x []complex64, incX int) + Ctbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex64, lda int, x []complex64, incX int) + Ctpsv(ul Uplo, tA Transpose, d Diag, n int, ap []complex64, x []complex64, incX int) + Chemv(ul Uplo, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Chbmv(ul Uplo, n, k int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) + Chpmv(ul Uplo, n int, alpha complex64, ap []complex64, x []complex64, incX int, beta complex64, y []complex64, incY int) + Cgeru(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) + Cgerc(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) + Cher(ul Uplo, n int, alpha float32, x []complex64, incX int, a []complex64, lda int) + Chpr(ul Uplo, n int, alpha float32, x []complex64, incX int, a []complex64) + Cher2(ul Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) + Chpr2(ul Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, ap []complex64) +} + +// Complex64Level3 implements the single precision complex BLAS Level 3 routines. 
+type Complex64Level3 interface { + Cgemm(tA, tB Transpose, m, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Csymm(s Side, ul Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Csyrk(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, beta complex64, c []complex64, ldc int) + Csyr2k(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Ctrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) + Ctrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) + Chemm(s Side, ul Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) + Cherk(ul Uplo, t Transpose, n, k int, alpha float32, a []complex64, lda int, beta float32, c []complex64, ldc int) + Cher2k(ul Uplo, t Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta float32, c []complex64, ldc int) +} + +// Complex128 implements the double precision complex BLAS routines. +type Complex128 interface { + Complex128Level1 + Complex128Level2 + Complex128Level3 +} + +// Complex128Level1 implements the double precision complex BLAS Level 1 routines. 
+type Complex128Level1 interface { + Zdotu(n int, x []complex128, incX int, y []complex128, incY int) (dotu complex128) + Zdotc(n int, x []complex128, incX int, y []complex128, incY int) (dotc complex128) + Dznrm2(n int, x []complex128, incX int) float64 + Dzasum(n int, x []complex128, incX int) float64 + Izamax(n int, x []complex128, incX int) int + Zswap(n int, x []complex128, incX int, y []complex128, incY int) + Zcopy(n int, x []complex128, incX int, y []complex128, incY int) + Zaxpy(n int, alpha complex128, x []complex128, incX int, y []complex128, incY int) + Zscal(n int, alpha complex128, x []complex128, incX int) + Zdscal(n int, alpha float64, x []complex128, incX int) +} + +// Complex128Level2 implements the double precision complex BLAS Level 2 routines. +type Complex128Level2 interface { + Zgemv(tA Transpose, m, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zgbmv(tA Transpose, m, n int, kL int, kU int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Ztrmv(ul Uplo, tA Transpose, d Diag, n int, a []complex128, lda int, x []complex128, incX int) + Ztbmv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex128, lda int, x []complex128, incX int) + Ztpmv(ul Uplo, tA Transpose, d Diag, n int, ap []complex128, x []complex128, incX int) + Ztrsv(ul Uplo, tA Transpose, d Diag, n int, a []complex128, lda int, x []complex128, incX int) + Ztbsv(ul Uplo, tA Transpose, d Diag, n, k int, a []complex128, lda int, x []complex128, incX int) + Ztpsv(ul Uplo, tA Transpose, d Diag, n int, ap []complex128, x []complex128, incX int) + Zhemv(ul Uplo, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zhbmv(ul Uplo, n, k int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zhpmv(ul Uplo, n int, alpha 
complex128, ap []complex128, x []complex128, incX int, beta complex128, y []complex128, incY int) + Zgeru(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) + Zgerc(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) + Zher(ul Uplo, n int, alpha float64, x []complex128, incX int, a []complex128, lda int) + Zhpr(ul Uplo, n int, alpha float64, x []complex128, incX int, a []complex128) + Zher2(ul Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) + Zhpr2(ul Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, ap []complex128) +} + +// Complex128Level3 implements the double precision complex BLAS Level 3 routines. +type Complex128Level3 interface { + Zgemm(tA, tB Transpose, m, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Zsymm(s Side, ul Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Zsyrk(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, beta complex128, c []complex128, ldc int) + Zsyr2k(ul Uplo, t Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Ztrmm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) + Ztrsm(s Side, ul Uplo, tA Transpose, d Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) + Zhemm(s Side, ul Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) + Zherk(ul Uplo, t Transpose, n, k int, alpha float64, a []complex128, lda int, beta float64, c []complex128, ldc int) + Zher2k(ul Uplo, t Transpose, n, k int, alpha complex128, a 
[]complex128, lda int, b []complex128, ldb int, beta float64, c []complex128, ldc int) +} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go b/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go new file mode 100644 index 0000000000..551983836c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas64/blas64.go @@ -0,0 +1,469 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blas64 + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/gonum" +) + +var blas64 blas.Float64 = gonum.Implementation{} + +// Use sets the BLAS float64 implementation to be used by subsequent BLAS calls. +// The default implementation is +// gonum.org/v1/gonum/blas/gonum.Implementation. +func Use(b blas.Float64) { + blas64 = b +} + +// Implementation returns the current BLAS float64 implementation. +// +// Implementation allows direct calls to the current the BLAS float64 implementation +// giving finer control of parameters. +func Implementation() blas.Float64 { + return blas64 +} + +// Vector represents a vector with an associated element increment. +type Vector struct { + N int + Data []float64 + Inc int +} + +// General represents a matrix using the conventional storage scheme. +type General struct { + Rows, Cols int + Data []float64 + Stride int +} + +// Band represents a band matrix using the band storage scheme. +type Band struct { + Rows, Cols int + KL, KU int + Data []float64 + Stride int +} + +// Triangular represents a triangular matrix using the conventional storage scheme. +type Triangular struct { + Uplo blas.Uplo + Diag blas.Diag + N int + Data []float64 + Stride int +} + +// TriangularBand represents a triangular matrix using the band storage scheme. 
+type TriangularBand struct { + Uplo blas.Uplo + Diag blas.Diag + N, K int + Data []float64 + Stride int +} + +// TriangularPacked represents a triangular matrix using the packed storage scheme. +type TriangularPacked struct { + Uplo blas.Uplo + Diag blas.Diag + N int + Data []float64 +} + +// Symmetric represents a symmetric matrix using the conventional storage scheme. +type Symmetric struct { + Uplo blas.Uplo + N int + Data []float64 + Stride int +} + +// SymmetricBand represents a symmetric matrix using the band storage scheme. +type SymmetricBand struct { + Uplo blas.Uplo + N, K int + Data []float64 + Stride int +} + +// SymmetricPacked represents a symmetric matrix using the packed storage scheme. +type SymmetricPacked struct { + Uplo blas.Uplo + N int + Data []float64 +} + +// Level 1 + +const ( + negInc = "blas64: negative vector increment" + badLength = "blas64: vector length mismatch" +) + +// Dot computes the dot product of the two vectors: +// \sum_i x[i]*y[i]. +func Dot(x, y Vector) float64 { + if x.N != y.N { + panic(badLength) + } + return blas64.Ddot(x.N, x.Data, x.Inc, y.Data, y.Inc) +} + +// Nrm2 computes the Euclidean norm of the vector x: +// sqrt(\sum_i x[i]*x[i]). +// +// Nrm2 will panic if the vector increment is negative. +func Nrm2(x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return blas64.Dnrm2(x.N, x.Data, x.Inc) +} + +// Asum computes the sum of the absolute values of the elements of x: +// \sum_i |x[i]|. +// +// Asum will panic if the vector increment is negative. +func Asum(x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return blas64.Dasum(x.N, x.Data, x.Inc) +} + +// Iamax returns the index of an element of x with the largest absolute value. +// If there are multiple such indices the earliest is returned. +// Iamax returns -1 if n == 0. +// +// Iamax will panic if the vector increment is negative. 
+func Iamax(x Vector) int { + if x.Inc < 0 { + panic(negInc) + } + return blas64.Idamax(x.N, x.Data, x.Inc) +} + +// Swap exchanges the elements of the two vectors: +// x[i], y[i] = y[i], x[i] for all i. +func Swap(x, y Vector) { + if x.N != y.N { + panic(badLength) + } + blas64.Dswap(x.N, x.Data, x.Inc, y.Data, y.Inc) +} + +// Copy copies the elements of x into the elements of y: +// y[i] = x[i] for all i. +// Copy requires that the lengths of x and y match and will panic otherwise. +func Copy(x, y Vector) { + if x.N != y.N { + panic(badLength) + } + blas64.Dcopy(x.N, x.Data, x.Inc, y.Data, y.Inc) +} + +// Axpy adds x scaled by alpha to y: +// y[i] += alpha*x[i] for all i. +func Axpy(alpha float64, x, y Vector) { + if x.N != y.N { + panic(badLength) + } + blas64.Daxpy(x.N, alpha, x.Data, x.Inc, y.Data, y.Inc) +} + +// Rotg computes the parameters of a Givens plane rotation so that +// ⎡ c s⎤ ⎡a⎤ ⎡r⎤ +// ⎣-s c⎦ * ⎣b⎦ = ⎣0⎦ +// where a and b are the Cartesian coordinates of a given point. +// c, s, and r are defined as +// r = ±Sqrt(a^2 + b^2), +// c = a/r, the cosine of the rotation angle, +// s = b/r, the sine of the rotation angle, +// and z is defined such that +// if |a| > |b|, z = s, +// otherwise if c != 0, z = 1/c, +// otherwise z = 1. +func Rotg(a, b float64) (c, s, r, z float64) { + return blas64.Drotg(a, b) +} + +// Rotmg computes the modified Givens rotation. See +// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html +// for more details. +func Rotmg(d1, d2, b1, b2 float64) (p blas.DrotmParams, rd1, rd2, rb1 float64) { + return blas64.Drotmg(d1, d2, b1, b2) +} + +// Rot applies a plane transformation to n points represented by the vectors x +// and y: +// x[i] = c*x[i] + s*y[i], +// y[i] = -s*x[i] + c*y[i], for all i.
+func Rot(x, y Vector, c, s float64) { + if x.N != y.N { + panic(badLength) + } + blas64.Drot(x.N, x.Data, x.Inc, y.Data, y.Inc, c, s) +} + +// Rotm applies the modified Givens rotation to n points represented by the +// vectors x and y. +func Rotm(x, y Vector, p blas.DrotmParams) { + if x.N != y.N { + panic(badLength) + } + blas64.Drotm(x.N, x.Data, x.Inc, y.Data, y.Inc, p) +} + +// Scal scales the vector x by alpha: +// x[i] *= alpha for all i. +// +// Scal will panic if the vector increment is negative. +func Scal(alpha float64, x Vector) { + if x.Inc < 0 { + panic(negInc) + } + blas64.Dscal(x.N, alpha, x.Data, x.Inc) +} + +// Level 2 + +// Gemv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans or blas.ConjTrans, +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func Gemv(t blas.Transpose, alpha float64, a General, x Vector, beta float64, y Vector) { + blas64.Dgemv(t, a.Rows, a.Cols, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Gbmv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans or blas.ConjTrans, +// where A is an m×n band matrix, x and y are vectors, and alpha and beta are scalars. +func Gbmv(t blas.Transpose, alpha float64, a Band, x Vector, beta float64, y Vector) { + blas64.Dgbmv(t, a.Rows, a.Cols, a.KL, a.KU, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Trmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix, and x is a vector. +func Trmv(t blas.Transpose, a Triangular, x Vector) { + blas64.Dtrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular band matrix, and x is a vector. 
+func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { + blas64.Dtbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix in packed format, and x is a vector. +func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { + blas64.Dtpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Trsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Trsv(t blas.Transpose, a Triangular, x Vector) { + blas64.Dtrsv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular band matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { + blas64.Dtbsv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans or blas.ConjTrans, +// where A is an n×n triangular matrix in packed format, and x and b are +// vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in place into x. +// +// No test for singularity or near-singularity is included in this +// routine. 
Such tests must be performed before calling this routine. +func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { + blas64.Dtpsv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Symv computes +// y = alpha * A * x + beta * y, +// where A is an n×n symmetric matrix, x and y are vectors, and alpha and +// beta are scalars. +func Symv(alpha float64, a Symmetric, x Vector, beta float64, y Vector) { + blas64.Dsymv(a.Uplo, a.N, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Sbmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n symmetric band matrix, x and y are vectors, and alpha +// and beta are scalars. +func Sbmv(alpha float64, a SymmetricBand, x Vector, beta float64, y Vector) { + blas64.Dsbmv(a.Uplo, a.N, a.K, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Spmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. +func Spmv(alpha float64, a SymmetricPacked, x Vector, beta float64, y Vector) { + blas64.Dspmv(a.Uplo, a.N, alpha, a.Data, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Ger performs a rank-1 update +// A += alpha * x * y^T, +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(alpha float64, x, y Vector, a General) { + blas64.Dger(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Syr performs a rank-1 update +// A += alpha * x * x^T, +// where A is an n×n symmetric matrix, x is a vector, and alpha is a scalar. +func Syr(alpha float64, x Vector, a Symmetric) { + blas64.Dsyr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) +} + +// Spr performs the rank-1 update +// A += alpha * x * x^T, +// where A is an n×n symmetric matrix in packed format, x is a vector, and +// alpha is a scalar. 
+func Spr(alpha float64, x Vector, a SymmetricPacked) { + blas64.Dspr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data) +} + +// Syr2 performs a rank-2 update +// A += alpha * x * y^T + alpha * y * x^T, +// where A is a symmetric n×n matrix, x and y are vectors, and alpha is a scalar. +func Syr2(alpha float64, x, y Vector, a Symmetric) { + blas64.Dsyr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Spr2 performs a rank-2 update +// A += alpha * x * y^T + alpha * y * x^T, +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha is a scalar. +func Spr2(alpha float64, x, y Vector, a SymmetricPacked) { + blas64.Dspr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data) +} + +// Level 3 + +// Gemm computes +// C = alpha * A * B + beta * C, +// where A, B, and C are dense matrices, and alpha and beta are scalars. +// tA and tB specify whether A or B are transposed. +func Gemm(tA, tB blas.Transpose, alpha float64, a, b General, beta float64, c General) { + var m, n, k int + if tA == blas.NoTrans { + m, k = a.Rows, a.Cols + } else { + m, k = a.Cols, a.Rows + } + if tB == blas.NoTrans { + n = b.Cols + } else { + n = b.Rows + } + blas64.Dgemm(tA, tB, m, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Symm performs +// C = alpha * A * B + beta * C, if s == blas.Left, +// C = alpha * B * A + beta * C, if s == blas.Right, +// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and +// alpha is a scalar. 
+func Symm(s blas.Side, alpha float64, a Symmetric, b General, beta float64, c General) { + var m, n int + if s == blas.Left { + m, n = a.N, b.Cols + } else { + m, n = b.Rows, a.N + } + blas64.Dsymm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Syrk performs a symmetric rank-k update +// C = alpha * A * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * A + beta * C, if t == blas.Trans or blas.ConjTrans, +// where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans and +// a k×n matrix otherwise, and alpha and beta are scalars. +func Syrk(t blas.Transpose, alpha float64, a General, beta float64, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + blas64.Dsyrk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) +} + +// Syr2k performs a symmetric rank-2k update +// C = alpha * A * B^T + alpha * B * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * B + alpha * B^T * A + beta * C, if t == blas.Trans or blas.ConjTrans, +// where C is an n×n symmetric matrix, A and B are n×k matrices if t == NoTrans +// and k×n matrices otherwise, and alpha and beta are scalars. +func Syr2k(t blas.Transpose, alpha float64, a, b General, beta float64, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + blas64.Dsyr2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Trmm performs +// B = alpha * A * B, if tA == blas.NoTrans and s == blas.Left, +// B = alpha * A^T * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// B = alpha * B * A, if tA == blas.NoTrans and s == blas.Right, +// B = alpha * B * A^T, if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is +// a scalar. 
+func Trmm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General) { + blas64.Dtrmm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} + +// Trsm solves +// A * X = alpha * B, if tA == blas.NoTrans and s == blas.Left, +// A^T * X = alpha * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Left, +// X * A = alpha * B, if tA == blas.NoTrans and s == blas.Right, +// X * A^T = alpha * B, if tA == blas.Trans or blas.ConjTrans, and s == blas.Right, +// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and +// alpha is a scalar. +// +// At entry to the function, X contains the values of B, and the result is +// stored in-place into X. +// +// No check is made that A is invertible. +func Trsm(s blas.Side, tA blas.Transpose, alpha float64, a Triangular, b General) { + blas64.Dtrsm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/conv.go b/vendor/gonum.org/v1/gonum/blas/blas64/conv.go new file mode 100644 index 0000000000..882fd8a716 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas64/conv.go @@ -0,0 +1,277 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blas64 + +import "gonum.org/v1/gonum/blas" + +// GeneralCols represents a matrix using the conventional column-major storage scheme. +type GeneralCols General + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. 
+func (t GeneralCols) From(a General) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if len(t.Data) < (t.Cols-1)*t.Stride+t.Rows { + panic("blas64: short data slice") + } + for i := 0; i < a.Rows; i++ { + for j, v := range a.Data[i*a.Stride : i*a.Stride+a.Cols] { + t.Data[i+j*t.Stride] = v + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. +func (t General) From(a GeneralCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if len(t.Data) < (t.Rows-1)*t.Stride+t.Cols { + panic("blas64: short data slice") + } + for j := 0; j < a.Cols; j++ { + for i, v := range a.Data[j*a.Stride : j*a.Stride+a.Rows] { + t.Data[i*t.Stride+j] = v + } + } +} + +// TriangularCols represents a matrix using the conventional column-major storage scheme. +type TriangularCols Triangular + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. +func (t TriangularCols) From(a Triangular) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. 
+func (t Triangular) From(a TriangularCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// BandCols represents a matrix using the band column-major storage scheme. +type BandCols Band + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. +func (t BandCols) From(a Band) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("blas64: short stride for destination") + } + for i := 0; i < a.Rows; i++ { + for j := max(0, i-a.KL); j < min(i+a.KU+1, a.Cols); j++ { + t.Data[i+t.KU-j+j*t.Stride] = a.Data[j+a.KL-i+i*a.Stride] + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. 
+func (t Band) From(a BandCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("blas64: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("blas64: short stride for destination") + } + for j := 0; j < a.Cols; j++ { + for i := max(0, j-a.KU); i < min(j+a.KL+1, a.Rows); i++ { + t.Data[j+t.KL-i+i*t.Stride] = a.Data[i+a.KU-j+j*a.Stride] + } + } +} + +// TriangularBandCols represents a triangular matrix using the band column-major storage scheme. +type TriangularBandCols TriangularBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t TriangularBandCols) From(a TriangularBand) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage.
+func (t TriangularBand) From(a TriangularBandCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("blas64: mismatched BLAS diag") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go b/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go new file mode 100644 index 0000000000..5146f1a1c3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas64/conv_symmetric.go @@ -0,0 +1,153 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blas64 + +import "gonum.org/v1/gonum/blas" + +// SymmetricCols represents a matrix using the conventional column-major storage scheme. +type SymmetricCols Symmetric + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. 
+func (t SymmetricCols) From(a Symmetric) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t Symmetric) From(a SymmetricCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// SymmetricBandCols represents a symmetric matrix using the band column-major storage scheme. +type SymmetricBandCols SymmetricBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. 
+func (t SymmetricBandCols) From(a SymmetricBand) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t SymmetricBand) From(a SymmetricBandCols) { + if t.N != a.N { + panic("blas64: mismatched dimension") + } + if t.K != a.K { + panic("blas64: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("blas64: short stride for source") + } + if t.Stride < t.K+1 { + panic("blas64: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("blas64: mismatched BLAS uplo") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("blas64: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/doc.go b/vendor/gonum.org/v1/gonum/blas/blas64/doc.go new file mode 100644 index 0000000000..7410cee486 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/blas64/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blas64 provides a simple interface to the float64 BLAS API. +package blas64 // import "gonum.org/v1/gonum/blas/blas64" diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go b/vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go new file mode 100644 index 0000000000..1205da8afa --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/cblas128.go @@ -0,0 +1,508 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cblas128 + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/gonum" +) + +var cblas128 blas.Complex128 = gonum.Implementation{} + +// Use sets the BLAS complex128 implementation to be used by subsequent BLAS calls. +// The default implementation is +// gonum.org/v1/gonum/blas/gonum.Implementation. +func Use(b blas.Complex128) { + cblas128 = b +} + +// Implementation returns the current BLAS complex128 implementation. +// +// Implementation allows direct calls to the current the BLAS complex128 implementation +// giving finer control of parameters. +func Implementation() blas.Complex128 { + return cblas128 +} + +// Vector represents a vector with an associated element increment. +type Vector struct { + Inc int + Data []complex128 +} + +// General represents a matrix using the conventional storage scheme. +type General struct { + Rows, Cols int + Stride int + Data []complex128 +} + +// Band represents a band matrix using the band storage scheme. +type Band struct { + Rows, Cols int + KL, KU int + Stride int + Data []complex128 +} + +// Triangular represents a triangular matrix using the conventional storage scheme. 
+type Triangular struct { + N int + Stride int + Data []complex128 + Uplo blas.Uplo + Diag blas.Diag +} + +// TriangularBand represents a triangular matrix using the band storage scheme. +type TriangularBand struct { + N, K int + Stride int + Data []complex128 + Uplo blas.Uplo + Diag blas.Diag +} + +// TriangularPacked represents a triangular matrix using the packed storage scheme. +type TriangularPacked struct { + N int + Data []complex128 + Uplo blas.Uplo + Diag blas.Diag +} + +// Symmetric represents a symmetric matrix using the conventional storage scheme. +type Symmetric struct { + N int + Stride int + Data []complex128 + Uplo blas.Uplo +} + +// SymmetricBand represents a symmetric matrix using the band storage scheme. +type SymmetricBand struct { + N, K int + Stride int + Data []complex128 + Uplo blas.Uplo +} + +// SymmetricPacked represents a symmetric matrix using the packed storage scheme. +type SymmetricPacked struct { + N int + Data []complex128 + Uplo blas.Uplo +} + +// Hermitian represents an Hermitian matrix using the conventional storage scheme. +type Hermitian Symmetric + +// HermitianBand represents an Hermitian matrix using the band storage scheme. +type HermitianBand SymmetricBand + +// HermitianPacked represents an Hermitian matrix using the packed storage scheme. +type HermitianPacked SymmetricPacked + +// Level 1 + +const negInc = "cblas128: negative vector increment" + +// Dotu computes the dot product of the two vectors without +// complex conjugation: +// x^T * y. +func Dotu(n int, x, y Vector) complex128 { + return cblas128.Zdotu(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Dotc computes the dot product of the two vectors with +// complex conjugation: +// x^H * y. +func Dotc(n int, x, y Vector) complex128 { + return cblas128.Zdotc(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Nrm2 computes the Euclidean norm of the vector x: +// sqrt(\sum_i x[i] * x[i]). +// +// Nrm2 will panic if the vector increment is negative. 
+func Nrm2(n int, x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return cblas128.Dznrm2(n, x.Data, x.Inc) +} + +// Asum computes the sum of magnitudes of the real and imaginary parts of +// elements of the vector x: +// \sum_i (|Re x[i]| + |Im x[i]|). +// +// Asum will panic if the vector increment is negative. +func Asum(n int, x Vector) float64 { + if x.Inc < 0 { + panic(negInc) + } + return cblas128.Dzasum(n, x.Data, x.Inc) +} + +// Iamax returns the index of an element of x with the largest sum of +// magnitudes of the real and imaginary parts (|Re x[i]|+|Im x[i]|). +// If there are multiple such indices, the earliest is returned. +// +// Iamax returns -1 if n == 0. +// +// Iamax will panic if the vector increment is negative. +func Iamax(n int, x Vector) int { + if x.Inc < 0 { + panic(negInc) + } + return cblas128.Izamax(n, x.Data, x.Inc) +} + +// Swap exchanges the elements of two vectors: +// x[i], y[i] = y[i], x[i] for all i. +func Swap(n int, x, y Vector) { + cblas128.Zswap(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Copy copies the elements of x into the elements of y: +// y[i] = x[i] for all i. +func Copy(n int, x, y Vector) { + cblas128.Zcopy(n, x.Data, x.Inc, y.Data, y.Inc) +} + +// Axpy computes +// y = alpha * x + y, +// where x and y are vectors, and alpha is a scalar. +func Axpy(n int, alpha complex128, x, y Vector) { + cblas128.Zaxpy(n, alpha, x.Data, x.Inc, y.Data, y.Inc) +} + +// Scal computes +// x = alpha * x, +// where x is a vector, and alpha is a scalar. +// +// Scal will panic if the vector increment is negative. +func Scal(n int, alpha complex128, x Vector) { + if x.Inc < 0 { + panic(negInc) + } + cblas128.Zscal(n, alpha, x.Data, x.Inc) +} + +// Dscal computes +// x = alpha * x, +// where x is a vector, and alpha is a real scalar. +// +// Dscal will panic if the vector increment is negative. 
+func Dscal(n int, alpha float64, x Vector) { + if x.Inc < 0 { + panic(negInc) + } + cblas128.Zdscal(n, alpha, x.Data, x.Inc) +} + +// Level 2 + +// Gemv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans, +// y = alpha * A^H * x + beta * y, if t == blas.ConjTrans, +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are +// scalars. +func Gemv(t blas.Transpose, alpha complex128, a General, x Vector, beta complex128, y Vector) { + cblas128.Zgemv(t, a.Rows, a.Cols, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Gbmv computes +// y = alpha * A * x + beta * y, if t == blas.NoTrans, +// y = alpha * A^T * x + beta * y, if t == blas.Trans, +// y = alpha * A^H * x + beta * y, if t == blas.ConjTrans, +// where A is an m×n band matrix, x and y are vectors, and alpha and beta are +// scalars. +func Gbmv(t blas.Transpose, alpha complex128, a Band, x Vector, beta complex128, y Vector) { + cblas128.Zgbmv(t, a.Rows, a.Cols, a.KL, a.KU, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Trmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans, +// x = A^H * x, if t == blas.ConjTrans, +// where A is an n×n triangular matrix, and x is a vector. +func Trmv(t blas.Transpose, a Triangular, x Vector) { + cblas128.Ztrmv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans, +// x = A^H * x, if t == blas.ConjTrans, +// where A is an n×n triangular band matrix, and x is a vector. +func Tbmv(t blas.Transpose, a TriangularBand, x Vector) { + cblas128.Ztbmv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpmv computes +// x = A * x, if t == blas.NoTrans, +// x = A^T * x, if t == blas.Trans, +// x = A^H * x, if t == blas.ConjTrans, +// where A is an n×n triangular matrix in packed format, and x is a vector. 
+func Tpmv(t blas.Transpose, a TriangularPacked, x Vector) { + cblas128.Ztpmv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Trsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans, +// A^H * x = b, if t == blas.ConjTrans, +// where A is an n×n triangular matrix and x is a vector. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Trsv(t blas.Transpose, a Triangular, x Vector) { + cblas128.Ztrsv(a.Uplo, t, a.Diag, a.N, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tbsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans, +// A^H * x = b, if t == blas.ConjTrans, +// where A is an n×n triangular band matrix, and x is a vector. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func Tbsv(t blas.Transpose, a TriangularBand, x Vector) { + cblas128.Ztbsv(a.Uplo, t, a.Diag, a.N, a.K, a.Data, a.Stride, x.Data, x.Inc) +} + +// Tpsv solves +// A * x = b, if t == blas.NoTrans, +// A^T * x = b, if t == blas.Trans, +// A^H * x = b, if t == blas.ConjTrans, +// where A is an n×n triangular matrix in packed format and x is a vector. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. 
+func Tpsv(t blas.Transpose, a TriangularPacked, x Vector) { + cblas128.Ztpsv(a.Uplo, t, a.Diag, a.N, a.Data, x.Data, x.Inc) +} + +// Hemv computes +// y = alpha * A * x + beta * y, +// where A is an n×n Hermitian matrix, x and y are vectors, and alpha and +// beta are scalars. +func Hemv(alpha complex128, a Hermitian, x Vector, beta complex128, y Vector) { + cblas128.Zhemv(a.Uplo, a.N, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Hbmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n Hermitian band matrix, x and y are vectors, and alpha +// and beta are scalars. +func Hbmv(alpha complex128, a HermitianBand, x Vector, beta complex128, y Vector) { + cblas128.Zhbmv(a.Uplo, a.N, a.K, alpha, a.Data, a.Stride, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Hpmv performs +// y = alpha * A * x + beta * y, +// where A is an n×n Hermitian matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. +func Hpmv(alpha complex128, a HermitianPacked, x Vector, beta complex128, y Vector) { + cblas128.Zhpmv(a.Uplo, a.N, alpha, a.Data, x.Data, x.Inc, beta, y.Data, y.Inc) +} + +// Geru performs a rank-1 update +// A += alpha * x * y^T, +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Geru(alpha complex128, x, y Vector, a General) { + cblas128.Zgeru(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Gerc performs a rank-1 update +// A += alpha * x * y^H, +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Gerc(alpha complex128, x, y Vector, a General) { + cblas128.Zgerc(a.Rows, a.Cols, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Her performs a rank-1 update +// A += alpha * x * y^T, +// where A is an m×n Hermitian matrix, x and y are vectors, and alpha is a scalar. 
+func Her(alpha float64, x Vector, a Hermitian) { + cblas128.Zher(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data, a.Stride) +} + +// Hpr performs a rank-1 update +// A += alpha * x * x^H, +// where A is an n×n Hermitian matrix in packed format, x is a vector, and +// alpha is a scalar. +func Hpr(alpha float64, x Vector, a HermitianPacked) { + cblas128.Zhpr(a.Uplo, a.N, alpha, x.Data, x.Inc, a.Data) +} + +// Her2 performs a rank-2 update +// A += alpha * x * y^H + conj(alpha) * y * x^H, +// where A is an n×n Hermitian matrix, x and y are vectors, and alpha is a scalar. +func Her2(alpha complex128, x, y Vector, a Hermitian) { + cblas128.Zher2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data, a.Stride) +} + +// Hpr2 performs a rank-2 update +// A += alpha * x * y^H + conj(alpha) * y * x^H, +// where A is an n×n Hermitian matrix in packed format, x and y are vectors, +// and alpha is a scalar. +func Hpr2(alpha complex128, x, y Vector, a HermitianPacked) { + cblas128.Zhpr2(a.Uplo, a.N, alpha, x.Data, x.Inc, y.Data, y.Inc, a.Data) +} + +// Level 3 + +// Gemm computes +// C = alpha * A * B + beta * C, +// where A, B, and C are dense matrices, and alpha and beta are scalars. +// tA and tB specify whether A or B are transposed or conjugated. +func Gemm(tA, tB blas.Transpose, alpha complex128, a, b General, beta complex128, c General) { + var m, n, k int + if tA == blas.NoTrans { + m, k = a.Rows, a.Cols + } else { + m, k = a.Cols, a.Rows + } + if tB == blas.NoTrans { + n = b.Cols + } else { + n = b.Rows + } + cblas128.Zgemm(tA, tB, m, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Symm performs +// C = alpha * A * B + beta * C, if s == blas.Left, +// C = alpha * B * A + beta * C, if s == blas.Right, +// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and +// alpha and beta are scalars. 
+func Symm(s blas.Side, alpha complex128, a Symmetric, b General, beta complex128, c General) { + var m, n int + if s == blas.Left { + m, n = a.N, b.Cols + } else { + m, n = b.Rows, a.N + } + cblas128.Zsymm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Syrk performs a symmetric rank-k update +// C = alpha * A * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * A + beta * C, if t == blas.Trans, +// where C is an n×n symmetric matrix, A is an n×k matrix if t == blas.NoTrans +// and a k×n matrix otherwise, and alpha and beta are scalars. +func Syrk(t blas.Transpose, alpha complex128, a General, beta complex128, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + cblas128.Zsyrk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) +} + +// Syr2k performs a symmetric rank-2k update +// C = alpha * A * B^T + alpha * B * A^T + beta * C, if t == blas.NoTrans, +// C = alpha * A^T * B + alpha * B^T * A + beta * C, if t == blas.Trans, +// where C is an n×n symmetric matrix, A and B are n×k matrices if +// t == blas.NoTrans and k×n otherwise, and alpha and beta are scalars. 
+func Syr2k(t blas.Transpose, alpha complex128, a, b General, beta complex128, c Symmetric) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + cblas128.Zsyr2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Trmm performs +// B = alpha * A * B, if tA == blas.NoTrans and s == blas.Left, +// B = alpha * A^T * B, if tA == blas.Trans and s == blas.Left, +// B = alpha * A^H * B, if tA == blas.ConjTrans and s == blas.Left, +// B = alpha * B * A, if tA == blas.NoTrans and s == blas.Right, +// B = alpha * B * A^T, if tA == blas.Trans and s == blas.Right, +// B = alpha * B * A^H, if tA == blas.ConjTrans and s == blas.Right, +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is +// a scalar. +func Trmm(s blas.Side, tA blas.Transpose, alpha complex128, a Triangular, b General) { + cblas128.Ztrmm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} + +// Trsm solves +// A * X = alpha * B, if tA == blas.NoTrans and s == blas.Left, +// A^T * X = alpha * B, if tA == blas.Trans and s == blas.Left, +// A^H * X = alpha * B, if tA == blas.ConjTrans and s == blas.Left, +// X * A = alpha * B, if tA == blas.NoTrans and s == blas.Right, +// X * A^T = alpha * B, if tA == blas.Trans and s == blas.Right, +// X * A^H = alpha * B, if tA == blas.ConjTrans and s == blas.Right, +// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and +// alpha is a scalar. +// +// At entry to the function, b contains the values of B, and the result is +// stored in-place into b. +// +// No check is made that A is invertible. 
+func Trsm(s blas.Side, tA blas.Transpose, alpha complex128, a Triangular, b General) { + cblas128.Ztrsm(s, a.Uplo, tA, a.Diag, b.Rows, b.Cols, alpha, a.Data, a.Stride, b.Data, b.Stride) +} + +// Hemm performs +// C = alpha * A * B + beta * C, if s == blas.Left, +// C = alpha * B * A + beta * C, if s == blas.Right, +// where A is an n×n or m×m Hermitian matrix, B and C are m×n matrices, and +// alpha and beta are scalars. +func Hemm(s blas.Side, alpha complex128, a Hermitian, b General, beta complex128, c General) { + var m, n int + if s == blas.Left { + m, n = a.N, b.Cols + } else { + m, n = b.Rows, a.N + } + cblas128.Zhemm(s, a.Uplo, m, n, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} + +// Herk performs the Hermitian rank-k update +// C = alpha * A * A^H + beta*C, if t == blas.NoTrans, +// C = alpha * A^H * A + beta*C, if t == blas.ConjTrans, +// where C is an n×n Hermitian matrix, A is an n×k matrix if t == blas.NoTrans +// and a k×n matrix otherwise, and alpha and beta are scalars. +func Herk(t blas.Transpose, alpha float64, a General, beta float64, c Hermitian) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + cblas128.Zherk(c.Uplo, t, n, k, alpha, a.Data, a.Stride, beta, c.Data, c.Stride) +} + +// Her2k performs the Hermitian rank-2k update +// C = alpha * A * B^H + conj(alpha) * B * A^H + beta * C, if t == blas.NoTrans, +// C = alpha * A^H * B + conj(alpha) * B^H * A + beta * C, if t == blas.ConjTrans, +// where C is an n×n Hermitian matrix, A and B are n×k matrices if t == NoTrans +// and k×n matrices otherwise, and alpha and beta are scalars. 
+func Her2k(t blas.Transpose, alpha complex128, a, b General, beta float64, c Hermitian) { + var n, k int + if t == blas.NoTrans { + n, k = a.Rows, a.Cols + } else { + n, k = a.Cols, a.Rows + } + cblas128.Zher2k(c.Uplo, t, n, k, alpha, a.Data, a.Stride, b.Data, b.Stride, beta, c.Data, c.Stride) +} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go new file mode 100644 index 0000000000..93e3cd2f92 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go @@ -0,0 +1,279 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cblas128 + +import "gonum.org/v1/gonum/blas" + +// GeneralCols represents a matrix using the conventional column-major storage scheme. +type GeneralCols General + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. +func (t GeneralCols) From(a General) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("cblas128: mismatched dimension") + } + if len(t.Data) < (t.Cols-1)*t.Stride+t.Rows { + panic("cblas128: short data slice") + } + for i := 0; i < a.Rows; i++ { + for j, v := range a.Data[i*a.Stride : i*a.Stride+a.Cols] { + t.Data[i+j*t.Stride] = v + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions as a and have adequate backing +// data storage. 
+func (t General) From(a GeneralCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("cblas128: mismatched dimension") + } + if len(t.Data) < (t.Rows-1)*t.Stride+t.Cols { + panic("cblas128: short data slice") + } + for j := 0; j < a.Cols; j++ { + for i, v := range a.Data[j*a.Stride : j*a.Stride+a.Rows] { + t.Data[i*t.Stride+j] = v + } + } +} + +// TriangularCols represents a matrix using the conventional column-major storage scheme. +type TriangularCols Triangular + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. +func (t TriangularCols) From(a Triangular) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("cblas128: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, uplo and diag as a and have +// adequate backing data storage. 
+func (t Triangular) From(a TriangularCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("cblas128: mismatched BLAS diag") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.All: + for i := 0; i < a.N; i++ { + for j := 0; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// BandCols represents a matrix using the band column-major storage scheme. +type BandCols Band + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. +func (t BandCols) From(a Band) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("cblas128: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("cblas128: short stride for destination") + } + for i := 0; i < a.Rows; i++ { + for j := max(0, i-a.KL); j < min(i+a.KU+1, a.Cols); j++ { + t.Data[i+t.KU-j+j*t.Stride] = a.Data[j+a.KL-i+i*a.Stride] + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and bandwidth as a and have +// adequate backing data storage. 
+func (t Band) From(a BandCols) { + if t.Rows != a.Rows || t.Cols != a.Cols { + panic("cblas128: mismatched dimension") + } + if t.KL != a.KL || t.KU != a.KU { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.KL+a.KU+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.KL+t.KU+1 { + panic("cblas128: short stride for destination") + } + for j := 0; j < a.Cols; j++ { + for i := max(0, j-a.KU); i < min(j+a.KL+1, a.Rows); i++ { + t.Data[j+a.KL-i+i*a.Stride] = a.Data[i+t.KU-j+j*t.Stride] + } + } +} + +// TriangularBandCols represents a symmetric matrix using the band column-major storage scheme. +type TriangularBandCols TriangularBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t TriangularBandCols) From(a TriangularBand) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("cblas128: mismatched BLAS diag") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. 
+func (t TriangularBand) From(a TriangularBandCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + if t.Diag != a.Diag { + panic("cblas128: mismatched BLAS diag") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go new file mode 100644 index 0000000000..51c3a5777b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_hermitian.go @@ -0,0 +1,155 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cblas128 + +import "gonum.org/v1/gonum/blas" + +// HermitianCols represents a matrix using the conventional column-major storage scheme. +type HermitianCols Hermitian + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. 
+func (t HermitianCols) From(a Hermitian) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t Hermitian) From(a HermitianCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// HermitianBandCols represents an Hermitian matrix using the band column-major storage scheme. +type HermitianBandCols HermitianBand + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. 
+func (t HermitianBandCols) From(a HermitianBand) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t HermitianBand) From(a HermitianBandCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go new file mode 100644 index 0000000000..f1bf40c208 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/conv_symmetric.go @@ -0,0 +1,155 @@ +// Code generated by "go generate 
gonum.org/v1/gonum/blas”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cblas128 + +import "gonum.org/v1/gonum/blas" + +// SymmetricCols represents a matrix using the conventional column-major storage scheme. +type SymmetricCols Symmetric + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t SymmetricCols) From(a Symmetric) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i+j*t.Stride] = a.Data[i*a.Stride+j] + } + } + } +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions and uplo as a and have adequate +// backing data storage. +func (t Symmetric) From(a SymmetricCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + for i := 0; i < a.N; i++ { + for j := i; j < a.N; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + case blas.Lower: + for i := 0; i < a.N; i++ { + for j := 0; j <= i; j++ { + t.Data[i*t.Stride+j] = a.Data[i+j*a.Stride] + } + } + } +} + +// SymmetricBandCols represents a symmetric matrix using the band column-major storage scheme. +type SymmetricBandCols SymmetricBand + +// From fills the receiver with elements from a. 
The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. +func (t SymmetricBandCols) From(a SymmetricBand) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + dst := BandCols{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := Band{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} + +// From fills the receiver with elements from a. The receiver +// must have the same dimensions, bandwidth and uplo as a and +// have adequate backing data storage. 
+func (t SymmetricBand) From(a SymmetricBandCols) { + if t.N != a.N { + panic("cblas128: mismatched dimension") + } + if t.K != a.K { + panic("cblas128: mismatched bandwidth") + } + if a.Stride < a.K+1 { + panic("cblas128: short stride for source") + } + if t.Stride < t.K+1 { + panic("cblas128: short stride for destination") + } + if t.Uplo != a.Uplo { + panic("cblas128: mismatched BLAS uplo") + } + dst := Band{ + Rows: t.N, Cols: t.N, + Stride: t.Stride, + Data: t.Data, + } + src := BandCols{ + Rows: a.N, Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } + switch a.Uplo { + default: + panic("cblas128: bad BLAS uplo") + case blas.Upper: + dst.KU = t.K + src.KU = a.K + case blas.Lower: + dst.KL = t.K + src.KL = a.K + } + dst.From(src) +} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/doc.go b/vendor/gonum.org/v1/gonum/blas/cblas128/doc.go new file mode 100644 index 0000000000..09719b19e6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cblas128 provides a simple interface to the complex128 BLAS API. +package cblas128 // import "gonum.org/v1/gonum/blas/cblas128" diff --git a/vendor/gonum.org/v1/gonum/blas/conversions.bash b/vendor/gonum.org/v1/gonum/blas/conversions.bash new file mode 100644 index 0000000000..d1c0ef0d99 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/conversions.bash @@ -0,0 +1,159 @@ +#!/usr/bin/env bash + +# Copyright ©2017 The Gonum Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Generate code for blas32. 
+echo Generating blas32/conv.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > blas32/conv.go +cat blas64/conv.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ +\ +>> blas32/conv.go + +echo Generating blas32/conv_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > blas32/conv_test.go +cat blas64/conv_test.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +\ +>> blas32/conv_test.go + +echo Generating blas32/conv_symmetric.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > blas32/conv_symmetric.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ +\ +>> blas32/conv_symmetric.go + +echo Generating blas32/conv_symmetric_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > blas32/conv_symmetric_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> float32' \ +\ +| sed -e 's/blas64/blas32/' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +\ +>> blas32/conv_symmetric_test.go + + +# Generate code for cblas128. 
+echo Generating cblas128/conv.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv.go +cat blas64/conv.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ +\ +>> cblas128/conv.go + +echo Generating cblas128/conv_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_test.go +cat blas64/conv_test.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's_"math"_math "math/cmplx"_' \ +\ +>> cblas128/conv_test.go + +echo Generating cblas128/conv_symmetric.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_symmetric.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ +\ +>> cblas128/conv_symmetric.go + +echo Generating cblas128/conv_symmetric_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_symmetric_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's_"math"_math "math/cmplx"_' \ +\ +>> cblas128/conv_symmetric_test.go + +echo Generating cblas128/conv_hermitian.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_hermitian.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ +\ +>> cblas128/conv_hermitian.go + +echo Generating cblas128/conv_hermitian_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas128/conv_hermitian_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> complex128' \ +\ +| sed -e 's/blas64/cblas128/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a 
symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ + -e 's_"math"_math "math/cmplx"_' \ +\ +>> cblas128/conv_hermitian_test.go + + +# Generate code for cblas64. +echo Generating cblas64/conv.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas64/conv.go +cat blas64/conv.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ +\ +>> cblas64/conv.go + +echo Generating cblas64/conv_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas64/conv_test.go +cat blas64/conv_test.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/cmplx64"_' \ +\ +>> cblas64/conv_test.go + +echo Generating cblas64/conv_hermitian.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas64/conv_hermitian.go +cat blas64/conv_symmetric.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ +\ +>> cblas64/conv_hermitian.go + +echo Generating cblas64/conv_hermitian_test.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas”; DO NOT EDIT.\n' > cblas64/conv_hermitian_test.go +cat blas64/conv_symmetric_test.go \ +| gofmt -r 'float64 -> complex64' \ +\ +| sed -e 's/blas64/cblas64/' \ + -e 's/Symmetric/Hermitian/g' \ + -e 's/a symmetric/an Hermitian/g' \ + -e 's/symmetric/hermitian/g' \ + -e 's/Sym/Herm/g' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/cmplx64"_' \ +\ +>> cblas64/conv_hermitian_test.go diff --git a/vendor/gonum.org/v1/gonum/blas/doc.go b/vendor/gonum.org/v1/gonum/blas/doc.go new file mode 100644 index 0000000000..ea4b16c904 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/doc.go @@ -0,0 +1,108 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package blas provides interfaces for the BLAS linear algebra standard. + +All methods must perform appropriate parameter checking and panic if +provided parameters that do not conform to the requirements specified +by the BLAS standard. + +Quick Reference Guide to the BLAS from http://www.netlib.org/lapack/lug/node145.html + +This version is modified to remove the "order" option. All matrix operations are +on row-order matrices. + +Level 1 BLAS + + dim scalar vector vector scalars 5-element prefixes + struct + + _rotg ( a, b ) S, D + _rotmg( d1, d2, a, b ) S, D + _rot ( n, x, incX, y, incY, c, s ) S, D + _rotm ( n, x, incX, y, incY, param ) S, D + _swap ( n, x, incX, y, incY ) S, D, C, Z + _scal ( n, alpha, x, incX ) S, D, C, Z, Cs, Zd + _copy ( n, x, incX, y, incY ) S, D, C, Z + _axpy ( n, alpha, x, incX, y, incY ) S, D, C, Z + _dot ( n, x, incX, y, incY ) S, D, Ds + _dotu ( n, x, incX, y, incY ) C, Z + _dotc ( n, x, incX, y, incY ) C, Z + __dot ( n, alpha, x, incX, y, incY ) Sds + _nrm2 ( n, x, incX ) S, D, Sc, Dz + _asum ( n, x, incX ) S, D, Sc, Dz + I_amax( n, x, incX ) s, d, c, z + +Level 2 BLAS + + options dim b-width scalar matrix vector scalar vector prefixes + + _gemv ( trans, m, n, alpha, a, lda, x, incX, beta, y, incY ) S, D, C, Z + _gbmv ( trans, m, n, kL, kU, alpha, a, lda, x, incX, beta, y, incY ) S, D, C, Z + _hemv ( uplo, n, alpha, a, lda, x, incX, beta, y, incY ) C, Z + _hbmv ( uplo, n, k, alpha, a, lda, x, incX, beta, y, incY ) C, Z + _hpmv ( uplo, n, alpha, ap, x, incX, beta, y, incY ) C, Z + _symv ( uplo, n, alpha, a, lda, x, incX, beta, y, incY ) S, D + _sbmv ( uplo, n, k, alpha, a, lda, x, incX, beta, y, incY ) S, D + _spmv ( uplo, n, alpha, ap, x, incX, beta, y, incY ) S, D + _trmv ( uplo, trans, diag, n, a, lda, x, incX ) S, D, C, Z + _tbmv ( uplo, trans, diag, n, k, a, lda, x, incX ) S, D, C, Z + _tpmv ( uplo, trans, diag, n, ap, 
x, incX ) S, D, C, Z + _trsv ( uplo, trans, diag, n, a, lda, x, incX ) S, D, C, Z + _tbsv ( uplo, trans, diag, n, k, a, lda, x, incX ) S, D, C, Z + _tpsv ( uplo, trans, diag, n, ap, x, incX ) S, D, C, Z + + options dim scalar vector vector matrix prefixes + + _ger ( m, n, alpha, x, incX, y, incY, a, lda ) S, D + _geru ( m, n, alpha, x, incX, y, incY, a, lda ) C, Z + _gerc ( m, n, alpha, x, incX, y, incY, a, lda ) C, Z + _her ( uplo, n, alpha, x, incX, a, lda ) C, Z + _hpr ( uplo, n, alpha, x, incX, ap ) C, Z + _her2 ( uplo, n, alpha, x, incX, y, incY, a, lda ) C, Z + _hpr2 ( uplo, n, alpha, x, incX, y, incY, ap ) C, Z + _syr ( uplo, n, alpha, x, incX, a, lda ) S, D + _spr ( uplo, n, alpha, x, incX, ap ) S, D + _syr2 ( uplo, n, alpha, x, incX, y, incY, a, lda ) S, D + _spr2 ( uplo, n, alpha, x, incX, y, incY, ap ) S, D + +Level 3 BLAS + + options dim scalar matrix matrix scalar matrix prefixes + + _gemm ( transA, transB, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z + _symm ( side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z + _hemm ( side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc ) C, Z + _syrk ( uplo, trans, n, k, alpha, a, lda, beta, c, ldc ) S, D, C, Z + _herk ( uplo, trans, n, k, alpha, a, lda, beta, c, ldc ) C, Z + _syr2k( uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) S, D, C, Z + _her2k( uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc ) C, Z + _trmm ( side, uplo, transA, diag, m, n, alpha, a, lda, b, ldb ) S, D, C, Z + _trsm ( side, uplo, transA, diag, m, n, alpha, a, lda, b, ldb ) S, D, C, Z + +Meaning of prefixes + + S - float32 C - complex64 + D - float64 Z - complex128 + +Matrix types + + GE - GEneral GB - General Band + SY - SYmmetric SB - Symmetric Band SP - Symmetric Packed + HE - HErmitian HB - Hermitian Band HP - Hermitian Packed + TR - TRiangular TB - Triangular Band TP - Triangular Packed + +Options + + trans = NoTrans, Trans, ConjTrans + uplo = Upper, Lower + diag = Nonunit, Unit + side = Left, 
Right (A or op(A) on the left, or A or op(A) on the right) + +For real matrices, Trans and ConjTrans have the same meaning. +For Hermitian matrices, trans = Trans is not allowed. +For complex symmetric matrices, trans = ConjTrans is not allowed. +*/ +package blas // import "gonum.org/v1/gonum/blas" diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go b/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go new file mode 100644 index 0000000000..ec3fcc61cb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/dgemm.go @@ -0,0 +1,314 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "runtime" + "sync" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Dgemm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C +// C = alpha * A^T * B + beta * C +// C = alpha * A * B^T + beta * C +// C = alpha * A^T * B^T + beta * C +// where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is +// an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or +// B are transposed. 
+func (Implementation) Dgemm(tA, tB blas.Transpose, m, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { + switch tA { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch tB { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + aTrans := tA == blas.Trans || tA == blas.ConjTrans + if aTrans { + if lda < max(1, m) { + panic(badLdA) + } + } else { + if lda < max(1, k) { + panic(badLdA) + } + } + bTrans := tB == blas.Trans || tB == blas.ConjTrans + if bTrans { + if ldb < max(1, k) { + panic(badLdB) + } + } else { + if ldb < max(1, n) { + panic(badLdB) + } + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if aTrans { + if len(a) < (k-1)*lda+m { + panic(shortA) + } + } else { + if len(a) < (m-1)*lda+k { + panic(shortA) + } + } + if bTrans { + if len(b) < (n-1)*ldb+k { + panic(shortB) + } + } else { + if len(b) < (k-1)*ldb+n { + panic(shortB) + } + } + if len(c) < (m-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. 
+ if (alpha == 0 || k == 0) && beta == 1 { + return + } + + // scale c + if beta != 1 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + } + } + + dgemmParallel(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) +} + +func dgemmParallel(aTrans, bTrans bool, m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // dgemmParallel computes a parallel matrix multiplication by partitioning + // a and b into sub-blocks, and updating c with the multiplication of the sub-block + // In all cases, + // A = [ A_11 A_12 ... A_1j + // A_21 A_22 ... A_2j + // ... + // A_i1 A_i2 ... A_ij] + // + // and same for B. All of the submatrix sizes are blockSize×blockSize except + // at the edges. + // + // In all cases, there is one dimension for each matrix along which + // C must be updated sequentially. + // Cij = \sum_k Aik Bki, (A * B) + // Cij = \sum_k Aki Bkj, (A^T * B) + // Cij = \sum_k Aik Bjk, (A * B^T) + // Cij = \sum_k Aki Bjk, (A^T * B^T) + // + // This code computes one {i, j} block sequentially along the k dimension, + // and computes all of the {i, j} blocks concurrently. This + // partitioning allows Cij to be updated in-place without race-conditions. + // Instead of launching a goroutine for each possible concurrent computation, + // a number of worker goroutines are created and channels are used to pass + // available and completed cases. + // + // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix + // multiplies, though this code does not copy matrices to attempt to eliminate + // cache misses. + + maxKLen := k + parBlocks := blocks(m, blockSize) * blocks(n, blockSize) + if parBlocks < minParBlock { + // The matrix multiplication is small in the dimensions where it can be + // computed concurrently. 
Just do it in serial. + dgemmSerial(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + } + + nWorkers := runtime.GOMAXPROCS(0) + if parBlocks < nWorkers { + nWorkers = parBlocks + } + // There is a tradeoff between the workers having to wait for work + // and a large buffer making operations slow. + buf := buffMul * nWorkers + if buf > parBlocks { + buf = parBlocks + } + + sendChan := make(chan subMul, buf) + + // Launch workers. A worker receives an {i, j} submatrix of c, and computes + // A_ik B_ki (or the transposed version) storing the result in c_ij. When the + // channel is finally closed, it signals to the waitgroup that it has finished + // computing. + var wg sync.WaitGroup + for i := 0; i < nWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for sub := range sendChan { + i := sub.i + j := sub.j + leni := blockSize + if i+leni > m { + leni = m - i + } + lenj := blockSize + if j+lenj > n { + lenj = n - j + } + + cSub := sliceView64(c, ldc, i, j, leni, lenj) + + // Compute A_ik B_kj for all k + for k := 0; k < maxKLen; k += blockSize { + lenk := blockSize + if k+lenk > maxKLen { + lenk = maxKLen - k + } + var aSub, bSub []float64 + if aTrans { + aSub = sliceView64(a, lda, k, i, lenk, leni) + } else { + aSub = sliceView64(a, lda, i, k, leni, lenk) + } + if bTrans { + bSub = sliceView64(b, ldb, j, k, lenj, lenk) + } else { + bSub = sliceView64(b, ldb, k, j, lenk, lenj) + } + dgemmSerial(aTrans, bTrans, leni, lenj, lenk, aSub, lda, bSub, ldb, cSub, ldc, alpha) + } + } + }() + } + + // Send out all of the {i, j} subblocks for computation. 
+ for i := 0; i < m; i += blockSize { + for j := 0; j < n; j += blockSize { + sendChan <- subMul{ + i: i, + j: j, + } + } + } + close(sendChan) + wg.Wait() +} + +// dgemmSerial is serial matrix multiply +func dgemmSerial(aTrans, bTrans bool, m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + switch { + case !aTrans && !bTrans: + dgemmSerialNotNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && !bTrans: + dgemmSerialTransNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case !aTrans && bTrans: + dgemmSerialNotTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && bTrans: + dgemmSerialTransTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + default: + panic("unreachable") + } +} + +// dgemmSerial where neither a nor b are transposed +func dgemmSerialNotNot(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for l, v := range a[i*lda : i*lda+k] { + tmp := alpha * v + if tmp != 0 { + f64.AxpyUnitary(tmp, b[l*ldb:l*ldb+n], ctmp) + } + } + } +} + +// dgemmSerial where neither a is transposed and b is not +func dgemmSerialTransNot(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. 
+ for l := 0; l < k; l++ { + btmp := b[l*ldb : l*ldb+n] + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f64.AxpyUnitary(tmp, btmp, ctmp) + } + } + } +} + +// dgemmSerial where neither a is not transposed and b is +func dgemmSerialNotTrans(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + atmp := a[i*lda : i*lda+k] + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] += alpha * f64.DotUnitary(atmp, b[j*ldb:j*ldb+k]) + } + } +} + +// dgemmSerial where both are transposed +func dgemmSerialTransTrans(m, n, k int, a []float64, lda int, b []float64, ldb int, c []float64, ldc int, alpha float64) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f64.AxpyInc(tmp, b[l:], ctmp, uintptr(n), uintptr(ldb), 1, 0, 0) + } + } + } +} + +func sliceView64(a []float64, lda, i, j, r, c int) []float64 { + return a[i*lda+j : (i+r-1)*lda+j+c] +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/doc.go b/vendor/gonum.org/v1/gonum/blas/gonum/doc.go new file mode 100644 index 0000000000..3f4b6c1d05 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/doc.go @@ -0,0 +1,88 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Ensure changes made to blas/native are reflected in blas/cgo where relevant. + +/* +Package gonum is a Go implementation of the BLAS API. This implementation +panics when the input arguments are invalid as per the standard, for example +if a vector increment is zero. 
Note that the treatment of NaN values +is not specified, and differs among the BLAS implementations. +gonum.org/v1/gonum/blas/blas64 provides helpful wrapper functions to the BLAS +interface. The rest of this text describes the layout of the data for the input types. + +Note that in the function documentation, x[i] refers to the i^th element +of the vector, which will be different from the i^th element of the slice if +incX != 1. + +See http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html +for more license information. + +Vector arguments are effectively strided slices. They have two input arguments, +a number of elements, n, and an increment, incX. The increment specifies the +distance between elements of the vector. The actual Go slice may be longer +than necessary. +The increment may be positive or negative, except in functions with only +a single vector argument where the increment may only be positive. If the increment +is negative, s[0] is the last element in the slice. Note that this is not the same +as counting backward from the end of the slice, as len(s) may be longer than +necessary. So, for example, if n = 5 and incX = 3, the elements of s are + [0 * * 1 * * 2 * * 3 * * 4 * * * ...] +where ∗ elements are never accessed. If incX = -3, the same elements are +accessed, just in reverse order (4, 3, 2, 1, 0). + +Dense matrices are specified by a number of rows, a number of columns, and a stride. +The stride specifies the number of entries in the slice between the first element +of successive rows. The stride must be at least as large as the number of columns +but may be longer. + [a00 ... a0n a0* ... a1stride-1 a21 ... amn am* ... amstride-1] +Thus, dense[i*ld + j] refers to the {i, j}th element of the matrix. + +Symmetric and triangular matrices (non-packed) are stored identically to Dense, +except that only elements in one triangle of the matrix are accessed. 
+ +Packed symmetric and packed triangular matrices are laid out with the entries +condensed such that all of the unreferenced elements are removed. So, the upper triangular +matrix + [ + 1 2 3 + 0 4 5 + 0 0 6 + ] +and the lower-triangular matrix + [ + 1 0 0 + 2 3 0 + 4 5 6 + ] +will both be compacted as [1 2 3 4 5 6]. The (i, j) element of the original +dense matrix can be found at element i*n - (i-1)*i/2 + j for upper triangular, +and at element i * (i+1) /2 + j for lower triangular. + +Banded matrices are laid out in a compact format, constructed by removing the +zeros in the rows and aligning the diagonals. For example, the matrix + [ + 1 2 3 0 0 0 + 4 5 6 7 0 0 + 0 8 9 10 11 0 + 0 0 12 13 14 15 + 0 0 0 16 17 18 + 0 0 0 0 19 20 + ] + +implicitly becomes (∗ entries are never accessed) + [ + * 1 2 3 + 4 5 6 7 + 8 9 10 11 + 12 13 14 15 + 16 17 18 * + 19 20 * * + ] +which is given to the BLAS routine as [∗ 1 2 3 4 ...]. + +See http://www.crest.iu.edu/research/mtl/reference/html/banded.html +for more information +*/ +package gonum // import "gonum.org/v1/gonum/blas/gonum" diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/errors.go b/vendor/gonum.org/v1/gonum/blas/gonum/errors.go new file mode 100644 index 0000000000..e98575d0fa --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/errors.go @@ -0,0 +1,35 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Panic strings used during parameter checks. +// This list is duplicated in netlib/blas/netlib. Keep in sync. 
+const ( + zeroIncX = "blas: zero x index increment" + zeroIncY = "blas: zero y index increment" + + mLT0 = "blas: m < 0" + nLT0 = "blas: n < 0" + kLT0 = "blas: k < 0" + kLLT0 = "blas: kL < 0" + kULT0 = "blas: kU < 0" + + badUplo = "blas: illegal triangle" + badTranspose = "blas: illegal transpose" + badDiag = "blas: illegal diagonal" + badSide = "blas: illegal side" + badFlag = "blas: illegal rotm flag" + + badLdA = "blas: bad leading dimension of A" + badLdB = "blas: bad leading dimension of B" + badLdC = "blas: bad leading dimension of C" + + shortX = "blas: insufficient length of x" + shortY = "blas: insufficient length of y" + shortAP = "blas: insufficient length of ap" + shortA = "blas: insufficient length of a" + shortB = "blas: insufficient length of b" + shortC = "blas: insufficient length of c" +) diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go b/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go new file mode 100644 index 0000000000..9b9a1beb09 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/gemv.go @@ -0,0 +1,190 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" + "gonum.org/v1/gonum/internal/asm/f64" +) + +// TODO(Kunde21): Merge these methods back into level2double/level2single when Sgemv assembly kernels are merged into f32. + +// Dgemv computes +// y = alpha * A * x + beta * y if tA = blas.NoTrans +// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 
+func (Implementation) Dgemv(tA blas.Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + // Set up indexes + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + + // Quick return if possible + if m == 0 || n == 0 { + return + } + + if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + // First form y = beta * y + if incY > 0 { + Implementation{}.Dscal(lenY, beta, y, incY) + } else { + Implementation{}.Dscal(lenY, beta, y, -incY) + } + return + } + + // Form y = alpha * A * x + y + if tA == blas.NoTrans { + f64.GemvN(uintptr(m), uintptr(n), alpha, a, uintptr(lda), x, uintptr(incX), beta, y, uintptr(incY)) + return + } + // Cases where a is transposed. + f64.GemvT(uintptr(m), uintptr(n), alpha, a, uintptr(lda), x, uintptr(incX), beta, y, uintptr(incY)) +} + +// Sgemv computes +// y = alpha * A * x + beta * y if tA = blas.NoTrans +// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sgemv(tA blas.Transpose, m, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // Set up indexes + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // First form y = beta * y + if incY > 0 { + Implementation{}.Sscal(lenY, beta, y, incY) + } else { + Implementation{}.Sscal(lenY, beta, y, -incY) + } + + if alpha == 0 { + return + } + + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = alpha * A * x + y + if tA == blas.NoTrans { + if incX == 1 && incY == 1 { + for i := 0; i < m; i++ { + y[i] += alpha * f32.DotUnitary(a[lda*i:lda*i+n], x[:n]) + } + return + } + iy := ky + for i := 0; i < m; i++ { + y[iy] += alpha * f32.DotInc(x, a[lda*i:lda*i+n], uintptr(n), uintptr(incX), 1, uintptr(kx), 0) + iy += incY + } + return + } + // Cases where a is transposed. 
+ if incX == 1 && incY == 1 { + for i := 0; i < m; i++ { + tmp := alpha * x[i] + if tmp != 0 { + f32.AxpyUnitaryTo(y, tmp, a[lda*i:lda*i+n], y[:n]) + } + } + return + } + ix := kx + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + if tmp != 0 { + f32.AxpyInc(tmp, a[lda*i:lda*i+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) + } + ix += incX + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go b/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go new file mode 100644 index 0000000000..8ab8d43e18 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go @@ -0,0 +1,58 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate ./single_precision.bash + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/internal/math32" +) + +type Implementation struct{} + +// [SD]gemm behavior constants. These are kept here to keep them out of the +// way during single precision code genration. +const ( + blockSize = 64 // b x b matrix + minParBlock = 4 // minimum number of blocks needed to go parallel + buffMul = 4 // how big is the buffer relative to the number of workers +) + +// subMul is a common type shared by [SD]gemm. +type subMul struct { + i, j int // index of block +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a > b { + return b + } + return a +} + +// blocks returns the number of divisions of the dimension length with the given +// block size. +func blocks(dim, bsize int) int { + return (dim + bsize - 1) / bsize +} + +// dcabs1 returns |real(z)|+|imag(z)|. +func dcabs1(z complex128) float64 { + return math.Abs(real(z)) + math.Abs(imag(z)) +} + +// scabs1 returns |real(z)|+|imag(z)|. 
+func scabs1(z complex64) float32 { + return math32.Abs(real(z)) + math32.Abs(imag(z)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go new file mode 100644 index 0000000000..e37bf44dd3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx128.go @@ -0,0 +1,445 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c128" +) + +var _ blas.Complex128Level1 = Implementation{} + +// Dzasum returns the sum of the absolute values of the elements of x +// \sum_i |Re(x[i])| + |Im(x[i])| +// Dzasum returns 0 if incX is negative. +func (Implementation) Dzasum(n int, x []complex128, incX int) float64 { + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + var sum float64 + if incX == 1 { + if len(x) < n { + panic(shortX) + } + for _, v := range x[:n] { + sum += dcabs1(v) + } + return sum + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + for i := 0; i < n; i++ { + v := x[i*incX] + sum += dcabs1(v) + } + return sum +} + +// Dznrm2 computes the Euclidean norm of the complex vector x, +// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). +// This function returns 0 if incX is negative. 
+func (Implementation) Dznrm2(n int, x []complex128, incX int) float64 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if n < 1 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + var ( + scale float64 + ssq float64 = 1 + ) + if incX == 1 { + for _, v := range x[:n] { + re, im := math.Abs(real(v)), math.Abs(imag(v)) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) + } + for ix := 0; ix < n*incX; ix += incX { + re, im := math.Abs(real(x[ix])), math.Abs(imag(x[ix])) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) +} + +// Izamax returns the index of the first element of x having largest |Re(·)|+|Im(·)|. +// Izamax returns -1 if n is 0 or incX is negative. +func (Implementation) Izamax(n int, x []complex128, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + // Return invalid index. + return -1 + } + if n < 1 { + if n == 0 { + // Return invalid index. 
+ return -1 + } + panic(nLT0) + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + idx := 0 + max := dcabs1(x[0]) + if incX == 1 { + for i, v := range x[1:n] { + absV := dcabs1(v) + if absV > max { + max = absV + idx = i + 1 + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + absV := dcabs1(x[ix]) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Zaxpy adds alpha times x to y: +// y[i] += alpha * x[i] for all i +func (Implementation) Zaxpy(n int, alpha complex128, x []complex128, incX int, y []complex128, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + c128.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (1 - n) * incX + } + if incY < 0 { + iy = (1 - n) * incY + } + c128.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Zcopy copies the vector x to vector y. 
+func (Implementation) Zcopy(n int, x []complex128, incX int, y []complex128, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Zdotc computes the dot product +// x^H · y +// of two complex vectors x and y. +func (Implementation) Zdotc(n int, x []complex128, incX int, y []complex128, incY int) complex128 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return c128.DotcUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(shortY) + } + return c128.DotcInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Zdotu computes the dot product +// x^T · y +// of two complex vectors x and y. 
+func (Implementation) Zdotu(n int, x []complex128, incX int, y []complex128, incY int) complex128 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return c128.DotuUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(shortY) + } + return c128.DotuInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Zdscal scales the vector x by a real scalar alpha. +// Zdscal has no effect if incX < 0. +func (Implementation) Zdscal(n int, alpha float64, x []complex128, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + x = x[:n] + for i, v := range x { + x[i] = complex(alpha*real(v), alpha*imag(v)) + } + return + } + for ix := 0; ix < n*incX; ix += incX { + v := x[ix] + x[ix] = complex(alpha*real(v), alpha*imag(v)) + } +} + +// Zscal scales the vector x by a complex scalar alpha. +// Zscal has no effect if incX < 0. 
+func (Implementation) Zscal(n int, alpha complex128, x []complex128, incX int) {
+	if incX < 1 {
+		if incX == 0 {
+			panic(zeroIncX)
+		}
+		return
+	}
+	if (n-1)*incX >= len(x) {
+		panic(shortX)
+	}
+	if n < 1 {
+		if n == 0 {
+			return
+		}
+		panic(nLT0)
+	}
+	if alpha == 0 {
+		if incX == 1 {
+			x = x[:n]
+			for i := range x {
+				x[i] = 0
+			}
+			return
+		}
+		for ix := 0; ix < n*incX; ix += incX {
+			x[ix] = 0
+		}
+		return
+	}
+	if incX == 1 {
+		c128.ScalUnitary(alpha, x[:n])
+		return
+	}
+	c128.ScalInc(alpha, x, uintptr(n), uintptr(incX))
+}
+
+// Zswap exchanges the elements of two complex vectors x and y.
+func (Implementation) Zswap(n int, x []complex128, incX int, y []complex128, incY int) {
+	if incX == 0 {
+		panic(zeroIncX)
+	}
+	if incY == 0 {
+		panic(zeroIncY)
+	}
+	if n < 1 {
+		if n == 0 {
+			return
+		}
+		panic(nLT0)
+	}
+	if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) {
+		panic(shortX)
+	}
+	if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) {
+		panic(shortY)
+	}
+	if incX == 1 && incY == 1 {
+		x = x[:n]
+		for i, v := range x {
+			x[i], y[i] = y[i], v
+		}
+		return
+	}
+	var ix, iy int
+	if incX < 0 {
+		ix = (-n + 1) * incX
+	}
+	if incY < 0 {
+		iy = (-n + 1) * incY
+	}
+	for i := 0; i < n; i++ {
+		x[ix], y[iy] = y[iy], x[ix]
+		ix += incX
+		iy += incY
+	}
+}
diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go
new file mode 100644
index 0000000000..ba192ea595
--- /dev/null
+++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1cmplx64.go
@@ -0,0 +1,467 @@
+// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.
+
+// Copyright ©2017 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package gonum + +import ( + math "gonum.org/v1/gonum/internal/math32" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c64" +) + +var _ blas.Complex64Level1 = Implementation{} + +// Scasum returns the sum of the absolute values of the elements of x +// \sum_i |Re(x[i])| + |Im(x[i])| +// Scasum returns 0 if incX is negative. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Scasum(n int, x []complex64, incX int) float32 { + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + var sum float32 + if incX == 1 { + if len(x) < n { + panic(shortX) + } + for _, v := range x[:n] { + sum += scabs1(v) + } + return sum + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + for i := 0; i < n; i++ { + v := x[i*incX] + sum += scabs1(v) + } + return sum +} + +// Scnrm2 computes the Euclidean norm of the complex vector x, +// ‖x‖_2 = sqrt(\sum_i x[i] * conj(x[i])). +// This function returns 0 if incX is negative. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Scnrm2(n int, x []complex64, incX int) float32 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if n < 1 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + var ( + scale float32 + ssq float32 = 1 + ) + if incX == 1 { + for _, v := range x[:n] { + re, im := math.Abs(real(v)), math.Abs(imag(v)) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) + } + for ix := 0; ix < n*incX; ix += incX { + re, im := math.Abs(real(x[ix])), math.Abs(imag(x[ix])) + if re != 0 { + if re > scale { + ssq = 1 + ssq*(scale/re)*(scale/re) + scale = re + } else { + ssq += (re / scale) * (re / scale) + } + } + if im != 0 { + if im > scale { + ssq = 1 + ssq*(scale/im)*(scale/im) + scale = im + } else { + ssq += (im / scale) * (im / scale) + } + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(ssq) +} + +// Icamax returns the index of the first element of x having largest |Re(·)|+|Im(·)|. +// Icamax returns -1 if n is 0 or incX is negative. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Icamax(n int, x []complex64, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + // Return invalid index. + return -1 + } + if n < 1 { + if n == 0 { + // Return invalid index. 
+ return -1 + } + panic(nLT0) + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + idx := 0 + max := scabs1(x[0]) + if incX == 1 { + for i, v := range x[1:n] { + absV := scabs1(v) + if absV > max { + max = absV + idx = i + 1 + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + absV := scabs1(x[ix]) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Caxpy adds alpha times x to y: +// y[i] += alpha * x[i] for all i +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Caxpy(n int, alpha complex64, x []complex64, incX int, y []complex64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + c64.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (1 - n) * incX + } + if incY < 0 { + iy = (1 - n) * incY + } + c64.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Ccopy copies the vector x to vector y. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Ccopy(n int, x []complex64, incX int, y []complex64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { + panic(shortX) + } + if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { + panic(shortY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Cdotc computes the dot product +// x^H · y +// of two complex vectors x and y. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cdotc(n int, x []complex64, incX int, y []complex64, incY int) complex64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return c64.DotcUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(shortY) + } + return c64.DotcInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Cdotu computes the dot product +// x^T · y +// of two complex vectors x and y. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cdotu(n int, x []complex64, incX int, y []complex64, incY int) complex64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return c64.DotuUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || (n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || (n-1)*incY >= len(y) { + panic(shortY) + } + return c64.DotuInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Csscal scales the vector x by a real scalar alpha. +// Csscal has no effect if incX < 0. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Csscal(n int, alpha float32, x []complex64, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + x = x[:n] + for i, v := range x { + x[i] = complex(alpha*real(v), alpha*imag(v)) + } + return + } + for ix := 0; ix < n*incX; ix += incX { + v := x[ix] + x[ix] = complex(alpha*real(v), alpha*imag(v)) + } +} + +// Cscal scales the vector x by a complex scalar alpha. +// Cscal has no effect if incX < 0. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cscal(n int, alpha complex64, x []complex64, incX int) {
+	if incX < 1 {
+		if incX == 0 {
+			panic(zeroIncX)
+		}
+		return
+	}
+	if (n-1)*incX >= len(x) {
+		panic(shortX)
+	}
+	if n < 1 {
+		if n == 0 {
+			return
+		}
+		panic(nLT0)
+	}
+	if alpha == 0 {
+		if incX == 1 {
+			x = x[:n]
+			for i := range x {
+				x[i] = 0
+			}
+			return
+		}
+		for ix := 0; ix < n*incX; ix += incX {
+			x[ix] = 0
+		}
+		return
+	}
+	if incX == 1 {
+		c64.ScalUnitary(alpha, x[:n])
+		return
+	}
+	c64.ScalInc(alpha, x, uintptr(n), uintptr(incX))
+}
+
+// Cswap exchanges the elements of two complex vectors x and y.
+//
+// Complex64 implementations are autogenerated and not directly tested.
+func (Implementation) Cswap(n int, x []complex64, incX int, y []complex64, incY int) {
+	if incX == 0 {
+		panic(zeroIncX)
+	}
+	if incY == 0 {
+		panic(zeroIncY)
+	}
+	if n < 1 {
+		if n == 0 {
+			return
+		}
+		panic(nLT0)
+	}
+	if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) {
+		panic(shortX)
+	}
+	if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) {
+		panic(shortY)
+	}
+	if incX == 1 && incY == 1 {
+		x = x[:n]
+		for i, v := range x {
+			x[i], y[i] = y[i], v
+		}
+		return
+	}
+	var ix, iy int
+	if incX < 0 {
+		ix = (-n + 1) * incX
+	}
+	if incY < 0 {
+		iy = (-n + 1) * incY
+	}
+	for i := 0; i < n; i++ {
+		x[ix], y[iy] = y[iy], x[ix]
+		ix += incX
+		iy += incY
+	}
+}
diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go
new file mode 100644
index 0000000000..ee82083a6b
--- /dev/null
+++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32.go
@@ -0,0 +1,644 @@
+// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.
+
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package gonum + +import ( + math "gonum.org/v1/gonum/internal/math32" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +var _ blas.Float32Level1 = Implementation{} + +// Snrm2 computes the Euclidean norm of a vector, +// sqrt(\sum_i x[i] * x[i]). +// This function returns 0 if incX is negative. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Snrm2(n int, x []float32, incX int) float32 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if n < 2 { + if n == 1 { + return math.Abs(x[0]) + } + if n == 0 { + return 0 + } + panic(nLT0) + } + var ( + scale float32 = 0 + sumSquares float32 = 1 + ) + if incX == 1 { + x = x[:n] + for _, v := range x { + if v == 0 { + continue + } + absxi := math.Abs(v) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) + } + for ix := 0; ix < n*incX; ix += incX { + val := x[ix] + if val == 0 { + continue + } + absxi := math.Abs(val) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) +} + +// Sasum computes the sum of the absolute values of the elements of x. +// \sum_i |x[i]| +// Sasum returns 0 if incX is negative. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sasum(n int, x []float32, incX int) float32 { + var sum float32 + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if incX == 1 { + x = x[:n] + for _, v := range x { + sum += math.Abs(v) + } + return sum + } + for i := 0; i < n; i++ { + sum += math.Abs(x[i*incX]) + } + return sum +} + +// Isamax returns the index of an element of x with the largest absolute value. +// If there are multiple such indices the earliest is returned. +// Isamax returns -1 if n == 0. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Isamax(n int, x []float32, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return -1 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if n < 2 { + if n == 1 { + return 0 + } + if n == 0 { + return -1 // Netlib returns invalid index when n == 0. + } + panic(nLT0) + } + idx := 0 + max := math.Abs(x[0]) + if incX == 1 { + for i, v := range x[:n] { + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + v := x[ix] + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Sswap exchanges the elements of two vectors. +// x[i], y[i] = y[i], x[i] for all i +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sswap(n int, x []float32, incX int, y []float32, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, v := range x { + x[i], y[i] = y[i], v + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + x[ix], y[iy] = y[iy], x[ix] + ix += incX + iy += incY + } +} + +// Scopy copies the elements of x into the elements of y. +// y[i] = x[i] for all i +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Scopy(n int, x []float32, incX int, y []float32, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Saxpy adds alpha times x to y +// y[i] += alpha * x[i] for all i +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + f32.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + f32.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Srotg computes the plane rotation +// _ _ _ _ _ _ +// | c s | | a | | r | +// | -s c | * | b | = | 0 | +// ‾ ‾ ‾ ‾ ‾ ‾ +// where +// r = ±√(a^2 + b^2) +// c = a/r, the cosine of the plane rotation +// s = b/r, the sine of the plane rotation +// +// NOTE: There is a discrepancy between the reference implementation and the BLAS +// technical manual regarding the sign for r when a or b are zero. +// Srotg agrees with the definition in the manual and other +// common BLAS implementations. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Srotg(a, b float32) (c, s, r, z float32) { + if b == 0 && a == 0 { + return 1, 0, a, 0 + } + absA := math.Abs(a) + absB := math.Abs(b) + aGTb := absA > absB + r = math.Hypot(a, b) + if aGTb { + r = math.Copysign(r, a) + } else { + r = math.Copysign(r, b) + } + c = a / r + s = b / r + if aGTb { + z = s + } else if c != 0 { // r == 0 case handled above + z = 1 / c + } else { + z = 1 + } + return +} + +// Srotmg computes the modified Givens rotation. See +// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html +// for more details. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Srotmg(d1, d2, x1, y1 float32) (p blas.SrotmParams, rd1, rd2, rx1 float32) { + // The implementation of Drotmg used here is taken from Hopkins 1997 + // Appendix A: https://doi.org/10.1145/289251.289253 + // with the exception of the gam constants below. + + const ( + gam = 4096.0 + gamsq = gam * gam + rgamsq = 1.0 / gamsq + ) + + if d1 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + if d2 == 0 || y1 == 0 { + p.Flag = blas.Identity + return p, d1, d2, x1 + } + + var h11, h12, h21, h22 float32 + if (d1 == 0 || x1 == 0) && d2 > 0 { + p.Flag = blas.Diagonal + h12 = 1 + h21 = -1 + x1 = y1 + d1, d2 = d2, d1 + } else { + p2 := d2 * y1 + p1 := d1 * x1 + q2 := p2 * y1 + q1 := p1 * x1 + if math.Abs(q1) > math.Abs(q2) { + p.Flag = blas.OffDiagonal + h11 = 1 + h22 = 1 + h21 = -y1 / x1 + h12 = p2 / p1 + u := 1 - h12*h21 + if u <= 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + d1 /= u + d2 /= u + x1 *= u + } else { + if q2 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + p.Flag = blas.Diagonal + h21 = -1 + h12 = 1 + h11 = p1 / p2 + h22 = x1 / y1 + u := 1 + h11*h22 + d1, d2 = d2/u, d1/u + x1 = y1 * u + } + } + + for d1 <= rgamsq && d1 != 0 { + p.Flag = blas.Rescaling + d1 = (d1 * gam) * gam + x1 /= gam + h11 /= gam + h12 /= gam + } + for d1 > gamsq { + p.Flag = blas.Rescaling + d1 = (d1 / gam) / gam + x1 *= gam + h11 *= gam + h12 *= gam + } + + for math.Abs(d2) <= rgamsq && d2 != 0 { + p.Flag = blas.Rescaling + d2 = (d2 * gam) * gam + h21 /= gam + h22 /= gam + } + for math.Abs(d2) > gamsq { + p.Flag = blas.Rescaling + d2 = (d2 / gam) / gam + h21 *= gam + h22 *= gam + } + + switch p.Flag { + case blas.Diagonal: + p.H = [4]float32{0: h11, 3: h22} + case blas.OffDiagonal: + p.H = [4]float32{1: h21, 2: h12} + case blas.Rescaling: + p.H = [4]float32{h11, h21, h12, h22} + default: + panic(badFlag) + } + + return p, d1, d2, x1 +} + +// Srot applies a plane transformation. 
+// x[i] = c * x[i] + s * y[i] +// y[i] = c * y[i] - s * x[i] +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Srot(n int, x []float32, incX int, y []float32, incY int, c float32, s float32) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = c*vx+s*vy, c*vy-s*vx + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx + ix += incX + iy += incY + } +} + +// Srotm applies the modified Givens rotation to the 2×n matrix. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Srotm(n int, x []float32, incX int, y []float32, incY int, p blas.SrotmParams) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + if p.Flag == blas.Identity { + return + } + + switch p.Flag { + case blas.Rescaling: + h11 := p.H[0] + h12 := p.H[2] + h21 := p.H[1] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 + ix += incX + iy += incY + } + case blas.OffDiagonal: + h12 := p.H[2] + h21 := p.H[1] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx+vy*h12, vx*h21+vy + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx+vy*h12, vx*h21+vy + ix += incX + iy += incY + } + case blas.Diagonal: + h11 := p.H[0] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy, -vx+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy, -vx+vy*h22 + ix += incX + iy += incY + } + } +} + +// Sscal scales x by alpha. +// x[i] *= alpha +// Sscal has no effect if incX < 0. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sscal(n int, alpha float32, x []float32, incX int) {
+	if incX < 1 {
+		if incX == 0 {
+			panic(zeroIncX)
+		}
+		return
+	}
+	if n < 1 {
+		if n == 0 {
+			return
+		}
+		panic(nLT0)
+	}
+	if (n-1)*incX >= len(x) {
+		panic(shortX)
+	}
+	if alpha == 0 {
+		if incX == 1 {
+			x = x[:n]
+			for i := range x {
+				x[i] = 0
+			}
+			return
+		}
+		for ix := 0; ix < n*incX; ix += incX {
+			x[ix] = 0
+		}
+		return
+	}
+	if incX == 1 {
+		f32.ScalUnitary(alpha, x[:n])
+		return
+	}
+	f32.ScalInc(alpha, x, uintptr(n), uintptr(incX))
+}
diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go
new file mode 100644
index 0000000000..089e0d8f0d
--- /dev/null
+++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_dsdot.go
@@ -0,0 +1,53 @@
+// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.
+
+// Copyright ©2015 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gonum
+
+import (
+	"gonum.org/v1/gonum/internal/asm/f32"
+)
+
+// Dsdot computes the dot product of the two vectors
+//  \sum_i x[i]*y[i]
+//
+// Float32 implementations are autogenerated and not directly tested.
+func (Implementation) Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return f32.DdotUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(shortY) + } + return f32.DdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go new file mode 100644 index 0000000000..41c3e79239 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdot.go @@ -0,0 +1,53 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Sdot computes the dot product of the two vectors +// \sum_i x[i]*y[i] +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sdot(n int, x []float32, incX int, y []float32, incY int) float32 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return f32.DotUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(shortY) + } + return f32.DotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go new file mode 100644 index 0000000000..69dd8aa1f0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float32_sdsdot.go @@ -0,0 +1,53 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Sdsdot computes the dot product of the two vectors plus a constant +// alpha + \sum_i x[i]*y[i] +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return alpha + float32(f32.DdotUnitary(x[:n], y[:n])) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(shortY) + } + return alpha + float32(f32.DdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy))) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go new file mode 100644 index 0000000000..2e8ed543a3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64.go @@ -0,0 +1,620 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var _ blas.Float64Level1 = Implementation{} + +// Dnrm2 computes the Euclidean norm of a vector, +// sqrt(\sum_i x[i] * x[i]). +// This function returns 0 if incX is negative. 
+func (Implementation) Dnrm2(n int, x []float64, incX int) float64 { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if n < 2 { + if n == 1 { + return math.Abs(x[0]) + } + if n == 0 { + return 0 + } + panic(nLT0) + } + var ( + scale float64 = 0 + sumSquares float64 = 1 + ) + if incX == 1 { + x = x[:n] + for _, v := range x { + if v == 0 { + continue + } + absxi := math.Abs(v) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) + } + for ix := 0; ix < n*incX; ix += incX { + val := x[ix] + if val == 0 { + continue + } + absxi := math.Abs(val) + if math.IsNaN(absxi) { + return math.NaN() + } + if scale < absxi { + sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumSquares = sumSquares + (absxi/scale)*(absxi/scale) + } + } + if math.IsInf(scale, 1) { + return math.Inf(1) + } + return scale * math.Sqrt(sumSquares) +} + +// Dasum computes the sum of the absolute values of the elements of x. +// \sum_i |x[i]| +// Dasum returns 0 if incX is negative. +func (Implementation) Dasum(n int, x []float64, incX int) float64 { + var sum float64 + if n < 0 { + panic(nLT0) + } + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return 0 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if incX == 1 { + x = x[:n] + for _, v := range x { + sum += math.Abs(v) + } + return sum + } + for i := 0; i < n; i++ { + sum += math.Abs(x[i*incX]) + } + return sum +} + +// Idamax returns the index of an element of x with the largest absolute value. +// If there are multiple such indices the earliest is returned. +// Idamax returns -1 if n == 0. 
+func (Implementation) Idamax(n int, x []float64, incX int) int { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return -1 + } + if len(x) <= (n-1)*incX { + panic(shortX) + } + if n < 2 { + if n == 1 { + return 0 + } + if n == 0 { + return -1 // Netlib returns invalid index when n == 0. + } + panic(nLT0) + } + idx := 0 + max := math.Abs(x[0]) + if incX == 1 { + for i, v := range x[:n] { + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + } + return idx + } + ix := incX + for i := 1; i < n; i++ { + v := x[ix] + absV := math.Abs(v) + if absV > max { + max = absV + idx = i + } + ix += incX + } + return idx +} + +// Dswap exchanges the elements of two vectors. +// x[i], y[i] = y[i], x[i] for all i +func (Implementation) Dswap(n int, x []float64, incX int, y []float64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, v := range x { + x[i], y[i] = y[i], v + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + x[ix], y[iy] = y[iy], x[ix] + ix += incX + iy += incY + } +} + +// Dcopy copies the elements of x into the elements of y. 
+// y[i] = x[i] for all i +func (Implementation) Dcopy(n int, x []float64, incX int, y []float64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + copy(y[:n], x[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + y[iy] = x[ix] + ix += incX + iy += incY + } +} + +// Daxpy adds alpha times x to y +// y[i] += alpha * x[i] for all i +func (Implementation) Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if alpha == 0 { + return + } + if incX == 1 && incY == 1 { + f64.AxpyUnitary(alpha, x[:n], y[:n]) + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + f64.AxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} + +// Drotg computes the plane rotation +// _ _ _ _ _ _ +// | c s | | a | | r | +// | -s c | * | b | = | 0 | +// ‾ ‾ ‾ ‾ ‾ ‾ +// where +// r = ±√(a^2 + b^2) +// c = a/r, the cosine of the plane rotation +// s = b/r, the sine of the plane rotation +// +// NOTE: There is a discrepancy between the reference implementation and the BLAS +// technical manual regarding the sign for r when a or b are zero. 
+// Drotg agrees with the definition in the manual and other +// common BLAS implementations. +func (Implementation) Drotg(a, b float64) (c, s, r, z float64) { + if b == 0 && a == 0 { + return 1, 0, a, 0 + } + absA := math.Abs(a) + absB := math.Abs(b) + aGTb := absA > absB + r = math.Hypot(a, b) + if aGTb { + r = math.Copysign(r, a) + } else { + r = math.Copysign(r, b) + } + c = a / r + s = b / r + if aGTb { + z = s + } else if c != 0 { // r == 0 case handled above + z = 1 / c + } else { + z = 1 + } + return +} + +// Drotmg computes the modified Givens rotation. See +// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html +// for more details. +func (Implementation) Drotmg(d1, d2, x1, y1 float64) (p blas.DrotmParams, rd1, rd2, rx1 float64) { + // The implementation of Drotmg used here is taken from Hopkins 1997 + // Appendix A: https://doi.org/10.1145/289251.289253 + // with the exception of the gam constants below. + + const ( + gam = 4096.0 + gamsq = gam * gam + rgamsq = 1.0 / gamsq + ) + + if d1 < 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + if d2 == 0 || y1 == 0 { + p.Flag = blas.Identity + return p, d1, d2, x1 + } + + var h11, h12, h21, h22 float64 + if (d1 == 0 || x1 == 0) && d2 > 0 { + p.Flag = blas.Diagonal + h12 = 1 + h21 = -1 + x1 = y1 + d1, d2 = d2, d1 + } else { + p2 := d2 * y1 + p1 := d1 * x1 + q2 := p2 * y1 + q1 := p1 * x1 + if math.Abs(q1) > math.Abs(q2) { + p.Flag = blas.OffDiagonal + h11 = 1 + h22 = 1 + h21 = -y1 / x1 + h12 = p2 / p1 + u := 1 - h12*h21 + if u <= 0 { + p.Flag = blas.Rescaling // Error state. + return p, 0, 0, 0 + } + + d1 /= u + d2 /= u + x1 *= u + } else { + if q2 < 0 { + p.Flag = blas.Rescaling // Error state. 
+ return p, 0, 0, 0 + } + + p.Flag = blas.Diagonal + h21 = -1 + h12 = 1 + h11 = p1 / p2 + h22 = x1 / y1 + u := 1 + h11*h22 + d1, d2 = d2/u, d1/u + x1 = y1 * u + } + } + + for d1 <= rgamsq && d1 != 0 { + p.Flag = blas.Rescaling + d1 = (d1 * gam) * gam + x1 /= gam + h11 /= gam + h12 /= gam + } + for d1 > gamsq { + p.Flag = blas.Rescaling + d1 = (d1 / gam) / gam + x1 *= gam + h11 *= gam + h12 *= gam + } + + for math.Abs(d2) <= rgamsq && d2 != 0 { + p.Flag = blas.Rescaling + d2 = (d2 * gam) * gam + h21 /= gam + h22 /= gam + } + for math.Abs(d2) > gamsq { + p.Flag = blas.Rescaling + d2 = (d2 / gam) / gam + h21 *= gam + h22 *= gam + } + + switch p.Flag { + case blas.Diagonal: + p.H = [4]float64{0: h11, 3: h22} + case blas.OffDiagonal: + p.H = [4]float64{1: h21, 2: h12} + case blas.Rescaling: + p.H = [4]float64{h11, h21, h12, h22} + default: + panic(badFlag) + } + + return p, d1, d2, x1 +} + +// Drot applies a plane transformation. +// x[i] = c * x[i] + s * y[i] +// y[i] = c * y[i] - s * x[i] +func (Implementation) Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = c*vx+s*vy, c*vy-s*vx + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx + ix += incX + iy += incY + } +} + +// Drotm applies the modified Givens rotation to the 2×n matrix. 
+func (Implementation) Drotm(n int, x []float64, incX int, y []float64, incY int, p blas.DrotmParams) { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return + } + panic(nLT0) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + if p.Flag == blas.Identity { + return + } + + switch p.Flag { + case blas.Rescaling: + h11 := p.H[0] + h12 := p.H[2] + h21 := p.H[1] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 + ix += incX + iy += incY + } + case blas.OffDiagonal: + h12 := p.H[2] + h21 := p.H[1] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx+vy*h12, vx*h21+vy + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx+vy*h12, vx*h21+vy + ix += incX + iy += incY + } + case blas.Diagonal: + h11 := p.H[0] + h22 := p.H[3] + if incX == 1 && incY == 1 { + x = x[:n] + for i, vx := range x { + vy := y[i] + x[i], y[i] = vx*h11+vy, -vx+vy*h22 + } + return + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + for i := 0; i < n; i++ { + vx := x[ix] + vy := y[iy] + x[ix], y[iy] = vx*h11+vy, -vx+vy*h22 + ix += incX + iy += incY + } + } +} + +// Dscal scales x by alpha. +// x[i] *= alpha +// Dscal has no effect if incX < 0. 
+func (Implementation) Dscal(n int, alpha float64, x []float64, incX int) { + if incX < 1 { + if incX == 0 { + panic(zeroIncX) + } + return + } + if n < 1 { + if n == 0 { + return + } + panic(nLT0) + } + if (n-1)*incX >= len(x) { + panic(shortX) + } + if alpha == 0 { + if incX == 1 { + x = x[:n] + for i := range x { + x[i] = 0 + } + return + } + for ix := 0; ix < n*incX; ix += incX { + x[ix] = 0 + } + return + } + if incX == 1 { + f64.ScalUnitary(alpha, x[:n]) + return + } + f64.ScalInc(alpha, x, uintptr(n), uintptr(incX)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go new file mode 100644 index 0000000000..be87ba13db --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level1float64_ddot.go @@ -0,0 +1,49 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Ddot computes the dot product of the two vectors +// \sum_i x[i]*y[i] +func (Implementation) Ddot(n int, x []float64, incX int, y []float64, incY int) float64 { + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + if n <= 0 { + if n == 0 { + return 0 + } + panic(nLT0) + } + if incX == 1 && incY == 1 { + if len(x) < n { + panic(shortX) + } + if len(y) < n { + panic(shortY) + } + return f64.DotUnitary(x[:n], y[:n]) + } + var ix, iy int + if incX < 0 { + ix = (-n + 1) * incX + } + if incY < 0 { + iy = (-n + 1) * incY + } + if ix >= len(x) || ix+(n-1)*incX >= len(x) { + panic(shortX) + } + if iy >= len(y) || iy+(n-1)*incY >= len(y) { + panic(shortY) + } + return f64.DotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go new file mode 100644 index 
0000000000..03ee328fdb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx128.go @@ -0,0 +1,2906 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math/cmplx" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c128" +) + +var _ blas.Complex128Level2 = Implementation{} + +// Zgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n band matrix +// with kL sub-diagonals and kU super-diagonals. +func (Implementation) Zgbmv(trans blas.Transpose, m, n, kL, kU int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { + panic(shortA) + } + var lenX, lenY int + if trans == blas.NoTrans { + lenX, lenY = n, m + } else { + lenX, lenY = m, n + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. 
+ if alpha == 0 && beta == 1 { + return + } + + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c128.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + nRow := min(m, n+kL) + nCol := kL + 1 + kU + switch trans { + case blas.NoTrans: + iy := ky + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) + xtmp := x[off : off+u-l] + var sum complex128 + for j, v := range aRow { + sum += xtmp[j] * v + } + y[iy] += alpha * sum + iy += incY + } + } else { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incX + jx := kx + var sum complex128 + for _, v := range aRow { + sum += x[off+jx] * v + jx += incX + } + y[iy] += alpha * sum + iy += incY + } + } + case blas.Trans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + ix += incX + } + } + case blas.ConjTrans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) 
* incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + ix += incX + } + } + } +} + +// Zgemv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n dense matrix. +func (Implementation) Zgemv(trans blas.Transpose, m, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + var lenX, lenY int + if trans == blas.NoTrans { + lenX = n + lenY = m + } else { + lenX = m + lenY = n + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. 
+ if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c128.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c128.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + switch trans { + default: + // Form y = alpha*A*x + y. + iy := ky + if incX == 1 { + for i := 0; i < m; i++ { + y[iy] += alpha * c128.DotuUnitary(a[i*lda:i*lda+n], x[:n]) + iy += incY + } + return + } + for i := 0; i < m; i++ { + y[iy] += alpha * c128.DotuInc(a[i*lda:i*lda+n], x, uintptr(n), 1, uintptr(incX), 0, uintptr(kx)) + iy += incY + } + return + + case blas.Trans: + // Form y = alpha*A^T*x + y. + ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + c128.AxpyUnitary(alpha*x[ix], a[i*lda:i*lda+n], y[:n]) + ix += incX + } + return + } + for i := 0; i < m; i++ { + c128.AxpyInc(alpha*x[ix], a[i*lda:i*lda+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) + ix += incX + } + return + + case blas.ConjTrans: + // Form y = alpha*A^H*x + y. + ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + for j := 0; j < n; j++ { + y[j] += tmp * cmplx.Conj(a[i*lda+j]) + } + ix += incX + } + return + } + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + jy := ky + for j := 0; j < n; j++ { + y[jy] += tmp * cmplx.Conj(a[i*lda+j]) + jy += incY + } + ix += incX + } + return + } +} + +// Zgerc performs the rank-one operation +// A += alpha * x * y^H +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. 
+func (Implementation) Zgerc(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var kx, jy int + if incX < 0 { + kx = (1 - m) * incX + } + if incY < 0 { + jy = (1 - n) * incY + } + for j := 0; j < n; j++ { + if y[jy] != 0 { + tmp := alpha * cmplx.Conj(y[jy]) + c128.AxpyInc(tmp, x, a[j:], uintptr(m), uintptr(incX), uintptr(lda), uintptr(kx), 0) + } + jy += incY + } +} + +// Zgeru performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. +func (Implementation) Zgeru(m, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - m) * incX + } + if incY == 1 { + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c128.AxpyUnitary(tmp, y[:n], a[i*lda:i*lda+n]) + } + kx += incX + } + return + } + var jy int + if incY < 0 { + jy = (1 - n) * incY + } + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c128.AxpyInc(tmp, y, a[i*lda:i*lda+n], uintptr(n), uintptr(incY), 1, uintptr(jy), 0) + } + kx += incX + } +} + +// Zhbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian band matrix with k super-diagonals. The imaginary parts of +// the diagonal elements of A are ignored and assumed to be zero. +func (Implementation) Zhbmv(uplo blas.Uplo, n, k int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. 
+ if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through a. + switch uplo { + case blas.Upper: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + aRow := a[i*lda:] + alphaxi := alpha * x[i] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[i+j] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += sum + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + aRow := a[i*lda:] + alphaxi := alpha * x[ix] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jx := incX + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + } + case blas.Lower: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[i] + jy := l * incY + aRow := a[i*lda:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[ix] + jx := l * incX + jy := l * incY + aRow := a[i*lda:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * 
x[ix-k*incX+jx] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + ix += incX + iy += incY + } + } + } +} + +// Zhemv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix. The imaginary parts of the diagonal elements of A are +// ignored and assumed to be zero. +func (Implementation) Zhemv(uplo blas.Uplo, n int, alpha complex128, a []complex128, lda int, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through + // the triangular part of A. 
+ + if uplo == blas.Upper { + // Form y when A is stored in upper triangle. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex128 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex128 + jx := ix + jy := iy + for j := i + 1; j < n; j++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } + return + } + + // Form y when A is stored in lower triangle. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex128 + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex128 + jx := kx + jy := ky + for j := 0; j < i; j++ { + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } +} + +// Zher performs the Hermitian rank-one operation +// A += alpha * x * x^H +// where A is an n×n Hermitian matrix, alpha is a real scalar, and x is an n +// element vector. On entry, the imaginary parts of the diagonal elements of A +// are ignored and assumed to be zero, on return they will be set to zero. 
+func (Implementation) Zher(uplo blas.Uplo, n int, alpha float64, x []complex128, incX int, a []complex128, lda int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + jx := ix + incX + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } + return + } + + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := 
complex(alpha*real(x[ix]), alpha*imag(x[ix])) + jx := kx + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } +} + +// Zher2 performs the Hermitian rank-two operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a scalar, x and y are n element vectors and A is an n×n +// Hermitian matrix. On entry, the imaginary parts of the diagonal elements are +// ignored and assumed to be zero. On return they will be set to zero. +func (Implementation) Zher2(uplo blas.Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, a []complex128, lda int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. 
+ if alpha == 0 { + return + } + + var kx, ky int + var ix, iy int + if incX != 1 || incY != 1 { + if incX < 0 { + kx = (1 - n) * incX + } + if incY < 0 { + ky = (1 - n) * incY + } + ix = kx + iy = ky + } + if uplo == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } + return + } + + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + 
a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } +} + +// Zhpmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix in packed form. The imaginary parts of the diagonal +// elements of A are ignored and assumed to be zero. +func (Implementation) Zhpmv(uplo blas.Uplo, n int, alpha complex128, ap []complex128, x []complex128, incX int, beta complex128, y []complex128, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] *= beta + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form y when ap contains the upper triangle. 
+ // Here, kk points to the current diagonal element in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + y[i] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex128 + k := kk + 1 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + y[i] += alpha * tmp2 + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + y[iy] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex128 + jx := ix + jy := iy + for k := kk + 1; k < kk+n-i; k++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + } + y[iy] += alpha * tmp2 + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form y when ap contains the lower triangle. + // Here, kk points to the beginning of current row in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex128 + k := kk + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + aii := complex(real(ap[kk+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex128 + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(ap[kk+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Zhpr performs the Hermitian rank-1 operation +// A += alpha * x * x^H +// where alpha is a real scalar, x is a vector, and A is an n×n hermitian matrix +// in packed form. On entry, the imaginary parts of the diagonal elements are +// assumed to be zero, and on return they are set to zero. 
+func (Implementation) Zhpr(uplo blas.Uplo, n int, alpha float64, x []complex128, incX int, ap []complex128) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + a := ap[kk+1 : kk+n-i] + x := x[i+1 : n] + for j, v := range x { + a[j] += tmp * cmplx.Conj(v) + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + jx := ix + incX + a := ap[kk+1 : kk+n-i] + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. 
+ if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + for j, v := range x[:i] { + a[j] += tmp * cmplx.Conj(v) + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + jx := kx + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + kk += i + 1 + } + } +} + +// Zhpr2 performs the Hermitian rank-2 operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a complex scalar, x and y are n element vectors, and A is an +// n×n Hermitian matrix, supplied in packed form. On entry, the imaginary parts +// of the diagonal elements are assumed to be zero, and on return they are set to zero. +func (Implementation) Zhpr2(uplo blas.Uplo, n int, alpha complex128, x []complex128, incX int, y []complex128, incY int, ap []complex128) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + // Set up start indices in X and Y. 
+ var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk] = complex(aii, 0) + k := kk + 1 + for j := i + 1; j < n; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for k := kk + 1; k < kk+n-i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. 
+ if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + k := kk + for j := 0; j < i; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Ztbmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular band matrix, with +// (k+1) diagonals. +func (Implementation) Ztbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex128, lda int, x []complex128, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if diag == blas.NonUnit { + xi *= a[i*lda] + } + kk := min(k, n-i-1) + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + xi += x[i+j+1] * aij + } + x[i] = xi + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if diag == blas.NonUnit { + xi *= a[i*lda] + } + kk := min(k, n-i-1) + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + xi *= a[i*lda+k] + } + kk := min(k, i) + for j, aij := range a[i*lda+k-kk : i*lda+k] { + xi += x[i-kk+j] * aij + } + x[i] = xi + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + xi *= a[i*lda+k] + } + kk := min(k, i) + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix -= incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+j+1] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= a[i*lda] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + 
x[i-kk+j] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= a[i*lda+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda+k] + } + ix += incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+j+1] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda]) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+k]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+k]) + } + ix += incX + } + } + } + } +} + +// Ztbsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular band matrix +// with (k+1) diagonals. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. 
+func (Implementation) Ztbsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex128, lda int, x []complex128, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex128 + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + sum += x[i+1+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= a[i*lda] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex128 + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == blas.NonUnit { + x[ix] /= a[i*lda] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex128 + for j, aij := range a[i*lda+k-kk : i*lda+k] { + sum += x[i-kk+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= a[i*lda+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex128 + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == 
blas.NonUnit { + x[ix] /= a[i*lda+k] + } + ix += incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= a[i*lda] + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+1+j] -= xi * aij + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= a[i*lda] + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] -= xi * aij + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= a[i*lda+k] + } + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] -= xi * aij + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] /= a[i*lda+k] + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] -= xi * aij + jx += incX + } + ix -= incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= cmplx.Conj(a[i*lda]) + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+1+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= cmplx.Conj(a[i*lda]) + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= cmplx.Conj(a[i*lda+k]) + } + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { 
+ x[ix] /= cmplx.Conj(a[i*lda+k]) + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix -= incX + } + } + } + } +} + +// Ztpmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular matrix, supplied in +// packed form. +func (Implementation) Ztpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex128, x []complex128, incX int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. 
+ kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + x[i] += c128.DotuUnitary(ap[kk+1:kk+n-i], x[i+1:]) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + x[ix] += c128.DotuInc(ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + kk += n - i + } + } + } else { + // kk points to the beginning of current row in ap. + kk := n*(n+1)/2 - n + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + if i > 0 { + x[i] += c128.DotuUnitary(ap[kk:kk+i], x[:i]) + } + kk -= i + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + if i > 0 { + x[ix] += c128.DotuInc(ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + kk -= i + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + c128.AxpyUnitary(xi, ap[kk+1:kk+n-i], x[i+1:n]) + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + c128.AxpyInc(xi, ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. 
+ kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if i > 0 { + c128.AxpyUnitary(x[i], ap[kk:kk+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c128.AxpyInc(x[ix], ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + // Form x = A^H*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk]) + } + k := kk + 1 + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(ap[k]) + k++ + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk]) + } + jx := ix + incX + k := kk + 1 + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(ap[k]) + jx += incX + k++ + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i, xi := range x { + for j := 0; j < i; j++ { + x[j] += xi * cmplx.Conj(ap[kk+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk+i]) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + jx := kx + for j := 0; j < i; j++ { + x[jx] += xi * cmplx.Conj(ap[kk+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk+i]) + } + ix += incX + kk += i + 1 + } + } + } +} + +// Ztpsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix in +// packed form. 
+// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Ztpsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex128, x []complex128, incX int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. 
+ if uplo == blas.Upper { + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[i] -= c128.DotuUnitary(x[i+1:n], ap[kk+1:kk+n-i]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[ix] -= c128.DotuInc(x, ap[kk+1:kk+n-i], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + kk := 0 + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c128.DotuUnitary(x[:i], ap[kk:kk+i]) + } + if diag == blas.NonUnit { + x[i] /= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c128.DotuInc(x, ap[kk:kk+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. 
+ if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= ap[kk] + } + if n-j-1 > 0 { + c128.AxpyUnitary(-x[j], ap[kk+1:kk+n-j], x[j+1:n]) + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= ap[kk] + } + if n-j-1 > 0 { + c128.AxpyInc(-x[jx], ap[kk+1:kk+n-j], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= ap[kk+j] + } + if j > 0 { + c128.AxpyUnitary(-x[j], ap[kk:kk+j], x[:j]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= ap[kk+j] + } + if j > 0 { + c128.AxpyInc(-x[jx], ap[kk:kk+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + kk -= j + } + } + } + return + } + + // Form x = inv(A^H)*x. + if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk]) + } + xj := x[j] + k := kk + 1 + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(ap[k]) + k++ + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk]) + } + xj := x[jx] + ix := jx + incX + k := kk + 1 + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(ap[k]) + ix += incX + k++ + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(ap[kk+i]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(ap[kk+i]) + ix += incX + } + 
jx -= incX + kk -= j + } + } + } +} + +// Ztrmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is a vector, and A is an n×n triangular matrix. +func (Implementation) Ztrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex128, lda int, x []complex128, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. 
+ if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[i] += c128.DotuUnitary(a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[ix] += c128.DotuInc(a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if i > 0 { + x[i] += c128.DotuUnitary(a[i*lda:i*lda+i], x[:i]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if i > 0 { + x[ix] += c128.DotuInc(a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + c128.AxpyUnitary(xi, a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + c128.AxpyInc(xi, a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + c128.AxpyUnitary(x[i], a[i*lda:i*lda+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c128.AxpyInc(x[ix], a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + // Form x = A^H*x. 
+ if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(a[i*lda+j]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + jx := ix + incX + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(a[i*lda+j]) + jx += incX + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + x[j] += x[i] * cmplx.Conj(a[i*lda+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + jx := kx + for j := 0; j < i; j++ { + x[jx] += x[ix] * cmplx.Conj(a[i*lda+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + ix += incX + } + } + } +} + +// Ztrsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Ztrsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex128, lda int, x []complex128, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. 
+ if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[i] -= c128.DotuUnitary(x[i+1:n], a[i*lda+i+1:i*lda+n]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[ix] -= c128.DotuInc(x, a[i*lda+i+1:i*lda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c128.DotuUnitary(x[:i], a[i*lda:i*lda+i]) + } + if diag == blas.NonUnit { + x[i] /= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c128.DotuInc(x, a[i*lda:i*lda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. 
+ if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + if n-j-1 > 0 { + c128.AxpyUnitary(-x[j], a[j*lda+j+1:j*lda+n], x[j+1:n]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if n-j-1 > 0 { + c128.AxpyInc(-x[jx], a[j*lda+j+1:j*lda+n], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + xj := x[j] + if j > 0 { + c128.AxpyUnitary(-xj, a[j*lda:j*lda+j], x[:j]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if j > 0 { + c128.AxpyInc(-x[jx], a[j*lda:j*lda+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + } + } + } + return + } + + // Form x = inv(A^H)*x. + if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := jx + incX + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx -= incX + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go 
b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go new file mode 100644 index 0000000000..10faf8f7d8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level2cmplx64.go @@ -0,0 +1,2942 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + cmplx "gonum.org/v1/gonum/internal/cmplx64" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c64" +) + +var _ blas.Complex64Level2 = Implementation{} + +// Cgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n band matrix +// with kL sub-diagonals and kU super-diagonals. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cgbmv(trans blas.Transpose, m, n, kL, kU int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { + panic(shortA) + } + var lenX, lenY int + if trans == blas.NoTrans { + lenX, lenY = n, m + } else { + lenX, lenY = m, n + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c64.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c64.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c64.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + nRow := min(m, n+kL) + nCol := kL + 1 + kU + switch trans { + case blas.NoTrans: + iy := ky + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) + xtmp := x[off : off+u-l] + var sum complex64 + for j, v := range aRow { + sum += xtmp[j] * v + } + y[iy] += alpha * sum + iy += incY + } + } else { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incX + jx := kx + var sum complex64 + for _, v := range aRow { + sum += x[off+jx] * v + jx += incX + } + y[iy] += alpha * sum + iy += incY + } + } + case blas.Trans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; 
i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * v + jy += incY + } + ix += incX + } + } + case blas.ConjTrans: + if incX == 1 { + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[i] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + } + } else { + ix := kx + for i := 0; i < nRow; i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + aRow := a[i*lda+l : i*lda+u] + off := max(0, i-kL) * incY + alphaxi := alpha * x[ix] + jy := ky + for _, v := range aRow { + y[off+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + ix += incX + } + } + } +} + +// Cgemv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if trans = blas.NoTrans +// y = alpha * A^T * x + beta * y if trans = blas.Trans +// y = alpha * A^H * x + beta * y if trans = blas.ConjTrans +// where alpha and beta are scalars, x and y are vectors, and A is an m×n dense matrix. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cgemv(trans blas.Transpose, m, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ var lenX, lenY int + if trans == blas.NoTrans { + lenX = n + lenY = m + } else { + lenX = m + lenY = n + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx int + if incX < 0 { + kx = (1 - lenX) * incX + } + var ky int + if incY < 0 { + ky = (1 - lenY) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + c64.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + c64.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + c64.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + switch trans { + default: + // Form y = alpha*A*x + y. + iy := ky + if incX == 1 { + for i := 0; i < m; i++ { + y[iy] += alpha * c64.DotuUnitary(a[i*lda:i*lda+n], x[:n]) + iy += incY + } + return + } + for i := 0; i < m; i++ { + y[iy] += alpha * c64.DotuInc(a[i*lda:i*lda+n], x, uintptr(n), 1, uintptr(incX), 0, uintptr(kx)) + iy += incY + } + return + + case blas.Trans: + // Form y = alpha*A^T*x + y. + ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + c64.AxpyUnitary(alpha*x[ix], a[i*lda:i*lda+n], y[:n]) + ix += incX + } + return + } + for i := 0; i < m; i++ { + c64.AxpyInc(alpha*x[ix], a[i*lda:i*lda+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) + ix += incX + } + return + + case blas.ConjTrans: + // Form y = alpha*A^H*x + y. 
+ ix := kx + if incY == 1 { + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + for j := 0; j < n; j++ { + y[j] += tmp * cmplx.Conj(a[i*lda+j]) + } + ix += incX + } + return + } + for i := 0; i < m; i++ { + tmp := alpha * x[ix] + jy := ky + for j := 0; j < n; j++ { + y[jy] += tmp * cmplx.Conj(a[i*lda+j]) + jy += incY + } + ix += incX + } + return + } +} + +// Cgerc performs the rank-one operation +// A += alpha * x * y^H +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cgerc(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var kx, jy int + if incX < 0 { + kx = (1 - m) * incX + } + if incY < 0 { + jy = (1 - n) * incY + } + for j := 0; j < n; j++ { + if y[jy] != 0 { + tmp := alpha * cmplx.Conj(y[jy]) + c64.AxpyInc(tmp, x, a[j:], uintptr(m), uintptr(incX), uintptr(lda), uintptr(kx), 0) + } + jy += incY + } +} + +// Cgeru performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, alpha is a scalar, x is an m element vector, +// and y is an n element vector. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cgeru(m, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - m) * incX + } + if incY == 1 { + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c64.AxpyUnitary(tmp, y[:n], a[i*lda:i*lda+n]) + } + kx += incX + } + return + } + var jy int + if incY < 0 { + jy = (1 - n) * incY + } + for i := 0; i < m; i++ { + if x[kx] != 0 { + tmp := alpha * x[kx] + c64.AxpyInc(tmp, y, a[i*lda:i*lda+n], uintptr(n), uintptr(incY), 1, uintptr(jy), 0) + } + kx += incX + } +} + +// Chbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian band matrix with k super-diagonals. The imaginary parts of +// the diagonal elements of A are ignored and assumed to be zero. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Chbmv(uplo blas.Uplo, n, k int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through a. 
+ switch uplo { + case blas.Upper: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + aRow := a[i*lda:] + alphaxi := alpha * x[i] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[i+j] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += sum + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + aRow := a[i*lda:] + alphaxi := alpha * x[ix] + sum := alphaxi * complex(real(aRow[0]), 0) + u := min(k+1, n-i) + jx := incX + jy := incY + for j := 1; j < u; j++ { + v := aRow[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + } + case blas.Lower: + iy := ky + if incX == 1 { + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[i] + jy := l * incY + aRow := a[i*lda:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + iy += incY + } + } else { + ix := kx + for i := 0; i < n; i++ { + l := max(0, k-i) + alphaxi := alpha * x[ix] + jx := l * incX + jy := l * incY + aRow := a[i*lda:] + for j := l; j < k; j++ { + v := aRow[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += alphaxi * cmplx.Conj(v) + jx += incX + jy += incY + } + y[iy] += alphaxi * complex(real(aRow[k]), 0) + ix += incX + iy += incY + } + } + } +} + +// Chemv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix. The imaginary parts of the diagonal elements of A are +// ignored and assumed to be zero. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Chemv(uplo blas.Uplo, n int, alpha complex64, a []complex64, lda int, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] = beta * y[iy] + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through + // the triangular part of A. + + if uplo == blas.Upper { + // Form y when A is stored in upper triangle. 
+ if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex64 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex64 + jx := ix + jy := iy + for j := i + 1; j < n; j++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } + return + } + + // Form y when A is stored in lower triangle. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex64 + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[j] + } + aii := complex(real(a[i*lda+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex64 + jx := kx + jy := ky + for j := 0; j < i; j++ { + y[jy] += tmp1 * cmplx.Conj(a[i*lda+j]) + tmp2 += a[i*lda+j] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(a[i*lda+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + } + } +} + +// Cher performs the Hermitian rank-one operation +// A += alpha * x * x^H +// where A is an n×n Hermitian matrix, alpha is a real scalar, and x is an n +// element vector. On entry, the imaginary parts of the diagonal elements of A +// are ignored and assumed to be zero, on return they will be set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cher(uplo blas.Uplo, n int, alpha float32, x []complex64, incX int, a []complex64, lda int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := complex(alpha*real(x[ix]), alpha*imag(x[ix])) + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + jx := ix + incX + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } + return + } + + if incX == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 { + tmp := complex(alpha*real(x[i]), alpha*imag(x[i])) + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii+xtmp, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + + ix := kx + for i := 0; i < n; i++ { + if x[ix] != 0 { + tmp := 
complex(alpha*real(x[ix]), alpha*imag(x[ix])) + jx := kx + for j := 0; j < i; j++ { + a[i*lda+j] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + aii := real(a[i*lda+i]) + xtmp := real(tmp * cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii+xtmp, 0) + + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + } +} + +// Cher2 performs the Hermitian rank-two operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a scalar, x and y are n element vectors and A is an n×n +// Hermitian matrix. On entry, the imaginary parts of the diagonal elements are +// ignored and assumed to be zero. On return they will be set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Cher2(uplo blas.Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, a []complex64, lda int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. 
+ if alpha == 0 { + return + } + + var kx, ky int + var ix, iy int + if incX != 1 || incY != 1 { + if incX < 0 { + kx = (1 - n) * incX + } + if incY < 0 { + ky = (1 - n) * incY + } + ix = kx + iy = ky + } + if uplo == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + a[i*lda+i] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for j := i + 1; j < n; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } + return + } + + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + } + return + } + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for j := 0; j < i; j++ { + a[i*lda+j] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(a[i*lda+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + 
a[i*lda+i] = complex(aii, 0) + } else { + aii := real(a[i*lda+i]) + a[i*lda+i] = complex(aii, 0) + } + ix += incX + iy += incY + } +} + +// Chpmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where alpha and beta are scalars, x and y are vectors, and A is an n×n +// Hermitian matrix in packed form. The imaginary parts of the diagonal +// elements of A are ignored and assumed to be zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Chpmv(uplo blas.Uplo, n int, alpha complex64, ap []complex64, x []complex64, incX int, beta complex64, y []complex64, incY int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up the start indices in X and Y. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // Form y = beta*y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + for i, v := range y[:n] { + y[i] = beta * v + } + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + for i := 0; i < n; i++ { + y[iy] *= beta + iy += incY + } + } + } + } + + if alpha == 0 { + return + } + + // The elements of A are accessed sequentially with one pass through ap. 
+ + var kk int + if uplo == blas.Upper { + // Form y when ap contains the upper triangle. + // Here, kk points to the current diagonal element in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + y[i] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex64 + k := kk + 1 + for j := i + 1; j < n; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + y[i] += alpha * tmp2 + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + y[iy] += tmp1 * complex(real(ap[kk]), 0) + var tmp2 complex64 + jx := ix + jy := iy + for k := kk + 1; k < kk+n-i; k++ { + jx += incX + jy += incY + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + } + y[iy] += alpha * tmp2 + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form y when ap contains the lower triangle. + // Here, kk points to the beginning of current row in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + tmp1 := alpha * x[i] + var tmp2 complex64 + k := kk + for j := 0; j < i; j++ { + y[j] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[j] + k++ + } + aii := complex(real(ap[kk+i]), 0) + y[i] += tmp1*aii + alpha*tmp2 + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + tmp1 := alpha * x[ix] + var tmp2 complex64 + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + y[jy] += tmp1 * cmplx.Conj(ap[k]) + tmp2 += ap[k] * x[jx] + jx += incX + jy += incY + } + aii := complex(real(ap[kk+i]), 0) + y[iy] += tmp1*aii + alpha*tmp2 + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Chpr performs the Hermitian rank-1 operation +// A += alpha * x * x^H +// where alpha is a real scalar, x is a vector, and A is an n×n hermitian matrix +// in packed form. On entry, the imaginary parts of the diagonal elements are +// assumed to be zero, and on return they are set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Chpr(uplo blas.Uplo, n int, alpha float32, x []complex64, incX int, ap []complex64) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + a := ap[kk+1 : kk+n-i] + x := x[i+1 : n] + for j, v := range x { + a[j] += tmp * cmplx.Conj(v) + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + aii := real(ap[kk]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk] = complex(aii, 0) + + tmp := complex(alpha, 0) * xi + jx := ix + incX + a := ap[kk+1 : kk+n-i] + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. 
+ if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + for j, v := range x[:i] { + a[j] += tmp * cmplx.Conj(v) + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if xi != 0 { + tmp := complex(alpha, 0) * xi + a := ap[kk : kk+i] + jx := kx + for k := range a { + a[k] += tmp * cmplx.Conj(x[jx]) + jx += incX + } + + aii := real(ap[kk+i]) + alpha*real(cmplx.Conj(xi)*xi) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + kk += i + 1 + } + } +} + +// Chpr2 performs the Hermitian rank-2 operation +// A += alpha * x * y^H + conj(alpha) * y * x^H +// where alpha is a complex scalar, x and y are n element vectors, and A is an +// n×n Hermitian matrix, supplied in packed form. On entry, the imaginary parts +// of the diagonal elements are assumed to be zero, and on return they are set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Chpr2(uplo blas.Uplo, n int, alpha complex64, x []complex64, incX int, y []complex64, incY int, ap []complex64) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + // Set up start indices in X and Y. 
+ var kx int + if incX < 0 { + kx = (1 - n) * incX + } + var ky int + if incY < 0 { + ky = (1 - n) * incY + } + + // The elements of A are accessed sequentially with one pass through ap. + + var kk int + if uplo == blas.Upper { + // Form A when upper triangle is stored in AP. + // Here, kk points to the current diagonal element in ap. + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk] = complex(aii, 0) + k := kk + 1 + for j := i + 1; j < n; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + kk += n - i + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + aii := real(ap[kk]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk] = complex(aii, 0) + jx := ix + incX + jy := iy + incY + for k := kk + 1; k < kk+n-i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + } else { + ap[kk] = complex(real(ap[kk]), 0) + } + ix += incX + iy += incY + kk += n - i + } + } + return + } + + // Form A when lower triangle is stored in AP. + // Here, kk points to the beginning of current row in ap. 
+ if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + if x[i] != 0 || y[i] != 0 { + tmp1 := alpha * x[i] + tmp2 := cmplx.Conj(alpha) * y[i] + k := kk + for j := 0; j < i; j++ { + ap[k] += tmp1*cmplx.Conj(y[j]) + tmp2*cmplx.Conj(x[j]) + k++ + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[i])) + real(tmp2*cmplx.Conj(x[i])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + kk += i + 1 + } + } else { + ix := kx + iy := ky + for i := 0; i < n; i++ { + if x[ix] != 0 || y[iy] != 0 { + tmp1 := alpha * x[ix] + tmp2 := cmplx.Conj(alpha) * y[iy] + jx := kx + jy := ky + for k := kk; k < kk+i; k++ { + ap[k] += tmp1*cmplx.Conj(y[jy]) + tmp2*cmplx.Conj(x[jx]) + jx += incX + jy += incY + } + aii := real(ap[kk+i]) + real(tmp1*cmplx.Conj(y[iy])) + real(tmp2*cmplx.Conj(x[ix])) + ap[kk+i] = complex(aii, 0) + } else { + ap[kk+i] = complex(real(ap[kk+i]), 0) + } + ix += incX + iy += incY + kk += i + 1 + } + } +} + +// Ctbmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular band matrix, with +// (k+1) diagonals. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctbmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex64, lda int, x []complex64, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if diag == blas.NonUnit { + xi *= a[i*lda] + } + kk := min(k, n-i-1) + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + xi += x[i+j+1] * aij + } + x[i] = xi + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + if diag == blas.NonUnit { + xi *= a[i*lda] + } + kk := min(k, n-i-1) + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + xi *= a[i*lda+k] + } + kk := min(k, i) + for j, aij := range a[i*lda+k-kk : i*lda+k] { + xi += x[i-kk+j] * aij + } + x[i] = xi + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + xi *= a[i*lda+k] + } + kk := min(k, i) + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + xi += x[jx] * aij + jx += incX + } + x[ix] = xi + ix -= incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+j+1] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= a[i*lda] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + 
x[i-kk+j] += xi * aij + } + if diag == blas.NonUnit { + x[i] *= a[i*lda+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] += xi * aij + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda+k] + } + ix += incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+j+1] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + jx := ix + incX + xi := x[ix] + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda]) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] += xi * cmplx.Conj(aij) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+k]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + jx := ix - kk*incX + xi := x[ix] + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] += xi * cmplx.Conj(aij) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+k]) + } + ix += incX + } + } + } + } +} + +// Ctbsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular band matrix +// with (k+1) diagonals. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. 
+// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctbsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, k int, a []complex64, lda int, x []complex64, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + switch trans { + case blas.NoTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex64 + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + sum += x[i+1+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= a[i*lda] + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + kk := min(k, n-i-1) + var sum complex64 + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == blas.NonUnit { + x[ix] /= a[i*lda] + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex64 + for j, aij := range a[i*lda+k-kk : i*lda+k] { + sum += x[i-kk+j] * aij + } + x[i] -= sum + if diag == blas.NonUnit { + x[i] /= a[i*lda+k] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + kk := min(k, i) + var sum complex64 + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : 
i*lda+k] { + sum += x[jx] * aij + jx += incX + } + x[ix] -= sum + if diag == blas.NonUnit { + x[ix] /= a[i*lda+k] + } + ix += incX + } + } + } + case blas.Trans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= a[i*lda] + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+1+j] -= xi * aij + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= a[i*lda] + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] -= xi * aij + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= a[i*lda+k] + } + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] -= xi * aij + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] /= a[i*lda+k] + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] -= xi * aij + jx += incX + } + ix -= incX + } + } + } + case blas.ConjTrans: + if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] /= cmplx.Conj(a[i*lda]) + } + kk := min(k, n-i-1) + xi := x[i] + for j, aij := range a[i*lda+1 : i*lda+kk+1] { + x[i+1+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] /= cmplx.Conj(a[i*lda]) + } + kk := min(k, n-i-1) + xi := x[ix] + jx := ix + incX + for _, aij := range a[i*lda+1 : i*lda+kk+1] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] /= cmplx.Conj(a[i*lda+k]) + } + kk := min(k, i) + xi := x[i] + for j, aij := range a[i*lda+k-kk : i*lda+k] { + x[i-kk+j] -= xi * cmplx.Conj(aij) + } + } + } else { + ix := 
kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] /= cmplx.Conj(a[i*lda+k]) + } + kk := min(k, i) + xi := x[ix] + jx := ix - kk*incX + for _, aij := range a[i*lda+k-kk : i*lda+k] { + x[jx] -= xi * cmplx.Conj(aij) + jx += incX + } + ix -= incX + } + } + } + } +} + +// Ctpmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is an n element vector and A is an n×n triangular matrix, supplied in +// packed form. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctpmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex64, x []complex64, incX int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. 
+ kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + x[i] += c64.DotuUnitary(ap[kk+1:kk+n-i], x[i+1:]) + } + kk += n - i + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + x[ix] += c64.DotuInc(ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + kk += n - i + } + } + } else { + // kk points to the beginning of current row in ap. + kk := n*(n+1)/2 - n + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + if i > 0 { + x[i] += c64.DotuUnitary(ap[kk:kk+i], x[:i]) + } + kk -= i + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + if i > 0 { + x[ix] += c64.DotuInc(ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + kk -= i + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= ap[kk] + } + if n-i-1 > 0 { + c64.AxpyUnitary(xi, ap[kk+1:kk+n-i], x[i+1:n]) + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= ap[kk] + } + if n-i-1 > 0 { + c64.AxpyInc(xi, ap[kk+1:kk+n-i], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. 
+ kk := 0 + if incX == 1 { + x = x[:n] + for i := range x { + if i > 0 { + c64.AxpyUnitary(x[i], ap[kk:kk+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c64.AxpyInc(x[ix], ap[kk:kk+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + // Form x = A^H*x. + if uplo == blas.Upper { + // kk points to the current diagonal element in ap. + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk]) + } + k := kk + 1 + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(ap[k]) + k++ + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk]) + } + jx := ix + incX + k := kk + 1 + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(ap[k]) + jx += incX + k++ + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + // kk points to the beginning of current row in ap. + kk := 0 + if incX == 1 { + x = x[:n] + for i, xi := range x { + for j := 0; j < i; j++ { + x[j] += xi * cmplx.Conj(ap[kk+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(ap[kk+i]) + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + xi := x[ix] + jx := kx + for j := 0; j < i; j++ { + x[jx] += xi * cmplx.Conj(ap[kk+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(ap[kk+i]) + } + ix += incX + kk += i + 1 + } + } + } +} + +// Ctpsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix in +// packed form. 
+// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctpsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, ap []complex64, x []complex64, incX int) { + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through ap. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. 
+ if uplo == blas.Upper { + kk := n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[i] -= c64.DotuUnitary(x[i+1:n], ap[kk+1:kk+n-i]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + kk -= n - i + 1 + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := ap[kk] + if n-i-1 > 0 { + x[ix] -= c64.DotuInc(x, ap[kk+1:kk+n-i], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + kk -= n - i + 1 + } + } + } else { + kk := 0 + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c64.DotuUnitary(x[:i], ap[kk:kk+i]) + } + if diag == blas.NonUnit { + x[i] /= ap[kk+i] + } + kk += i + 1 + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c64.DotuInc(x, ap[kk:kk+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= ap[kk+i] + } + ix += incX + kk += i + 1 + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. 
+ if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= ap[kk] + } + if n-j-1 > 0 { + c64.AxpyUnitary(-x[j], ap[kk+1:kk+n-j], x[j+1:n]) + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= ap[kk] + } + if n-j-1 > 0 { + c64.AxpyInc(-x[jx], ap[kk+1:kk+n-j], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= ap[kk+j] + } + if j > 0 { + c64.AxpyUnitary(-x[j], ap[kk:kk+j], x[:j]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= ap[kk+j] + } + if j > 0 { + c64.AxpyInc(-x[jx], ap[kk:kk+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + kk -= j + } + } + } + return + } + + // Form x = inv(A^H)*x. + if uplo == blas.Upper { + kk := 0 + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk]) + } + xj := x[j] + k := kk + 1 + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(ap[k]) + k++ + } + kk += n - j + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk]) + } + xj := x[jx] + ix := jx + incX + k := kk + 1 + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(ap[k]) + ix += incX + k++ + } + jx += incX + kk += n - j + } + } + } else { + kk := n*(n+1)/2 - n + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(ap[kk+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(ap[kk+i]) + } + kk -= j + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(ap[kk+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(ap[kk+i]) + ix += incX + } + jx 
-= incX + kk -= j + } + } + } +} + +// Ctrmv performs one of the matrix-vector operations +// x = A * x if trans = blas.NoTrans +// x = A^T * x if trans = blas.Trans +// x = A^H * x if trans = blas.ConjTrans +// where x is a vector, and A is an n×n triangular matrix. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Ctrmv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex64, lda int, x []complex64, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = A*x. 
+ if uplo == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[i] += c64.DotuUnitary(a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + x[ix] += c64.DotuInc(a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix += incX + } + } + } else { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if i > 0 { + x[i] += c64.DotuUnitary(a[i*lda:i*lda+i], x[:i]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if i > 0 { + x[ix] += c64.DotuInc(a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + ix -= incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = A^T*x. + if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + if n-i-1 > 0 { + c64.AxpyUnitary(xi, a[i*lda+i+1:i*lda+n], x[i+1:n]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + if n-i-1 > 0 { + c64.AxpyInc(xi, a[i*lda+i+1:i*lda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(ix+incX)) + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + c64.AxpyUnitary(x[i], a[i*lda:i*lda+i], x[:i]) + } + if diag == blas.NonUnit { + x[i] *= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + c64.AxpyInc(x[ix], a[i*lda:i*lda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + } + if diag == blas.NonUnit { + x[ix] *= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + // Form x = A^H*x. 
+ if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + xi := x[i] + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + for j := i + 1; j < n; j++ { + x[j] += xi * cmplx.Conj(a[i*lda+j]) + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + xi := x[ix] + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + jx := ix + incX + for j := i + 1; j < n; j++ { + x[jx] += xi * cmplx.Conj(a[i*lda+j]) + jx += incX + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + x[j] += x[i] * cmplx.Conj(a[i*lda+j]) + } + if diag == blas.NonUnit { + x[i] *= cmplx.Conj(a[i*lda+i]) + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + jx := kx + for j := 0; j < i; j++ { + x[jx] += x[ix] * cmplx.Conj(a[i*lda+j]) + jx += incX + } + if diag == blas.NonUnit { + x[ix] *= cmplx.Conj(a[i*lda+i]) + } + ix += incX + } + } + } +} + +// Ctrsv solves one of the systems of equations +// A * x = b if trans == blas.NoTrans +// A^T * x = b if trans == blas.Trans +// A^H * x = b if trans == blas.ConjTrans +// where b and x are n element vectors and A is an n×n triangular matrix. +// +// On entry, x contains the values of b, and the solution is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Ctrsv(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n int, a []complex64, lda int, x []complex64, incX int) { + switch trans { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch uplo { + default: + panic(badUplo) + case blas.Upper, blas.Lower: + } + switch diag { + default: + panic(badDiag) + case blas.NonUnit, blas.Unit: + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + // Set up start index in X. + var kx int + if incX < 0 { + kx = (1 - n) * incX + } + + // The elements of A are accessed sequentially with one pass through A. + + if trans == blas.NoTrans { + // Form x = inv(A)*x. 
+ if uplo == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[i] -= c64.DotuUnitary(x[i+1:n], a[i*lda+i+1:i*lda+n]) + } + if diag == blas.NonUnit { + x[i] /= aii + } + } + } else { + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + aii := a[i*lda+i] + if n-i-1 > 0 { + x[ix] -= c64.DotuInc(x, a[i*lda+i+1:i*lda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + } + if diag == blas.NonUnit { + x[ix] /= aii + } + ix -= incX + } + } + } else { + if incX == 1 { + for i := 0; i < n; i++ { + if i > 0 { + x[i] -= c64.DotuUnitary(x[:i], a[i*lda:i*lda+i]) + } + if diag == blas.NonUnit { + x[i] /= a[i*lda+i] + } + } + } else { + ix := kx + for i := 0; i < n; i++ { + if i > 0 { + x[ix] -= c64.DotuInc(x, a[i*lda:i*lda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + } + if diag == blas.NonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + } + } + return + } + + if trans == blas.Trans { + // Form x = inv(A^T)*x. + if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + if n-j-1 > 0 { + c64.AxpyUnitary(-x[j], a[j*lda+j+1:j*lda+n], x[j+1:n]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if n-j-1 > 0 { + c64.AxpyInc(-x[jx], a[j*lda+j+1:j*lda+n], x, uintptr(n-j-1), 1, uintptr(incX), 0, uintptr(jx+incX)) + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= a[j*lda+j] + } + xj := x[j] + if j > 0 { + c64.AxpyUnitary(-xj, a[j*lda:j*lda+j], x[:j]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= a[j*lda+j] + } + if j > 0 { + c64.AxpyInc(-x[jx], a[j*lda:j*lda+j], x, uintptr(j), 1, uintptr(incX), 0, uintptr(kx)) + } + jx -= incX + } + } + } + return + } + + // Form x = inv(A^H)*x. 
+ if uplo == blas.Upper { + if incX == 1 { + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := j + 1; i < n; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + for j := 0; j < n; j++ { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := jx + incX + for i := j + 1; i < n; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx += incX + } + } + } else { + if incX == 1 { + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[j] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[j] + for i := 0; i < j; i++ { + x[i] -= xj * cmplx.Conj(a[j*lda+i]) + } + } + } else { + jx := kx + (n-1)*incX + for j := n - 1; j >= 0; j-- { + if diag == blas.NonUnit { + x[jx] /= cmplx.Conj(a[j*lda+j]) + } + xj := x[jx] + ix := kx + for i := 0; i < j; i++ { + x[ix] -= xj * cmplx.Conj(a[j*lda+i]) + ix += incX + } + jx -= incX + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go new file mode 100644 index 0000000000..08e1927f79 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level2float32.go @@ -0,0 +1,2296 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +var _ blas.Float32Level2 = Implementation{} + +// Sger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sger(m, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + f32.Ger(uintptr(m), uintptr(n), + alpha, + x, uintptr(incX), + y, uintptr(incY), + a, uintptr(lda)) +} + +// Sgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if tA == blas.NoTrans +// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans +// where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, +// x and y are vectors, and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { + panic(shortA) + } + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + f32.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f32.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + f32.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + // i and j are indices of the compacted banded matrix. 
+ // off is the offset into the dense matrix (off + j = densej) + nCol := kU + 1 + kL + if tA == blas.NoTrans { + iy := ky + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + xtmp := x[off : off+u-l] + var sum float32 + for j, v := range atmp { + sum += xtmp[j] * v + } + y[iy] += sum * alpha + iy += incY + } + return + } + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + jx := kx + var sum float32 + for _, v := range atmp { + sum += x[off*incX+jx] * v + jx += incX + } + y[iy] += sum * alpha + iy += incY + } + return + } + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[i] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + } + return + } + ix := kx + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[ix] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + ix += incX + } +} + +// Strmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Strmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + nonUnit := d != blas.Unit + if n == 1 { + if nonUnit { + x[0] *= a[0] + } + return + } + var kx int + if incX <= 0 { + kx = -(n - 1) * incX + } + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp = a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f32.DotUnitary(a[ilda+i+1:ilda+n], x[i+1:n]) + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f32.DotInc(x, a[ilda+i+1:ilda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp += a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f32.DotUnitary(a[ilda:ilda+i], x[:i]) + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float32 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f32.DotInc(x, a[ilda:ilda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + ix -= incX + } + return + } + // Cases where a is 
transposed. + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[i] + f32.AxpyUnitary(xi, a[ilda+i+1:ilda+n], x[i+1:n]) + if nonUnit { + x[i] *= a[ilda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[ix] + f32.AxpyInc(xi, a[ilda+i+1:ilda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(kx+(i+1)*incX)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[i] + f32.AxpyUnitary(xi, a[ilda:ilda+i], x[:i]) + if nonUnit { + x[i] *= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[ix] + f32.AxpyInc(xi, a[ilda:ilda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix += incX + } +} + +// Strsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Strsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. 
+ if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + if n == 1 { + if d == blas.NonUnit { + x[0] /= a[0] + } + return + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + var sum float32 + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := i + j + 1 + sum += x[jv] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + var sum float32 + jx := ix + incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + var sum float32 + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + sum += x[j] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + var sum float32 + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := j + i + 1 + x[jv] -= v * xi + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + (i+1)*incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + x[j] -= v * xi + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix -= incX + } +} + +// Ssymv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix, x and y are vectors, and alpha and +// beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssymv(ul blas.Uplo, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. 
+ if alpha == 0 && beta == 1 { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f32.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f32.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f32.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * a[0] * x[0] + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := x[i] * a[i*lda+i] + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jp := j + i + 1 + sum += x[jp] * v + y[jy] += xv * v + jy += incY + } + y[iy] += alpha * sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := x[ix] * a[i*lda+i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + } + return + } + // Cases where a is lower triangular. 
+ if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + jy := ky + xv := alpha * x[i] + atmp := a[i*lda : i*lda+i] + var sum float32 + for j, v := range atmp { + sum += x[j] * v + y[jy] += xv * v + jy += incY + } + sum += x[i] * a[i*lda+i] + sum *= alpha + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xv := alpha * x[ix] + atmp := a[i*lda : i*lda+i] + var sum float32 + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + sum += x[ix] * a[i*lda+i] + sum *= alpha + y[iy] += sum + ix += incX + iy += incY + } +} + +// Stbmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonunit := d != blas.Unit + + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float32 + atmp := a[i*lda:] + xtmp := x[i:] + for j := 1; j < u; j++ { + sum += xtmp[j] * atmp[j] + } + if nonunit { + sum += xtmp[0] * atmp[0] + } else { + sum += xtmp[0] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float32 + atmp := a[i*lda:] + jx := incX + for j := 1; j < u; j++ { + sum += x[ix+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[0] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float32 + for j := l; j < k; j++ { + sum += x[i-k+j] * atmp[j] + } + if nonunit { + sum += x[i] * atmp[k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float32 + jx := l * incX + for j := l; j < k; j++ { + sum += x[ix-k*incX+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[k] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float32 + for j := 1; j < u; j++ { + sum += x[i-j] * a[(i-j)*lda+j] + } + if nonunit { + sum += x[i] * a[i*lda] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float32 + jx := incX + for j := 1; j < u; j++ { + sum += x[ix-jx] * a[(i-j)*lda+j] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda] + 
} else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var sum float32 + for j := 0; j < u; j++ { + sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] + } + if nonunit { + sum += x[i] * a[i*lda+k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var ( + sum float32 + jx int + ) + for j := 0; j < u; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda+k] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } +} + +// Stpmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Stpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xi += v * xtmp[j] + } + x[i] = xi + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset += n - i + ix += incX + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset-i : offset] + for j, v := range atmp { + xi += v * x[j] + } + x[i] = xi + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset -= i + 1 + ix -= incX + } + return + } + // Cases where ap is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + jx := kx + (i+1)*incX + atmp := ap[offset+1 : offset+n-i] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + offset -= n - i + 1 + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + jx := kx + atmp := ap[offset-i : offset] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + ix += incX + offset += i + 2 + } +} + +// Stbsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, +// and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Stbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + // Form x = A^-1 x. + // Several cases below use subslices for speed improvement. + // The incX != 1 cases usually do not because incX may be negative. 
+ if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + atmp := a[i*lda+1:] + xtmp := x[i+1 : i+bands+1] + var sum float32 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + max := k + 1 + if i+max > n { + max = n - i + } + atmp := a[i*lda:] + var ( + jx int + sum float32 + ) + for j := 1; j < max; j++ { + jx += incX + sum += x[ix+jx] * atmp[j] + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[0] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + xtmp := x[i-bands : i] + var sum float32 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= atmp[bands] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + var ( + sum float32 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * atmp[j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[bands] + } + ix += incX + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var sum float32 + for j := 0; j < bands; j++ { + sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var ( + sum float32 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda] + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var sum float32 + xtmp := x[i+1 : i+1+bands] + for j, v := range xtmp { + sum += v * a[(i+j+1)*lda+k-j-1] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+k] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var ( + sum float32 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+k] + } + ix -= incX + } +} + +// Ssbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric band matrix with k super-diagonals, x and y are +// vectors, and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssbmv(ul blas.Uplo, n, k int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. 
+ if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up indexes + lenX := n + lenY := n + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f32.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f32.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f32.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[i] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[i+j] * v + y[iy+jy] += tmp * v + jy += incY + } + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[ix] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jx := incX + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + return + } + + // Casses where a has bands below the diagonal. 
+ if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[i] + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += tmp * v + jy += incY + } + y[iy] += tmp * atmp[k] + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[ix] + jx := l * incX + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += tmp * atmp[k] + ix += incX + iy += incY + } +} + +// Ssyr performs the symmetric rank-one update +// A += alpha * x * x^T +// where A is an n×n symmetric matrix, and x is a vector. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssyr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, a []float32, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. 
+ if alpha == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda+i : i*lda+n] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += v * tmp + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + jx := ix + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += x[jx] * tmp + jx += incX + } + } + ix += incX + } + return + } + // Cases where a is lower triangular. + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda:] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += tmp * v + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + atmp := a[i*lda:] + jx := kx + for j := 0; j < i+1; j++ { + atmp[j] += tmp * x[jx] + jx += incX + } + } + ix += incX + } +} + +// Ssyr2 performs the symmetric rank-two update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Ssyr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } +} + +// Stpsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Stpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + offset = n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + var sum float32 + for j, v := range atmp { + sum += v * xtmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + var sum float32 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix -= incX + offset -= n - i + 1 + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i : offset] + var sum float32 + for j, v := range atmp { + sum += v * x[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i : offset] + var sum float32 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix += incX + offset += i + 2 + } + return + } + // 
Cases where ap is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] -= v * xi + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] -= v * xi + } + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix -= incX + offset -= i + 1 + } +} + +// Sspmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sspmv(ul blas.Uplo, n int, alpha float32, ap []float32, x []float32, incX int, beta float32, y []float32, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f32.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f32.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f32.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * ap[0] * x[0] + return + } + var offset int // Offset is the index of (i,i). 
+ if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := ap[offset] * x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + jy := ky + (i+1)*incY + for j, v := range atmp { + sum += v * xtmp[j] + y[jy] += v * xv + jy += incY + } + y[iy] += alpha * sum + iy += incY + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := ap[offset] * x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + atmp := ap[offset-i : offset] + jy := ky + var sum float32 + for j, v := range atmp { + sum += v * x[j] + y[jy] += v * xv + jy += incY + } + sum += ap[offset] * x[i] + y[iy] += alpha * sum + iy += incY + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + atmp := ap[offset-i : offset] + jx := kx + jy := ky + var sum float32 + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + + sum += ap[offset] * x[ix] + y[iy] += alpha * sum + ix += incX + iy += incY + offset += i + 2 + } +} + +// Sspr performs the symmetric rank-one operation +// A += alpha * x * x^T +// where A is an n×n symmetric matrix in packed format, x is a vector, and +// alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sspr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, ap []float32) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. 
+ if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xv := alpha * x[i] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + i*incX + atmp := ap[offset:] + xv := alpha * x[ix] + for j := 0; j < n-i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xv := alpha * x[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i:] + xv := alpha * x[ix] + for j := 0; j <= i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += i + 2 + } +} + +// Sspr2 performs the symmetric rank-2 update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha is a scalar. +// +// Float32 implementations are autogenerated and not directly tested. +func (Implementation) Sspr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, ap []float32) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xi := x[i] + yi := y[i] + xtmp := x[i:n] + ytmp := y[i:n] + for j, v := range xtmp { + atmp[j] += alpha * (xi*ytmp[j] + v*yi) + } + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + atmp := ap[offset:] + xi := x[ix] + yi := y[iy] + for j := 0; j < n-i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xi := x[i] + yi := y[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += alpha * (xi*y[j] + v*yi) + } + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + atmp := ap[offset-i:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += i + 2 + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go new file mode 100644 index 0000000000..261257888d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level2float64.go @@ -0,0 +1,2264 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var _ blas.Float64Level2 = Implementation{} + +// Dger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func (Implementation) Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (m-1)*incX) || (incX < 0 && len(x) <= (1-m)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(m-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + f64.Ger(uintptr(m), uintptr(n), + alpha, + x, uintptr(incX), + y, uintptr(incY), + a, uintptr(lda)) +} + +// Dgbmv performs one of the matrix-vector operations +// y = alpha * A * x + beta * y if tA == blas.NoTrans +// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans +// where A is an m×n band matrix with kL sub-diagonals and kU super-diagonals, +// x and y are vectors, and alpha and beta are scalars. 
+func (Implementation) Dgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if kL < 0 { + panic(kLLT0) + } + if kU < 0 { + panic(kULT0) + } + if lda < kL+kU+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(min(m, n+kL)-1)+kL+kU+1 { + panic(shortA) + } + lenX := m + lenY := n + if tA == blas.NoTrans { + lenX = n + lenY = m + } + if (incX > 0 && len(x) <= (lenX-1)*incX) || (incX < 0 && len(x) <= (1-lenX)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (lenY-1)*incY) || (incY < 0 && len(y) <= (1-lenY)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:lenY] { + y[i] = 0 + } + } else { + f64.ScalUnitary(beta, y[:lenY]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < lenY; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f64.ScalInc(beta, y, uintptr(lenY), uintptr(incY)) + } else { + f64.ScalInc(beta, y, uintptr(lenY), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + // i and j are indices of the compacted banded matrix. 
+ // off is the offset into the dense matrix (off + j = densej) + nCol := kU + 1 + kL + if tA == blas.NoTrans { + iy := ky + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + xtmp := x[off : off+u-l] + var sum float64 + for j, v := range atmp { + sum += xtmp[j] * v + } + y[iy] += sum * alpha + iy += incY + } + return + } + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + jx := kx + var sum float64 + for _, v := range atmp { + sum += x[off*incX+jx] * v + jx += incX + } + y[iy] += sum * alpha + iy += incY + } + return + } + if incX == 1 { + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[i] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + } + return + } + ix := kx + for i := 0; i < min(m, n+kL); i++ { + l := max(0, kL-i) + u := min(nCol, n+kL-i) + off := max(0, i-kL) + atmp := a[i*lda+l : i*lda+u] + tmp := alpha * x[ix] + jy := ky + for _, v := range atmp { + y[jy+off*incY] += tmp * v + jy += incY + } + ix += incX + } +} + +// Dtrmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x is a vector. +func (Implementation) Dtrmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. 
+ if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + nonUnit := d != blas.Unit + if n == 1 { + if nonUnit { + x[0] *= a[0] + } + return + } + var kx int + if incX <= 0 { + kx = -(n - 1) * incX + } + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp = a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f64.DotUnitary(a[ilda+i+1:ilda+n], x[i+1:n]) + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f64.DotInc(x, a[ilda+i+1:ilda+n], uintptr(n-i-1), uintptr(incX), 1, uintptr(ix+incX), 0) + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp += a[ilda+i] * x[i] + } else { + tmp = x[i] + } + x[i] = tmp + f64.DotUnitary(a[ilda:ilda+i], x[:i]) + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + var tmp float64 + if nonUnit { + tmp = a[ilda+i] * x[ix] + } else { + tmp = x[ix] + } + x[ix] = tmp + f64.DotInc(x, a[ilda:ilda+i], uintptr(i), uintptr(incX), 1, uintptr(kx), 0) + ix -= incX + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[i] + f64.AxpyUnitary(xi, a[ilda+i+1:ilda+n], x[i+1:n]) + if nonUnit { + x[i] *= a[ilda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + ilda := i * lda + xi := x[ix] + f64.AxpyInc(xi, a[ilda+i+1:ilda+n], x, uintptr(n-i-1), 1, uintptr(incX), 0, uintptr(kx+(i+1)*incX)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[i] + f64.AxpyUnitary(xi, a[ilda:ilda+i], x[:i]) + if nonUnit { + x[i] *= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + ilda := i * lda + xi := x[ix] + f64.AxpyInc(xi, a[ilda:ilda+i], x, uintptr(i), 1, uintptr(incX), 0, uintptr(kx)) + if nonUnit { + x[ix] *= a[ilda+i] + } + ix += incX + } +} + +// Dtrsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. +func (Implementation) Dtrsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + if n == 1 { + if d == blas.NonUnit { + x[0] /= a[0] + } + return + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + var sum float64 + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := i + j + 1 + sum += x[jv] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + var sum float64 + jx := ix + incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + var sum float64 + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + sum += x[j] * v + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+i] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + var sum float64 + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + sum += x[jx] * v + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+i] + } + ix += incX + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jv := j + i + 1 + x[jv] -= v * xi + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + (i+1)*incX + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= a[i*lda+i] + } + xi := x[i] + atmp := a[i*lda : i*lda+i] + for j, v := range atmp { + x[j] -= v * xi + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= a[i*lda+i] + } + xi := x[ix] + jx := kx + atmp := a[i*lda : i*lda+i] + for _, v := range atmp { + x[jx] -= v * xi + jx += incX + } + ix -= incX + } +} + +// Dsymv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix, x and y are vectors, and alpha and +// beta are scalars. +func (Implementation) Dsymv(ul blas.Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+n { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. 
+ if alpha == 0 && beta == 1 { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f64.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f64.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f64.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * a[0] * x[0] + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := x[i] * a[i*lda+i] + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for j, v := range atmp { + jp := j + i + 1 + sum += x[jp] * v + y[jy] += xv * v + jy += incY + } + y[iy] += alpha * sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := x[ix] * a[i*lda+i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + atmp := a[i*lda+i+1 : i*lda+n] + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + } + return + } + // Cases where a is lower triangular. 
+ if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + jy := ky + xv := alpha * x[i] + atmp := a[i*lda : i*lda+i] + var sum float64 + for j, v := range atmp { + sum += x[j] * v + y[jy] += xv * v + jy += incY + } + sum += x[i] * a[i*lda+i] + sum *= alpha + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xv := alpha * x[ix] + atmp := a[i*lda : i*lda+i] + var sum float64 + for _, v := range atmp { + sum += x[jx] * v + y[jy] += xv * v + jx += incX + jy += incY + } + sum += x[ix] * a[i*lda+i] + sum *= alpha + y[iy] += sum + ix += incX + iy += incY + } +} + +// Dtbmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, and x is a vector. +func (Implementation) Dtbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonunit := d != blas.Unit + + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float64 + atmp := a[i*lda:] + xtmp := x[i:] + for j := 1; j < u; j++ { + sum += xtmp[j] * atmp[j] + } + if nonunit { + sum += xtmp[0] * atmp[0] + } else { + sum += xtmp[0] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := min(1+k, n-i) + var sum float64 + atmp := a[i*lda:] + jx := incX + for j := 1; j < u; j++ { + sum += x[ix+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[0] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float64 + for j := l; j < k; j++ { + sum += x[i-k+j] * atmp[j] + } + if nonunit { + sum += x[i] * atmp[k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + l := max(0, k-i) + atmp := a[i*lda:] + var sum float64 + jx := l * incX + for j := l; j < k; j++ { + sum += x[ix-k*incX+jx] * atmp[j] + jx += incX + } + if nonunit { + sum += x[ix] * atmp[k] + } else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float64 + for j := 1; j < u; j++ { + sum += x[i-j] * a[(i-j)*lda+j] + } + if nonunit { + sum += x[i] * a[i*lda] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + u := k + 1 + if i < u { + u = i + 1 + } + var sum float64 + jx := incX + for j := 1; j < u; j++ { + sum += x[ix-jx] * a[(i-j)*lda+j] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda] + 
} else { + sum += x[ix] + } + x[ix] = sum + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var sum float64 + for j := 0; j < u; j++ { + sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] + } + if nonunit { + sum += x[i] * a[i*lda+k] + } else { + sum += x[i] + } + x[i] = sum + } + return + } + ix := kx + for i := 0; i < n; i++ { + u := k + if i+k >= n { + u = n - i - 1 + } + var ( + sum float64 + jx int + ) + for j := 0; j < u; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + if nonunit { + sum += x[ix] * a[i*lda+k] + } else { + sum += x[ix] + } + x[ix] = sum + ix += incX + } +} + +// Dtpmv performs one of the matrix-vector operations +// x = A * x if tA == blas.NoTrans +// x = A^T * x if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x is a vector. +func (Implementation) Dtpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xi += v * xtmp[j] + } + x[i] = xi + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset += n - i + ix += incX + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + if nonUnit { + xi *= ap[offset] + } + atmp := ap[offset-i : offset] + for j, v := range atmp { + xi += v * x[j] + } + x[i] = xi + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + if nonUnit { + xix *= ap[offset] + } + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + xix += v * x[jx] + jx += incX + } + x[ix] = xix + offset -= i + 1 + ix -= incX + } + return + } + // Cases where ap is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + xix := x[ix] + jx := kx + (i+1)*incX + atmp := ap[offset+1 : offset+n-i] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + offset -= n - i + 1 + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] += v * xi + } + if nonUnit { + x[i] *= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + xix := x[ix] + jx := kx + atmp := ap[offset-i : offset] + for _, v := range atmp { + x[jx] += v * xix + jx += incX + } + if nonUnit { + x[ix] *= ap[offset] + } + ix += incX + offset += i + 2 + } +} + +// Dtbsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×n triangular band matrix with k+1 diagonals, +// and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. 
+func (Implementation) Dtbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + nonUnit := d == blas.NonUnit + // Form x = A^-1 x. + // Several cases below use subslices for speed improvement. + // The incX != 1 cases usually do not because incX may be negative. 
+ if tA == blas.NoTrans { + if ul == blas.Upper { + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + atmp := a[i*lda+1:] + xtmp := x[i+1 : i+bands+1] + var sum float64 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + max := k + 1 + if i+max > n { + max = n - i + } + atmp := a[i*lda:] + var ( + jx int + sum float64 + ) + for j := 1; j < max; j++ { + jx += incX + sum += x[ix+jx] * atmp[j] + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[0] + } + ix -= incX + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + xtmp := x[i-bands : i] + var sum float64 + for j, v := range xtmp { + sum += v * atmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= atmp[bands] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + atmp := a[i*lda+k-bands:] + var ( + sum float64 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * atmp[j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= atmp[bands] + } + ix += incX + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var sum float64 + for j := 0; j < bands; j++ { + sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda] + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + bands := k + if i-k < 0 { + bands = i + } + var ( + sum float64 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda] + } + ix += incX + } + return + } + if incX == 1 { + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var sum float64 + xtmp := x[i+1 : i+1+bands] + for j, v := range xtmp { + sum += v * a[(i+j+1)*lda+k-j-1] + } + x[i] -= sum + if nonUnit { + x[i] /= a[i*lda+k] + } + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + bands := k + if i+bands >= n { + bands = n - i - 1 + } + var ( + sum float64 + jx int + ) + for j := 0; j < bands; j++ { + sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= a[i*lda+k] + } + ix -= incX + } +} + +// Dsbmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric band matrix with k super-diagonals, x and y are +// vectors, and alpha and beta are scalars. +func (Implementation) Dsbmv(ul blas.Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + if lda < k+1 { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(n-1)+k+1 { + panic(shortA) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up indexes + lenX := n + lenY := n + var kx, ky int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if incY < 0 { + ky = -(lenY - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f64.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f64.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f64.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[i] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[i+j] * v + y[iy+jy] += tmp * v + jy += incY + } + y[iy] += sum + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + atmp := a[i*lda:] + tmp := alpha * x[ix] + sum := tmp * atmp[0] + u := min(k, n-i-1) + jx := incX + jy := incY + for j := 1; j <= u; j++ { + v := atmp[j] + sum += alpha * x[ix+jx] * v + y[iy+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += sum + ix += incX + iy += incY + } + return + } + + // Casses where a has bands below the diagonal. 
+ if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[i] + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[i-k+j] + y[iy-k*incY+jy] += tmp * v + jy += incY + } + y[iy] += tmp * atmp[k] + iy += incY + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + l := max(0, k-i) + tmp := alpha * x[ix] + jx := l * incX + jy := l * incY + atmp := a[i*lda:] + for j := l; j < k; j++ { + v := atmp[j] + y[iy] += alpha * v * x[ix-k*incX+jx] + y[iy-k*incY+jy] += tmp * v + jx += incX + jy += incY + } + y[iy] += tmp * atmp[k] + ix += incX + iy += incY + } +} + +// Dsyr performs the symmetric rank-one update +// A += alpha * x * x^T +// where A is an n×n symmetric matrix, and x is a vector. +func (Implementation) Dsyr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda+i : i*lda+n] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += v * tmp + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + jx := ix + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += x[jx] * tmp + jx += incX + } + } + ix += incX + } + return + } + // Cases where a is lower triangular. 
+ if incX == 1 { + for i := 0; i < n; i++ { + tmp := x[i] * alpha + if tmp != 0 { + atmp := a[i*lda:] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += tmp * v + } + } + } + return + } + ix := kx + for i := 0; i < n; i++ { + tmp := x[ix] * alpha + if tmp != 0 { + atmp := a[i*lda:] + jx := kx + for j := 0; j < i+1; j++ { + atmp[j] += tmp * x[jx] + jx += incX + } + } + ix += incX + } +} + +// Dsyr2 performs the symmetric rank-two update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix, x and y are vectors, and alpha is a scalar. +func (Implementation) Dsyr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if lda < max(1, n) { + panic(badLdA) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(a) < lda*(n-1)+n { + panic(shortA) + } + + // Quick return if possible. 
+ if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := i; j < n; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + xi := x[i] + yi := y[i] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[j] + x[j]*yi) + } + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + xi := x[ix] + yi := y[iy] + atmp := a[i*lda:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + } +} + +// Dtpsv solves one of the systems of equations +// A * x = b if tA == blas.NoTrans +// A^T * x = b if tA == blas.Trans or blas.ConjTrans +// where A is an n×n triangular matrix in packed format, and x and b are vectors. +// +// At entry to the function, x contains the values of b, and the result is +// stored in-place into x. +// +// No test for singularity or near-singularity is included in this +// routine. Such tests must be performed before calling this routine. 
+func (Implementation) Dtpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + + var kx int + if incX < 0 { + kx = -(n - 1) * incX + } + + nonUnit := d == blas.NonUnit + var offset int // Offset is the index of (i,i) + if tA == blas.NoTrans { + if ul == blas.Upper { + offset = n*(n+1)/2 - 1 + if incX == 1 { + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + var sum float64 + for j, v := range atmp { + sum += v * xtmp[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset -= n - i + 1 + } + return + } + ix := kx + (n-1)*incX + for i := n - 1; i >= 0; i-- { + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + var sum float64 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix -= incX + offset -= n - i + 1 + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i : offset] + var sum float64 + for j, v := range atmp { + sum += v * x[j] + } + x[i] -= sum + if nonUnit { + x[i] /= ap[offset] + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i : offset] + var sum float64 + for _, v := range atmp { + sum += v * x[jx] + jx += incX + } + x[ix] -= sum + if nonUnit { + x[ix] /= ap[offset] + } + ix += incX + offset += i + 2 + } + return + } + // 
Cases where ap is transposed. + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + for j, v := range atmp { + xtmp[j] -= v * xi + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[i] /= ap[offset] + } + xi := x[i] + atmp := ap[offset-i : offset] + for j, v := range atmp { + x[j] -= v * xi + } + offset -= i + 1 + } + return + } + ix := kx + (n-1)*incX + offset = n*(n+1)/2 - 1 + for i := n - 1; i >= 0; i-- { + if nonUnit { + x[ix] /= ap[offset] + } + xix := x[ix] + atmp := ap[offset-i : offset] + jx := kx + for _, v := range atmp { + x[jx] -= v * xix + jx += incX + } + ix -= incX + offset -= i + 1 + } +} + +// Dspmv performs the matrix-vector operation +// y = alpha * A * x + beta * y +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha and beta are scalars. +func (Implementation) Dspmv(ul blas.Uplo, n int, alpha float64, ap []float64, x []float64, incX int, beta float64, y []float64, incY int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + // Set up start points + var kx, ky int + if incX < 0 { + kx = -(n - 1) * incX + } + if incY < 0 { + ky = -(n - 1) * incY + } + + // Form y = beta * y. + if beta != 1 { + if incY == 1 { + if beta == 0 { + for i := range y[:n] { + y[i] = 0 + } + } else { + f64.ScalUnitary(beta, y[:n]) + } + } else { + iy := ky + if beta == 0 { + for i := 0; i < n; i++ { + y[iy] = 0 + iy += incY + } + } else { + if incY > 0 { + f64.ScalInc(beta, y, uintptr(n), uintptr(incY)) + } else { + f64.ScalInc(beta, y, uintptr(n), uintptr(-incY)) + } + } + } + } + + if alpha == 0 { + return + } + + if n == 1 { + y[0] += alpha * ap[0] * x[0] + return + } + var offset int // Offset is the index of (i,i). 
+ if ul == blas.Upper { + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + sum := ap[offset] * x[i] + atmp := ap[offset+1 : offset+n-i] + xtmp := x[i+1:] + jy := ky + (i+1)*incY + for j, v := range atmp { + sum += v * xtmp[j] + y[jy] += v * xv + jy += incY + } + y[iy] += alpha * sum + iy += incY + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + sum := ap[offset] * x[ix] + atmp := ap[offset+1 : offset+n-i] + jx := kx + (i+1)*incX + jy := ky + (i+1)*incY + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + y[iy] += alpha * sum + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 { + iy := ky + for i := 0; i < n; i++ { + xv := x[i] * alpha + atmp := ap[offset-i : offset] + jy := ky + var sum float64 + for j, v := range atmp { + sum += v * x[j] + y[jy] += v * xv + jy += incY + } + sum += ap[offset] * x[i] + y[iy] += alpha * sum + iy += incY + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + xv := x[ix] * alpha + atmp := ap[offset-i : offset] + jx := kx + jy := ky + var sum float64 + for _, v := range atmp { + sum += v * x[jx] + y[jy] += v * xv + jx += incX + jy += incY + } + + sum += ap[offset] * x[ix] + y[iy] += alpha * sum + ix += incX + iy += incY + offset += i + 2 + } +} + +// Dspr performs the symmetric rank-one operation +// A += alpha * x * x^T +// where A is an n×n symmetric matrix in packed format, x is a vector, and +// alpha is a scalar. +func (Implementation) Dspr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, ap []float64) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + lenX := n + var kx int + if incX < 0 { + kx = -(lenX - 1) * incX + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xv := alpha * x[i] + xtmp := x[i:n] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += n - i + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + i*incX + atmp := ap[offset:] + xv := alpha * x[ix] + for j := 0; j < n-i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += n - i + } + return + } + if incX == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xv := alpha * x[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += xv * v + } + offset += i + 2 + } + return + } + ix := kx + for i := 0; i < n; i++ { + jx := kx + atmp := ap[offset-i:] + xv := alpha * x[ix] + for j := 0; j <= i; j++ { + atmp[j] += xv * x[jx] + jx += incX + } + ix += incX + offset += i + 2 + } +} + +// Dspr2 performs the symmetric rank-2 update +// A += alpha * x * y^T + alpha * y * x^T +// where A is an n×n symmetric matrix in packed format, x and y are vectors, +// and alpha is a scalar. +func (Implementation) Dspr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, ap []float64) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if n < 0 { + panic(nLT0) + } + if incX == 0 { + panic(zeroIncX) + } + if incY == 0 { + panic(zeroIncY) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if (incX > 0 && len(x) <= (n-1)*incX) || (incX < 0 && len(x) <= (1-n)*incX) { + panic(shortX) + } + if (incY > 0 && len(y) <= (n-1)*incY) || (incY < 0 && len(y) <= (1-n)*incY) { + panic(shortY) + } + if len(ap) < n*(n+1)/2 { + panic(shortAP) + } + + // Quick return if possible. + if alpha == 0 { + return + } + + var ky, kx int + if incY < 0 { + ky = -(n - 1) * incY + } + if incX < 0 { + kx = -(n - 1) * incX + } + var offset int // Offset is the index of (i,i). + if ul == blas.Upper { + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset:] + xi := x[i] + yi := y[i] + xtmp := x[i:n] + ytmp := y[i:n] + for j, v := range xtmp { + atmp[j] += alpha * (xi*ytmp[j] + v*yi) + } + offset += n - i + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + i*incX + jy := ky + i*incY + atmp := ap[offset:] + xi := x[ix] + yi := y[iy] + for j := 0; j < n-i; j++ { + atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += n - i + } + return + } + if incX == 1 && incY == 1 { + for i := 0; i < n; i++ { + atmp := ap[offset-i:] + xi := x[i] + yi := y[i] + xtmp := x[:i+1] + for j, v := range xtmp { + atmp[j] += alpha * (xi*y[j] + v*yi) + } + offset += i + 2 + } + return + } + ix := kx + iy := ky + for i := 0; i < n; i++ { + jx := kx + jy := ky + atmp := ap[offset-i:] + for j := 0; j <= i; j++ { + atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) + jx += incX + jy += incY + } + ix += incX + iy += incY + offset += i + 2 + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go new file mode 100644 index 0000000000..e4a2bb5e9b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx128.go @@ -0,0 +1,1715 @@ +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "math/cmplx" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c128" +) + +var _ blas.Complex128Level3 = Implementation{} + +// Zgemm performs one of the matrix-matrix operations +// C = alpha * op(A) * op(B) + beta * C +// where op(X) is one of +// op(X) = X or op(X) = X^T or op(X) = X^H, +// alpha and beta are scalars, and A, B and C are matrices, with op(A) an m×k matrix, +// op(B) a k×n matrix and C an m×n matrix. +func (Implementation) Zgemm(tA, tB blas.Transpose, m, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + switch tA { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch tB { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + } + rowA, colA := m, k + if tA != blas.NoTrans { + rowA, colA = k, m + } + if lda < max(1, colA) { + panic(badLdA) + } + rowB, colB := k, n + if tB != blas.NoTrans { + rowB, colB = n, k + } + if ldb < max(1, colB) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(b) < (rowB-1)*ldb+colB { + panic(shortB) + } + if len(c) < (m-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + } + return + } + + switch tA { + case blas.NoTrans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A * B + beta * C. 
+ for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * b[l*ldb+j] + } + } + } + case blas.Trans: + // Form C = alpha * A * B^T + beta * C. + for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * b[j*ldb+l] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A * B^H + beta * C. + for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * cmplx.Conj(b[j*ldb+l]) + } + } + } + } + case blas.Trans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A^T * B + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * b[l*ldb+j] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.Trans: + // Form C = alpha * A^T * B^T + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * b[j*ldb+l] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A^T * B^H + beta * C. 
+ for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * cmplx.Conj(b[j*ldb+l]) + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + case blas.ConjTrans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A^H * B + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * b[l*ldb+j] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.Trans: + // Form C = alpha * A^H * B^T + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * b[j*ldb+l] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A^H * B^H + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex128 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * cmplx.Conj(b[j*ldb+l]) + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Zhemm performs one of the matrix-matrix operations +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// where alpha and beta are scalars, A is an m×m or n×n hermitian matrix and B +// and C are m×n matrices. The imaginary parts of the diagonal elements of A are +// assumed to be zero. 
+func (Implementation) Zhemm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(na-1)+na { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + c128.ScalUnitary(beta, ci) + } + } + return + } + + if side == blas.Left { + // Form C = alpha*A*B + beta*C. 
+ for i := 0; i < m; i++ { + atmp := alpha * complex(real(a[i*lda+i]), 0) + bi := b[i*ldb : i*ldb+n] + ci := c[i*ldc : i*ldc+n] + if beta == 0 { + for j, bij := range bi { + ci[j] = atmp * bij + } + } else { + for j, bij := range bi { + ci[j] = atmp*bij + beta*ci[j] + } + } + if uplo == blas.Upper { + for k := 0; k < i; k++ { + atmp = alpha * cmplx.Conj(a[k*lda+i]) + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[i*lda+k] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } else { + for k := 0; k < i; k++ { + atmp = alpha * a[i*lda+k] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * cmplx.Conj(a[k*lda+i]) + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } + } + } else { + // Form C = alpha*B*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + abij := alpha * b[i*ldb+j] + aj := a[j*lda+j+1 : j*lda+n] + bi := b[i*ldb+j+1 : i*ldb+n] + ci := c[i*ldc+j+1 : i*ldc+n] + var tmp complex128 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * cmplx.Conj(ajk) + } + ajj := complex(real(a[j*lda+j]), 0) + if beta == 0 { + c[i*ldc+j] = abij*ajj + alpha*tmp + } else { + c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + abij := alpha * b[i*ldb+j] + aj := a[j*lda : j*lda+j] + bi := b[i*ldb : i*ldb+j] + ci := c[i*ldc : i*ldc+j] + var tmp complex128 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * cmplx.Conj(ajk) + } + ajj := complex(real(a[j*lda+j]), 0) + if beta == 0 { + c[i*ldc+j] = abij*ajj + alpha*tmp + } else { + c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Zherk performs one of the hermitian rank-k operations +// C = alpha*A*A^H + beta*C if trans == blas.NoTrans +// C = alpha*A^H*A + beta*C if trans == blas.ConjTrans +// where alpha and beta are real scalars, C is an n×n hermitian 
matrix and A is +// an n×k matrix in the first case and a k×n matrix in the second case. +// +// The imaginary parts of the diagonal elements of C are assumed to be zero, and +// on return they will be set to zero. +func (Implementation) Zherk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha float64, a []complex128, lda int, beta float64, c []complex128, ldc int) { + var rowA, colA int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + rowA, colA = n, k + case blas.ConjTrans: + rowA, colA = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, colA): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ci[0] = complex(beta*real(ci[0]), 0) + if i != n-1 { + c128.DscalUnitary(beta, ci[1:]) + } + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + if i != 0 { + c128.DscalUnitary(beta, ci[:i]) + } + ci[i] = complex(beta*real(ci[i]), 0) + } + } + } + return + } + + calpha := complex(alpha, 0) + if trans == blas.NoTrans { + // Form C = alpha*A*A^H + beta*C. 
+ cbeta := complex(beta, 0) + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + switch { + case beta == 0: + // Handle the i-th diagonal element of C. + ci[0] = complex(alpha*real(c128.DotcUnitary(ai, ai)), 0) + // Handle the remaining elements on the i-th row of C. + for jc := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha * c128.DotcUnitary(a[j*lda:j*lda+k], ai) + } + case beta != 1: + cii := calpha*c128.DotcUnitary(ai, ai) + cbeta*ci[0] + ci[0] = complex(real(cii), 0) + for jc, cij := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij + } + default: + cii := calpha*c128.DotcUnitary(ai, ai) + ci[0] + ci[0] = complex(real(cii), 0) + for jc, cij := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + switch { + case beta == 0: + // Handle the first i-1 elements on the i-th row of C. + for j := range ci[:i] { + ci[j] = calpha * c128.DotcUnitary(a[j*lda:j*lda+k], ai) + } + // Handle the i-th diagonal element of C. + ci[i] = complex(alpha*real(c128.DotcUnitary(ai, ai)), 0) + case beta != 1: + for j, cij := range ci[:i] { + ci[j] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij + } + cii := calpha*c128.DotcUnitary(ai, ai) + cbeta*ci[i] + ci[i] = complex(real(cii), 0) + default: + for j, cij := range ci[:i] { + ci[j] = calpha*c128.DotcUnitary(a[j*lda:j*lda+k], ai) + cij + } + cii := calpha*c128.DotcUnitary(ai, ai) + ci[i] + ci[i] = complex(real(cii), 0) + } + } + } + } else { + // Form C = alpha*A^H*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + c128.DscalUnitary(beta, ci) + ci[0] = complex(real(ci[0]), 0) + default: + ci[0] = complex(real(ci[0]), 0) + } + for j := 0; j < k; j++ { + aji := cmplx.Conj(a[j*lda+i]) + if aji != 0 { + c128.AxpyUnitary(calpha*aji, a[j*lda+i:j*lda+n], ci) + } + } + c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + c128.DscalUnitary(beta, ci) + ci[i] = complex(real(ci[i]), 0) + default: + ci[i] = complex(real(ci[i]), 0) + } + for j := 0; j < k; j++ { + aji := cmplx.Conj(a[j*lda+i]) + if aji != 0 { + c128.AxpyUnitary(calpha*aji, a[j*lda:j*lda+i+1], ci) + } + } + c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) + } + } + } +} + +// Zher2k performs one of the hermitian rank-2k operations +// C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C if trans == blas.NoTrans +// C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C if trans == blas.ConjTrans +// where alpha and beta are scalars with beta real, C is an n×n hermitian matrix +// and A and B are n×k matrices in the first case and k×n matrices in the second case. +// +// The imaginary parts of the diagonal elements of C are assumed to be zero, and +// on return they will be set to zero. 
+func (Implementation) Zher2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta float64, c []complex128, ldc int) { + var row, col int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + row, col = n, k + case blas.ConjTrans: + row, col = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, col): + panic(badLdA) + case ldb < max(1, col): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (row-1)*lda+col { + panic(shortA) + } + if len(b) < (row-1)*ldb+col { + panic(shortB) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ci[0] = complex(beta*real(ci[0]), 0) + if i != n-1 { + c128.DscalUnitary(beta, ci[1:]) + } + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + if i != 0 { + c128.DscalUnitary(beta, ci[:i]) + } + ci[i] = complex(beta*real(ci[i]), 0) + } + } + } + return + } + + conjalpha := cmplx.Conj(alpha) + cbeta := complex(beta, 0) + if trans == blas.NoTrans { + // Form C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i+1 : i*ldc+n] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + c[i*ldc+i] = complex(real(cii), 0) + for jc := range ci { + j := i + 1 + jc + ci[jc] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + } + } else { + cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] + c[i*ldc+i] = complex(real(cii), 0) + for jc, cij := range ci { + j := i + 1 + jc + ci[jc] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for j := range ci { + ci[j] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + } + cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + c[i*ldc+i] = complex(real(cii), 0) + } else { + for j, cij := range ci { + ci[j] = alpha*c128.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c128.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij + } + cii := alpha*c128.DotcUnitary(bi, ai) + conjalpha*c128.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] + c[i*ldc+i] = complex(real(cii), 0) + } + } + } + } else { + // Form C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + c128.DscalUnitary(beta, ci) + ci[0] = complex(real(ci[0]), 0) + default: + ci[0] = complex(real(ci[0]), 0) + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c128.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb+i:j*ldb+n], ci) + } + if bji != 0 { + c128.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda+i:j*lda+n], ci) + } + } + ci[0] = complex(real(ci[0]), 0) + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + c128.DscalUnitary(beta, ci) + ci[i] = complex(real(ci[i]), 0) + default: + ci[i] = complex(real(ci[i]), 0) + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c128.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb:j*ldb+i+1], ci) + } + if bji != 0 { + c128.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda:j*lda+i+1], ci) + } + } + ci[i] = complex(real(ci[i]), 0) + } + } + } +} + +// Zsymm performs one of the matrix-matrix operations +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// where alpha and beta are scalars, A is an m×m or n×n symmetric matrix and B +// and C are m×n matrices. +func (Implementation) Zsymm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(na-1)+na { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + c128.ScalUnitary(beta, ci) + } + } + return + } + + if side == blas.Left { + // Form C = alpha*A*B + beta*C. + for i := 0; i < m; i++ { + atmp := alpha * a[i*lda+i] + bi := b[i*ldb : i*ldb+n] + ci := c[i*ldc : i*ldc+n] + if beta == 0 { + for j, bij := range bi { + ci[j] = atmp * bij + } + } else { + for j, bij := range bi { + ci[j] = atmp*bij + beta*ci[j] + } + } + if uplo == blas.Upper { + for k := 0; k < i; k++ { + atmp = alpha * a[k*lda+i] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[i*lda+k] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } else { + for k := 0; k < i; k++ { + atmp = alpha * a[i*lda+k] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[k*lda+i] + c128.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } + } + } else { + // Form C = alpha*B*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + abij := alpha * b[i*ldb+j] + aj := a[j*lda+j+1 : j*lda+n] + bi := b[i*ldb+j+1 : i*ldb+n] + ci := c[i*ldc+j+1 : i*ldc+n] + var tmp complex128 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * ajk + } + if beta == 0 { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + } else { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + abij := alpha * b[i*ldb+j] + aj := a[j*lda : j*lda+j] + bi := b[i*ldb : i*ldb+j] + ci := c[i*ldc : i*ldc+j] + var tmp complex128 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * ajk + } + if beta == 0 { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + } else { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Zsyrk performs one of the symmetric rank-k operations +// C = alpha*A*A^T + beta*C if trans == blas.NoTrans +// C = alpha*A^T*A + beta*C if trans == blas.Trans +// where alpha and beta are scalars, C is an n×n symmetric matrix and A is +// an n×k matrix in the first case and a k×n matrix in the second case. +func (Implementation) Zsyrk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, beta complex128, c []complex128, ldc int) { + var rowA, colA int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + rowA, colA = n, k + case blas.Trans: + rowA, colA = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, colA): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + c128.ScalUnitary(beta, ci) + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + c128.ScalUnitary(beta, ci) + } + } + } + return + } + + if trans == blas.NoTrans { + // Form C = alpha*A*A^T + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + for jc, cij := range ci { + j := i + jc + ci[jc] = beta*cij + alpha*c128.DotuUnitary(ai, a[j*lda:j*lda+k]) + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + for j, cij := range ci { + ci[j] = beta*cij + alpha*c128.DotuUnitary(ai, a[j*lda:j*lda+k]) + } + } + } + } else { + // Form C = alpha*A^T*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + for jc := range ci { + ci[jc] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + if aji != 0 { + c128.AxpyUnitary(alpha*aji, a[j*lda+i:j*lda+n], ci) + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + for j := range ci { + ci[j] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + if aji != 0 { + c128.AxpyUnitary(alpha*aji, a[j*lda:j*lda+i+1], ci) + } + } + } + } + } +} + +// Zsyr2k performs one of the symmetric rank-2k operations +// C = alpha*A*B^T + alpha*B*A^T + beta*C if trans == blas.NoTrans +// C = alpha*A^T*B + alpha*B^T*A + beta*C if trans == blas.Trans +// where alpha and beta are scalars, C is an n×n symmetric matrix and A and B +// are n×k matrices in the first case and k×n matrices in the second case. +func (Implementation) Zsyr2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex128, a []complex128, lda int, b []complex128, ldb int, beta complex128, c []complex128, ldc int) { + var row, col int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + row, col = n, k + case blas.Trans: + row, col = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, col): + panic(badLdA) + case ldb < max(1, col): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (row-1)*lda+col { + panic(shortA) + } + if len(b) < (row-1)*ldb+col { + panic(shortB) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. 
+ if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + c128.ScalUnitary(beta, ci) + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + c128.ScalUnitary(beta, ci) + } + } + } + return + } + + if trans == blas.NoTrans { + // Form C = alpha*A*B^T + alpha*B*A^T + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for jc := range ci { + j := i + jc + ci[jc] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + } + } else { + for jc, cij := range ci { + j := i + jc + ci[jc] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for j := range ci { + ci[j] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + } + } else { + for j, cij := range ci { + ci[j] = alpha*c128.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c128.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij + } + } + } + } + } else { + // Form C = alpha*A^T*B + alpha*B^T*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + for jc := range ci { + ci[jc] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c128.AxpyUnitary(alpha*aji, b[j*ldb+i:j*ldb+n], ci) + } + if bji != 0 { + c128.AxpyUnitary(alpha*bji, a[j*lda+i:j*lda+n], ci) + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + for j := range ci { + ci[j] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c128.AxpyUnitary(alpha*aji, b[j*ldb:j*ldb+i+1], ci) + } + if bji != 0 { + c128.AxpyUnitary(alpha*bji, a[j*lda:j*lda+i+1], ci) + } + } + } + } + } +} + +// Ztrmm performs one of the matrix-matrix operations +// B = alpha * op(A) * B if side == blas.Left, +// B = alpha * B * op(A) if side == blas.Right, +// where alpha is a scalar, B is an m×n matrix, A is a unit, or non-unit, +// upper or lower triangular matrix and op(A) is one of +// op(A) = A if trans == blas.NoTrans, +// op(A) = A^T if trans == blas.Trans, +// op(A) = A^H if trans == blas.ConjTrans. +func (Implementation) Ztrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTranspose) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (na-1)*lda+na { + panic(shortA) + } + if len(b) < (m-1)*ldb+n { + panic(shortB) + } + + // Quick return if possible. + if alpha == 0 { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j := range bi { + bi[j] = 0 + } + } + return + } + + noConj := trans != blas.ConjTrans + noUnit := diag == blas.NonUnit + if side == blas.Left { + if trans == blas.NoTrans { + // Form B = alpha*A*B. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + aii := alpha + if noUnit { + aii *= a[i*lda+i] + } + bi := b[i*ldb : i*ldb+n] + for j := range bi { + bi[j] *= aii + } + for ja, aij := range a[i*lda+i+1 : i*lda+m] { + j := ja + i + 1 + if aij != 0 { + c128.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) + } + } + } + } else { + for i := m - 1; i >= 0; i-- { + aii := alpha + if noUnit { + aii *= a[i*lda+i] + } + bi := b[i*ldb : i*ldb+n] + for j := range bi { + bi[j] *= aii + } + for j, aij := range a[i*lda : i*lda+i] { + if aij != 0 { + c128.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi) + } + } + } + } + } else { + // Form B = alpha*A^T*B or B = alpha*A^H*B. 
+ if uplo == blas.Upper { + for k := m - 1; k >= 0; k-- { + bk := b[k*ldb : k*ldb+n] + for ja, ajk := range a[k*lda+k+1 : k*lda+m] { + if ajk == 0 { + continue + } + j := k + 1 + ja + if noConj { + c128.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) + } else { + c128.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) + } + } + akk := alpha + if noUnit { + if noConj { + akk *= a[k*lda+k] + } else { + akk *= cmplx.Conj(a[k*lda+k]) + } + } + if akk != 1 { + c128.ScalUnitary(akk, bk) + } + } + } else { + for k := 0; k < m; k++ { + bk := b[k*ldb : k*ldb+n] + for j, ajk := range a[k*lda : k*lda+k] { + if ajk == 0 { + continue + } + if noConj { + c128.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n]) + } else { + c128.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n]) + } + } + akk := alpha + if noUnit { + if noConj { + akk *= a[k*lda+k] + } else { + akk *= cmplx.Conj(a[k*lda+k]) + } + } + if akk != 1 { + c128.ScalUnitary(akk, bk) + } + } + } + } + } else { + if trans == blas.NoTrans { + // Form B = alpha*B*A. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for k := n - 1; k >= 0; k-- { + abik := alpha * bi[k] + if abik == 0 { + continue + } + bi[k] = abik + if noUnit { + bi[k] *= a[k*lda+k] + } + c128.AxpyUnitary(abik, a[k*lda+k+1:k*lda+n], bi[k+1:]) + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for k := 0; k < n; k++ { + abik := alpha * bi[k] + if abik == 0 { + continue + } + bi[k] = abik + if noUnit { + bi[k] *= a[k*lda+k] + } + c128.AxpyUnitary(abik, a[k*lda:k*lda+k], bi[:k]) + } + } + } + } else { + // Form B = alpha*B*A^T or B = alpha*B*A^H. 
+ if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j, bij := range bi { + if noConj { + if noUnit { + bij *= a[j*lda+j] + } + bij += c128.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + } else { + if noUnit { + bij *= cmplx.Conj(a[j*lda+j]) + } + bij += c128.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + } + bi[j] = alpha * bij + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + bij := bi[j] + if noConj { + if noUnit { + bij *= a[j*lda+j] + } + bij += c128.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) + } else { + if noUnit { + bij *= cmplx.Conj(a[j*lda+j]) + } + bij += c128.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) + } + bi[j] = alpha * bij + } + } + } + } + } +} + +// Ztrsm solves one of the matrix equations +// op(A) * X = alpha * B if side == blas.Left, +// X * op(A) = alpha * B if side == blas.Right, +// where alpha is a scalar, X and B are m×n matrices, A is a unit or +// non-unit, upper or lower triangular matrix and op(A) is one of +// op(A) = A if transA == blas.NoTrans, +// op(A) = A^T if transA == blas.Trans, +// op(A) = A^H if transA == blas.ConjTrans. +// On return the matrix X is overwritten on B. +func (Implementation) Ztrsm(side blas.Side, uplo blas.Uplo, transA blas.Transpose, diag blas.Diag, m, n int, alpha complex128, a []complex128, lda int, b []complex128, ldb int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case transA != blas.NoTrans && transA != blas.Trans && transA != blas.ConjTrans: + panic(badTranspose) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (na-1)*lda+na { + panic(shortA) + } + if len(b) < (m-1)*ldb+n { + panic(shortB) + } + + if alpha == 0 { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + b[i*ldb+j] = 0 + } + } + return + } + + noConj := transA != blas.ConjTrans + noUnit := diag == blas.NonUnit + if side == blas.Left { + if transA == blas.NoTrans { + // Form B = alpha*inv(A)*B. + if uplo == blas.Upper { + for i := m - 1; i >= 0; i-- { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + for ka, aik := range a[i*lda+i+1 : i*lda+m] { + k := i + 1 + ka + if aik != 0 { + c128.AxpyUnitary(-aik, b[k*ldb:k*ldb+n], bi) + } + } + if noUnit { + c128.ScalUnitary(1/a[i*lda+i], bi) + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + for j, aij := range a[i*lda : i*lda+i] { + if aij != 0 { + c128.AxpyUnitary(-aij, b[j*ldb:j*ldb+n], bi) + } + } + if noUnit { + c128.ScalUnitary(1/a[i*lda+i], bi) + } + } + } + } else { + // Form B = alpha*inv(A^T)*B or B = alpha*inv(A^H)*B. 
+ if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if noUnit { + if noConj { + c128.ScalUnitary(1/a[i*lda+i], bi) + } else { + c128.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) + } + } + for ja, aij := range a[i*lda+i+1 : i*lda+m] { + if aij == 0 { + continue + } + j := i + 1 + ja + if noConj { + c128.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) + } else { + c128.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) + } + } + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + } + } else { + for i := m - 1; i >= 0; i-- { + bi := b[i*ldb : i*ldb+n] + if noUnit { + if noConj { + c128.ScalUnitary(1/a[i*lda+i], bi) + } else { + c128.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi) + } + } + for j, aij := range a[i*lda : i*lda+i] { + if aij == 0 { + continue + } + if noConj { + c128.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n]) + } else { + c128.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n]) + } + } + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + } + } + } + } else { + if transA == blas.NoTrans { + // Form B = alpha*B*inv(A). + if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + for j, bij := range bi { + if bij == 0 { + continue + } + if noUnit { + bi[j] /= a[j*lda+j] + } + c128.AxpyUnitary(-bi[j], a[j*lda+j+1:j*lda+n], bi[j+1:n]) + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + if alpha != 1 { + c128.ScalUnitary(alpha, bi) + } + for j := n - 1; j >= 0; j-- { + if bi[j] == 0 { + continue + } + if noUnit { + bi[j] /= a[j*lda+j] + } + c128.AxpyUnitary(-bi[j], a[j*lda:j*lda+j], bi[:j]) + } + } + } + } else { + // Form B = alpha*B*inv(A^T) or B = alpha*B*inv(A^H). 
+ if uplo == blas.Upper { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + bij := alpha * bi[j] + if noConj { + bij -= c128.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + if noUnit { + bij /= a[j*lda+j] + } + } else { + bij -= c128.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n]) + if noUnit { + bij /= cmplx.Conj(a[j*lda+j]) + } + } + bi[j] = bij + } + } + } else { + for i := 0; i < m; i++ { + bi := b[i*ldb : i*ldb+n] + for j, bij := range bi { + bij *= alpha + if noConj { + bij -= c128.DotuUnitary(a[j*lda:j*lda+j], bi[:j]) + if noUnit { + bij /= a[j*lda+j] + } + } else { + bij -= c128.DotcUnitary(a[j*lda:j*lda+j], bi[:j]) + if noUnit { + bij /= cmplx.Conj(a[j*lda+j]) + } + } + bi[j] = bij + } + } + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go new file mode 100644 index 0000000000..436c545065 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/level3cmplx64.go @@ -0,0 +1,1735 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + cmplx "gonum.org/v1/gonum/internal/cmplx64" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/c64" +) + +var _ blas.Complex64Level3 = Implementation{} + +// Cgemm performs one of the matrix-matrix operations +// C = alpha * op(A) * op(B) + beta * C +// where op(X) is one of +// op(X) = X or op(X) = X^T or op(X) = X^H, +// alpha and beta are scalars, and A, B and C are matrices, with op(A) an m×k matrix, +// op(B) a k×n matrix and C an m×n matrix. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cgemm(tA, tB blas.Transpose, m, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + switch tA { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch tB { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + } + rowA, colA := m, k + if tA != blas.NoTrans { + rowA, colA = k, m + } + if lda < max(1, colA) { + panic(badLdA) + } + rowB, colB := k, n + if tB != blas.NoTrans { + rowB, colB = n, k + } + if ldb < max(1, colB) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(b) < (rowB-1)*ldb+colB { + panic(shortB) + } + if len(c) < (m-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + } + return + } + + switch tA { + case blas.NoTrans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A * B + beta * C. + for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * b[l*ldb+j] + } + } + } + case blas.Trans: + // Form C = alpha * A * B^T + beta * C. 
+ for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * b[j*ldb+l] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A * B^H + beta * C. + for i := 0; i < m; i++ { + switch { + case beta == 0: + for j := 0; j < n; j++ { + c[i*ldc+j] = 0 + } + case beta != 1: + for j := 0; j < n; j++ { + c[i*ldc+j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[i*lda+l] + for j := 0; j < n; j++ { + c[i*ldc+j] += tmp * cmplx.Conj(b[j*ldb+l]) + } + } + } + } + case blas.Trans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A^T * B + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * b[l*ldb+j] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.Trans: + // Form C = alpha * A^T * B^T + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * b[j*ldb+l] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A^T * B^H + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += a[l*lda+i] * cmplx.Conj(b[j*ldb+l]) + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + case blas.ConjTrans: + switch tB { + case blas.NoTrans: + // Form C = alpha * A^H * B + beta * C. 
+ for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * b[l*ldb+j] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.Trans: + // Form C = alpha * A^H * B^T + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * b[j*ldb+l] + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + case blas.ConjTrans: + // Form C = alpha * A^H * B^H + beta * C. + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + var tmp complex64 + for l := 0; l < k; l++ { + tmp += cmplx.Conj(a[l*lda+i]) * cmplx.Conj(b[j*ldb+l]) + } + if beta == 0 { + c[i*ldc+j] = alpha * tmp + } else { + c[i*ldc+j] = alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Chemm performs one of the matrix-matrix operations +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// where alpha and beta are scalars, A is an m×m or n×n hermitian matrix and B +// and C are m×n matrices. The imaginary parts of the diagonal elements of A are +// assumed to be zero. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Chemm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(na-1)+na { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + c64.ScalUnitary(beta, ci) + } + } + return + } + + if side == blas.Left { + // Form C = alpha*A*B + beta*C. + for i := 0; i < m; i++ { + atmp := alpha * complex(real(a[i*lda+i]), 0) + bi := b[i*ldb : i*ldb+n] + ci := c[i*ldc : i*ldc+n] + if beta == 0 { + for j, bij := range bi { + ci[j] = atmp * bij + } + } else { + for j, bij := range bi { + ci[j] = atmp*bij + beta*ci[j] + } + } + if uplo == blas.Upper { + for k := 0; k < i; k++ { + atmp = alpha * cmplx.Conj(a[k*lda+i]) + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[i*lda+k] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } else { + for k := 0; k < i; k++ { + atmp = alpha * a[i*lda+k] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * cmplx.Conj(a[k*lda+i]) + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } + } + } else { + // Form C = alpha*B*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + abij := alpha * b[i*ldb+j] + aj := a[j*lda+j+1 : j*lda+n] + bi := b[i*ldb+j+1 : i*ldb+n] + ci := c[i*ldc+j+1 : i*ldc+n] + var tmp complex64 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * cmplx.Conj(ajk) + } + ajj := complex(real(a[j*lda+j]), 0) + if beta == 0 { + c[i*ldc+j] = abij*ajj + alpha*tmp + } else { + c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + abij := alpha * b[i*ldb+j] + aj := a[j*lda : j*lda+j] + bi := b[i*ldb : i*ldb+j] + ci := c[i*ldc : i*ldc+j] + var tmp complex64 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * cmplx.Conj(ajk) + } + ajj := complex(real(a[j*lda+j]), 0) + if beta == 0 { + c[i*ldc+j] = abij*ajj + alpha*tmp + } else { + c[i*ldc+j] = abij*ajj + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Cherk performs one of the hermitian rank-k operations +// C = alpha*A*A^H + beta*C if trans == blas.NoTrans +// C = alpha*A^H*A + beta*C if trans == blas.ConjTrans +// where alpha and beta are real scalars, C is an n×n hermitian matrix and A is +// an n×k matrix in the first case and a k×n matrix in the second case. +// +// The imaginary parts of the diagonal elements of C are assumed to be zero, and +// on return they will be set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cherk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha float32, a []complex64, lda int, beta float32, c []complex64, ldc int) { + var rowA, colA int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + rowA, colA = n, k + case blas.ConjTrans: + rowA, colA = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, colA): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ci[0] = complex(beta*real(ci[0]), 0) + if i != n-1 { + c64.SscalUnitary(beta, ci[1:]) + } + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + if i != 0 { + c64.SscalUnitary(beta, ci[:i]) + } + ci[i] = complex(beta*real(ci[i]), 0) + } + } + } + return + } + + calpha := complex(alpha, 0) + if trans == blas.NoTrans { + // Form C = alpha*A*A^H + beta*C. + cbeta := complex(beta, 0) + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + switch { + case beta == 0: + // Handle the i-th diagonal element of C. + ci[0] = complex(alpha*real(c64.DotcUnitary(ai, ai)), 0) + // Handle the remaining elements on the i-th row of C. 
+ for jc := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha * c64.DotcUnitary(a[j*lda:j*lda+k], ai) + } + case beta != 1: + cii := calpha*c64.DotcUnitary(ai, ai) + cbeta*ci[0] + ci[0] = complex(real(cii), 0) + for jc, cij := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij + } + default: + cii := calpha*c64.DotcUnitary(ai, ai) + ci[0] + ci[0] = complex(real(cii), 0) + for jc, cij := range ci[1:] { + j := i + 1 + jc + ci[jc+1] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + switch { + case beta == 0: + // Handle the first i-1 elements on the i-th row of C. + for j := range ci[:i] { + ci[j] = calpha * c64.DotcUnitary(a[j*lda:j*lda+k], ai) + } + // Handle the i-th diagonal element of C. + ci[i] = complex(alpha*real(c64.DotcUnitary(ai, ai)), 0) + case beta != 1: + for j, cij := range ci[:i] { + ci[j] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cbeta*cij + } + cii := calpha*c64.DotcUnitary(ai, ai) + cbeta*ci[i] + ci[i] = complex(real(cii), 0) + default: + for j, cij := range ci[:i] { + ci[j] = calpha*c64.DotcUnitary(a[j*lda:j*lda+k], ai) + cij + } + cii := calpha*c64.DotcUnitary(ai, ai) + ci[i] + ci[i] = complex(real(cii), 0) + } + } + } + } else { + // Form C = alpha*A^H*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + c64.SscalUnitary(beta, ci) + ci[0] = complex(real(ci[0]), 0) + default: + ci[0] = complex(real(ci[0]), 0) + } + for j := 0; j < k; j++ { + aji := cmplx.Conj(a[j*lda+i]) + if aji != 0 { + c64.AxpyUnitary(calpha*aji, a[j*lda+i:j*lda+n], ci) + } + } + c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + c64.SscalUnitary(beta, ci) + ci[i] = complex(real(ci[i]), 0) + default: + ci[i] = complex(real(ci[i]), 0) + } + for j := 0; j < k; j++ { + aji := cmplx.Conj(a[j*lda+i]) + if aji != 0 { + c64.AxpyUnitary(calpha*aji, a[j*lda:j*lda+i+1], ci) + } + } + c[i*ldc+i] = complex(real(c[i*ldc+i]), 0) + } + } + } +} + +// Cher2k performs one of the hermitian rank-2k operations +// C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C if trans == blas.NoTrans +// C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C if trans == blas.ConjTrans +// where alpha and beta are scalars with beta real, C is an n×n hermitian matrix +// and A and B are n×k matrices in the first case and k×n matrices in the second case. +// +// The imaginary parts of the diagonal elements of C are assumed to be zero, and +// on return they will be set to zero. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Cher2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta float32, c []complex64, ldc int) { + var row, col int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + row, col = n, k + case blas.ConjTrans: + row, col = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, col): + panic(badLdA) + case ldb < max(1, col): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (row-1)*lda+col { + panic(shortA) + } + if len(b) < (row-1)*ldb+col { + panic(shortB) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ci[0] = complex(beta*real(ci[0]), 0) + if i != n-1 { + c64.SscalUnitary(beta, ci[1:]) + } + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + if i != 0 { + c64.SscalUnitary(beta, ci[:i]) + } + ci[i] = complex(beta*real(ci[i]), 0) + } + } + } + return + } + + conjalpha := cmplx.Conj(alpha) + cbeta := complex(beta, 0) + if trans == blas.NoTrans { + // Form C = alpha*A*B^H + conj(alpha)*B*A^H + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i+1 : i*ldc+n] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + c[i*ldc+i] = complex(real(cii), 0) + for jc := range ci { + j := i + 1 + jc + ci[jc] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + } + } else { + cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] + c[i*ldc+i] = complex(real(cii), 0) + for jc, cij := range ci { + j := i + 1 + jc + ci[jc] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for j := range ci { + ci[j] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + } + cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + c[i*ldc+i] = complex(real(cii), 0) + } else { + for j, cij := range ci { + ci[j] = alpha*c64.DotcUnitary(b[j*ldb:j*ldb+k], ai) + conjalpha*c64.DotcUnitary(a[j*lda:j*lda+k], bi) + cbeta*cij + } + cii := alpha*c64.DotcUnitary(bi, ai) + conjalpha*c64.DotcUnitary(ai, bi) + cbeta*c[i*ldc+i] + c[i*ldc+i] = complex(real(cii), 0) + } + } + } + } else { + // Form C = alpha*A^H*B + conj(alpha)*B^H*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + c64.SscalUnitary(beta, ci) + ci[0] = complex(real(ci[0]), 0) + default: + ci[0] = complex(real(ci[0]), 0) + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c64.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb+i:j*ldb+n], ci) + } + if bji != 0 { + c64.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda+i:j*lda+n], ci) + } + } + ci[0] = complex(real(ci[0]), 0) + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + c64.SscalUnitary(beta, ci) + ci[i] = complex(real(ci[i]), 0) + default: + ci[i] = complex(real(ci[i]), 0) + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c64.AxpyUnitary(alpha*cmplx.Conj(aji), b[j*ldb:j*ldb+i+1], ci) + } + if bji != 0 { + c64.AxpyUnitary(conjalpha*cmplx.Conj(bji), a[j*lda:j*lda+i+1], ci) + } + } + ci[i] = complex(real(ci[i]), 0) + } + } + } +} + +// Csymm performs one of the matrix-matrix operations +// C = alpha*A*B + beta*C if side == blas.Left +// C = alpha*B*A + beta*C if side == blas.Right +// where alpha and beta are scalars, A is an m×m or n×n symmetric matrix and B +// and C are m×n matrices. +// +// Complex64 implementations are autogenerated and not directly tested. 
+func (Implementation) Csymm(side blas.Side, uplo blas.Uplo, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + na := m + if side == blas.Right { + na = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, na): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(na-1)+na { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ci := c[i*ldc : i*ldc+n] + c64.ScalUnitary(beta, ci) + } + } + return + } + + if side == blas.Left { + // Form C = alpha*A*B + beta*C. 
+ for i := 0; i < m; i++ { + atmp := alpha * a[i*lda+i] + bi := b[i*ldb : i*ldb+n] + ci := c[i*ldc : i*ldc+n] + if beta == 0 { + for j, bij := range bi { + ci[j] = atmp * bij + } + } else { + for j, bij := range bi { + ci[j] = atmp*bij + beta*ci[j] + } + } + if uplo == blas.Upper { + for k := 0; k < i; k++ { + atmp = alpha * a[k*lda+i] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[i*lda+k] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } else { + for k := 0; k < i; k++ { + atmp = alpha * a[i*lda+k] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + for k := i + 1; k < m; k++ { + atmp = alpha * a[k*lda+i] + c64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ci) + } + } + } + } else { + // Form C = alpha*B*A + beta*C. + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + abij := alpha * b[i*ldb+j] + aj := a[j*lda+j+1 : j*lda+n] + bi := b[i*ldb+j+1 : i*ldb+n] + ci := c[i*ldc+j+1 : i*ldc+n] + var tmp complex64 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * ajk + } + if beta == 0 { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + } else { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + abij := alpha * b[i*ldb+j] + aj := a[j*lda : j*lda+j] + bi := b[i*ldb : i*ldb+j] + ci := c[i*ldc : i*ldc+j] + var tmp complex64 + for k, ajk := range aj { + ci[k] += abij * ajk + tmp += bi[k] * ajk + } + if beta == 0 { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + } else { + c[i*ldc+j] = abij*a[j*lda+j] + alpha*tmp + beta*c[i*ldc+j] + } + } + } + } + } +} + +// Csyrk performs one of the symmetric rank-k operations +// C = alpha*A*A^T + beta*C if trans == blas.NoTrans +// C = alpha*A^T*A + beta*C if trans == blas.Trans +// where alpha and beta are scalars, C is an n×n symmetric matrix and A is +// an n×k matrix in the first case and a k×n matrix in the second case. 
+// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Csyrk(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, beta complex64, c []complex64, ldc int) { + var rowA, colA int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + rowA, colA = n, k + case blas.Trans: + rowA, colA = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, colA): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < (rowA-1)*lda+colA { + panic(shortA) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + c64.ScalUnitary(beta, ci) + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + c64.ScalUnitary(beta, ci) + } + } + } + return + } + + if trans == blas.NoTrans { + // Form C = alpha*A*A^T + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + for jc, cij := range ci { + j := i + jc + ci[jc] = beta*cij + alpha*c64.DotuUnitary(ai, a[j*lda:j*lda+k]) + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + for j, cij := range ci { + ci[j] = beta*cij + alpha*c64.DotuUnitary(ai, a[j*lda:j*lda+k]) + } + } + } + } else { + // Form C = alpha*A^T*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + for jc := range ci { + ci[jc] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + if aji != 0 { + c64.AxpyUnitary(alpha*aji, a[j*lda+i:j*lda+n], ci) + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + for j := range ci { + ci[j] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + if aji != 0 { + c64.AxpyUnitary(alpha*aji, a[j*lda:j*lda+i+1], ci) + } + } + } + } + } +} + +// Csyr2k performs one of the symmetric rank-2k operations +// C = alpha*A*B^T + alpha*B*A^T + beta*C if trans == blas.NoTrans +// C = alpha*A^T*B + alpha*B^T*A + beta*C if trans == blas.Trans +// where alpha and beta are scalars, C is an n×n symmetric matrix and A and B +// are n×k matrices in the first case and k×n matrices in the second case. +// +// Complex64 implementations are autogenerated and not directly tested. +func (Implementation) Csyr2k(uplo blas.Uplo, trans blas.Transpose, n, k int, alpha complex64, a []complex64, lda int, b []complex64, ldb int, beta complex64, c []complex64, ldc int) { + var row, col int + switch trans { + default: + panic(badTranspose) + case blas.NoTrans: + row, col = n, k + case blas.Trans: + row, col = k, n + } + switch { + case uplo != blas.Lower && uplo != blas.Upper: + panic(badUplo) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case lda < max(1, col): + panic(badLdA) + case ldb < max(1, col): + panic(badLdB) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < (row-1)*lda+col { + panic(shortA) + } + if len(b) < (row-1)*ldb+col { + panic(shortB) + } + if len(c) < (n-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. + if (alpha == 0 || k == 0) && beta == 1 { + return + } + + if alpha == 0 { + if uplo == blas.Upper { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + c64.ScalUnitary(beta, ci) + } + } + } else { + if beta == 0 { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + for j := range ci { + ci[j] = 0 + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + c64.ScalUnitary(beta, ci) + } + } + } + return + } + + if trans == blas.NoTrans { + // Form C = alpha*A*B^T + alpha*B*A^T + beta*C. + if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for jc := range ci { + j := i + jc + ci[jc] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + } + } else { + for jc, cij := range ci { + j := i + jc + ci[jc] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + ai := a[i*lda : i*lda+k] + bi := b[i*ldb : i*ldb+k] + if beta == 0 { + for j := range ci { + ci[j] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + } + } else { + for j, cij := range ci { + ci[j] = alpha*c64.DotuUnitary(ai, b[j*ldb:j*ldb+k]) + alpha*c64.DotuUnitary(bi, a[j*lda:j*lda+k]) + beta*cij + } + } + } + } + } else { + // Form C = alpha*A^T*B + alpha*B^T*A + beta*C. 
+ if uplo == blas.Upper { + for i := 0; i < n; i++ { + ci := c[i*ldc+i : i*ldc+n] + switch { + case beta == 0: + for jc := range ci { + ci[jc] = 0 + } + case beta != 1: + for jc := range ci { + ci[jc] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c64.AxpyUnitary(alpha*aji, b[j*ldb+i:j*ldb+n], ci) + } + if bji != 0 { + c64.AxpyUnitary(alpha*bji, a[j*lda+i:j*lda+n], ci) + } + } + } + } else { + for i := 0; i < n; i++ { + ci := c[i*ldc : i*ldc+i+1] + switch { + case beta == 0: + for j := range ci { + ci[j] = 0 + } + case beta != 1: + for j := range ci { + ci[j] *= beta + } + } + for j := 0; j < k; j++ { + aji := a[j*lda+i] + bji := b[j*ldb+i] + if aji != 0 { + c64.AxpyUnitary(alpha*aji, b[j*ldb:j*ldb+i+1], ci) + } + if bji != 0 { + c64.AxpyUnitary(alpha*bji, a[j*lda:j*lda+i+1], ci) + } + } + } + } + } +} + +// Ctrmm performs one of the matrix-matrix operations +// B = alpha * op(A) * B if side == blas.Left, +// B = alpha * B * op(A) if side == blas.Right, +// where alpha is a scalar, B is an m×n matrix, A is a unit, or non-unit, +// upper or lower triangular matrix and op(A) is one of +// op(A) = A if trans == blas.NoTrans, +// op(A) = A^T if trans == blas.Trans, +// op(A) = A^H if trans == blas.ConjTrans. +// +// Complex64 implementations are autogenerated and not directly tested. 
func (Implementation) Ctrmm(side blas.Side, uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) {
	// na is the order of the triangular matrix A: m when A multiplies
	// from the left, n when it multiplies from the right.
	na := m
	if side == blas.Right {
		na = n
	}
	// Validate the arguments; each failure panics with the matching
	// blas error string.
	switch {
	case side != blas.Left && side != blas.Right:
		panic(badSide)
	case uplo != blas.Lower && uplo != blas.Upper:
		panic(badUplo)
	case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans:
		panic(badTranspose)
	case diag != blas.Unit && diag != blas.NonUnit:
		panic(badDiag)
	case m < 0:
		panic(mLT0)
	case n < 0:
		panic(nLT0)
	case lda < max(1, na):
		panic(badLdA)
	case ldb < max(1, n):
		panic(badLdB)
	}

	// Quick return if possible.
	if m == 0 || n == 0 {
		return
	}

	// For zero matrix size the following slice length checks are trivially satisfied.
	if len(a) < (na-1)*lda+na {
		panic(shortA)
	}
	if len(b) < (m-1)*ldb+n {
		panic(shortB)
	}

	// Quick return if possible.
	if alpha == 0 {
		for i := 0; i < m; i++ {
			bi := b[i*ldb : i*ldb+n]
			for j := range bi {
				bi[j] = 0
			}
		}
		return
	}

	// noConj selects the plain (non-conjugated) update path; noUnit means
	// A's stored diagonal is used rather than an implicit unit diagonal.
	noConj := trans != blas.ConjTrans
	noUnit := diag == blas.NonUnit
	if side == blas.Left {
		if trans == blas.NoTrans {
			// Form B = alpha*A*B.
			if uplo == blas.Upper {
				// Ascending i: row i of B is rebuilt from rows j > i,
				// which still hold the original B.
				for i := 0; i < m; i++ {
					aii := alpha
					if noUnit {
						aii *= a[i*lda+i]
					}
					bi := b[i*ldb : i*ldb+n]
					for j := range bi {
						bi[j] *= aii
					}
					for ja, aij := range a[i*lda+i+1 : i*lda+m] {
						j := ja + i + 1
						if aij != 0 {
							c64.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi)
						}
					}
				}
			} else {
				// Descending i: row i is rebuilt from rows j < i,
				// which are not yet overwritten.
				for i := m - 1; i >= 0; i-- {
					aii := alpha
					if noUnit {
						aii *= a[i*lda+i]
					}
					bi := b[i*ldb : i*ldb+n]
					for j := range bi {
						bi[j] *= aii
					}
					for j, aij := range a[i*lda : i*lda+i] {
						if aij != 0 {
							c64.AxpyUnitary(alpha*aij, b[j*ldb:j*ldb+n], bi)
						}
					}
				}
			}
		} else {
			// Form B = alpha*A^T*B or B = alpha*A^H*B.
			// NOTE(review): cmplx.Conj is applied to complex64 values here, so
			// cmplx must alias an internal complex64 package (the stdlib
			// math/cmplx operates on complex128) — confirm against this
			// file's import block.
			if uplo == blas.Upper {
				for k := m - 1; k >= 0; k-- {
					bk := b[k*ldb : k*ldb+n]
					for ja, ajk := range a[k*lda+k+1 : k*lda+m] {
						if ajk == 0 {
							continue
						}
						j := k + 1 + ja
						if noConj {
							c64.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n])
						} else {
							c64.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n])
						}
					}
					// Scale row k by its (possibly conjugated) diagonal after it
					// has been used to update the rows below it.
					akk := alpha
					if noUnit {
						if noConj {
							akk *= a[k*lda+k]
						} else {
							akk *= cmplx.Conj(a[k*lda+k])
						}
					}
					if akk != 1 {
						c64.ScalUnitary(akk, bk)
					}
				}
			} else {
				for k := 0; k < m; k++ {
					bk := b[k*ldb : k*ldb+n]
					for j, ajk := range a[k*lda : k*lda+k] {
						if ajk == 0 {
							continue
						}
						if noConj {
							c64.AxpyUnitary(alpha*ajk, bk, b[j*ldb:j*ldb+n])
						} else {
							c64.AxpyUnitary(alpha*cmplx.Conj(ajk), bk, b[j*ldb:j*ldb+n])
						}
					}
					akk := alpha
					if noUnit {
						if noConj {
							akk *= a[k*lda+k]
						} else {
							akk *= cmplx.Conj(a[k*lda+k])
						}
					}
					if akk != 1 {
						c64.ScalUnitary(akk, bk)
					}
				}
			}
		}
	} else {
		if trans == blas.NoTrans {
			// Form B = alpha*B*A. Each row of B is independent here.
			if uplo == blas.Upper {
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					// Descending k so that bi[k] is consumed before any
					// element to its right overwrites it.
					for k := n - 1; k >= 0; k-- {
						abik := alpha * bi[k]
						if abik == 0 {
							continue
						}
						bi[k] = abik
						if noUnit {
							bi[k] *= a[k*lda+k]
						}
						c64.AxpyUnitary(abik, a[k*lda+k+1:k*lda+n], bi[k+1:])
					}
				}
			} else {
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					for k := 0; k < n; k++ {
						abik := alpha * bi[k]
						if abik == 0 {
							continue
						}
						bi[k] = abik
						if noUnit {
							bi[k] *= a[k*lda+k]
						}
						c64.AxpyUnitary(abik, a[k*lda:k*lda+k], bi[:k])
					}
				}
			}
		} else {
			// Form B = alpha*B*A^T or B = alpha*B*A^H.
			if uplo == blas.Upper {
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					for j, bij := range bi {
						if noConj {
							if noUnit {
								bij *= a[j*lda+j]
							}
							bij += c64.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n])
						} else {
							if noUnit {
								bij *= cmplx.Conj(a[j*lda+j])
							}
							bij += c64.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n])
						}
						bi[j] = alpha * bij
					}
				}
			} else {
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					for j := n - 1; j >= 0; j-- {
						bij := bi[j]
						if noConj {
							if noUnit {
								bij *= a[j*lda+j]
							}
							bij += c64.DotuUnitary(a[j*lda:j*lda+j], bi[:j])
						} else {
							if noUnit {
								bij *= cmplx.Conj(a[j*lda+j])
							}
							bij += c64.DotcUnitary(a[j*lda:j*lda+j], bi[:j])
						}
						bi[j] = alpha * bij
					}
				}
			}
		}
	}
}

// Ctrsm solves one of the matrix equations
//  op(A) * X = alpha * B  if side == blas.Left,
//  X * op(A) = alpha * B  if side == blas.Right,
// where alpha is a scalar, X and B are m×n matrices, A is a unit or
// non-unit, upper or lower triangular matrix and op(A) is one of
//  op(A) = A    if transA == blas.NoTrans,
//  op(A) = A^T  if transA == blas.Trans,
//  op(A) = A^H  if transA == blas.ConjTrans.
// On return the matrix X is overwritten on B.
//
// Complex64 implementations are autogenerated and not directly tested.
func (Implementation) Ctrsm(side blas.Side, uplo blas.Uplo, transA blas.Transpose, diag blas.Diag, m, n int, alpha complex64, a []complex64, lda int, b []complex64, ldb int) {
	// na is the order of the triangular matrix A for the chosen side.
	na := m
	if side == blas.Right {
		na = n
	}
	switch {
	case side != blas.Left && side != blas.Right:
		panic(badSide)
	case uplo != blas.Lower && uplo != blas.Upper:
		panic(badUplo)
	case transA != blas.NoTrans && transA != blas.Trans && transA != blas.ConjTrans:
		panic(badTranspose)
	case diag != blas.Unit && diag != blas.NonUnit:
		panic(badDiag)
	case m < 0:
		panic(mLT0)
	case n < 0:
		panic(nLT0)
	case lda < max(1, na):
		panic(badLdA)
	case ldb < max(1, n):
		panic(badLdB)
	}

	// Quick return if possible.
	if m == 0 || n == 0 {
		return
	}

	// For zero matrix size the following slice length checks are trivially satisfied.
	if len(a) < (na-1)*lda+na {
		panic(shortA)
	}
	if len(b) < (m-1)*ldb+n {
		panic(shortB)
	}

	// alpha == 0 makes the right-hand side zero, so the solution is zero.
	if alpha == 0 {
		for i := 0; i < m; i++ {
			for j := 0; j < n; j++ {
				b[i*ldb+j] = 0
			}
		}
		return
	}

	noConj := transA != blas.ConjTrans
	noUnit := diag == blas.NonUnit
	if side == blas.Left {
		if transA == blas.NoTrans {
			// Form B = alpha*inv(A)*B.
			if uplo == blas.Upper {
				// Back substitution: solve the last row first, then move
				// upward; row i uses the already-solved rows k > i.
				for i := m - 1; i >= 0; i-- {
					bi := b[i*ldb : i*ldb+n]
					if alpha != 1 {
						c64.ScalUnitary(alpha, bi)
					}
					for ka, aik := range a[i*lda+i+1 : i*lda+m] {
						k := i + 1 + ka
						if aik != 0 {
							c64.AxpyUnitary(-aik, b[k*ldb:k*ldb+n], bi)
						}
					}
					if noUnit {
						c64.ScalUnitary(1/a[i*lda+i], bi)
					}
				}
			} else {
				// Forward substitution: rows are solved top to bottom.
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					if alpha != 1 {
						c64.ScalUnitary(alpha, bi)
					}
					for j, aij := range a[i*lda : i*lda+i] {
						if aij != 0 {
							c64.AxpyUnitary(-aij, b[j*ldb:j*ldb+n], bi)
						}
					}
					if noUnit {
						c64.ScalUnitary(1/a[i*lda+i], bi)
					}
				}
			}
		} else {
			// Form B = alpha*inv(A^T)*B or B = alpha*inv(A^H)*B.
			if uplo == blas.Upper {
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					if noUnit {
						if noConj {
							c64.ScalUnitary(1/a[i*lda+i], bi)
						} else {
							c64.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi)
						}
					}
					for ja, aij := range a[i*lda+i+1 : i*lda+m] {
						if aij == 0 {
							continue
						}
						j := i + 1 + ja
						if noConj {
							c64.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n])
						} else {
							c64.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n])
						}
					}
					if alpha != 1 {
						c64.ScalUnitary(alpha, bi)
					}
				}
			} else {
				for i := m - 1; i >= 0; i-- {
					bi := b[i*ldb : i*ldb+n]
					if noUnit {
						if noConj {
							c64.ScalUnitary(1/a[i*lda+i], bi)
						} else {
							c64.ScalUnitary(1/cmplx.Conj(a[i*lda+i]), bi)
						}
					}
					for j, aij := range a[i*lda : i*lda+i] {
						if aij == 0 {
							continue
						}
						if noConj {
							c64.AxpyUnitary(-aij, bi, b[j*ldb:j*ldb+n])
						} else {
							c64.AxpyUnitary(-cmplx.Conj(aij), bi, b[j*ldb:j*ldb+n])
						}
					}
					if alpha != 1 {
						c64.ScalUnitary(alpha, bi)
					}
				}
			}
		}
	} else {
		if transA == blas.NoTrans {
			// Form B = alpha*B*inv(A). Each row of B is solved independently.
			if uplo == blas.Upper {
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					if alpha != 1 {
						c64.ScalUnitary(alpha, bi)
					}
					for j, bij := range bi {
						if bij == 0 {
							continue
						}
						if noUnit {
							bi[j] /= a[j*lda+j]
						}
						c64.AxpyUnitary(-bi[j], a[j*lda+j+1:j*lda+n], bi[j+1:n])
					}
				}
			} else {
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					if alpha != 1 {
						c64.ScalUnitary(alpha, bi)
					}
					for j := n - 1; j >= 0; j-- {
						if bi[j] == 0 {
							continue
						}
						if noUnit {
							bi[j] /= a[j*lda+j]
						}
						c64.AxpyUnitary(-bi[j], a[j*lda:j*lda+j], bi[:j])
					}
				}
			}
		} else {
			// Form B = alpha*B*inv(A^T) or B = alpha*B*inv(A^H).
			if uplo == blas.Upper {
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					for j := n - 1; j >= 0; j-- {
						bij := alpha * bi[j]
						if noConj {
							bij -= c64.DotuUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n])
							if noUnit {
								bij /= a[j*lda+j]
							}
						} else {
							bij -= c64.DotcUnitary(a[j*lda+j+1:j*lda+n], bi[j+1:n])
							if noUnit {
								bij /= cmplx.Conj(a[j*lda+j])
							}
						}
						bi[j] = bij
					}
				}
			} else {
				for i := 0; i < m; i++ {
					bi := b[i*ldb : i*ldb+n]
					for j, bij := range bi {
						bij *= alpha
						if noConj {
							bij -= c64.DotuUnitary(a[j*lda:j*lda+j], bi[:j])
							if noUnit {
								bij /= a[j*lda+j]
							}
						} else {
							bij -= c64.DotcUnitary(a[j*lda:j*lda+j], bi[:j])
							if noUnit {
								bij /= cmplx.Conj(a[j*lda+j])
							}
						}
						bi[j] = bij
					}
				}
			}
		}
	}
}
diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go
new file mode 100644
index 0000000000..13c4a792e9
--- /dev/null
+++ b/vendor/gonum.org/v1/gonum/blas/gonum/level3float32.go
@@ -0,0 +1,876 @@
// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.

// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gonum

import (
	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/internal/asm/f32"
)

// Compile-time assertion that Implementation satisfies the level-3
// single-precision BLAS interface.
var _ blas.Float32Level3 = Implementation{}

// Strsm solves one of the matrix equations
//  A * X = alpha * B    if tA == blas.NoTrans and side == blas.Left
//  A^T * X = alpha * B  if tA == blas.Trans or blas.ConjTrans, and side == blas.Left
//  X * A = alpha * B    if tA == blas.NoTrans and side == blas.Right
//  X * A^T = alpha * B  if tA == blas.Trans or blas.ConjTrans, and side == blas.Right
// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a
// scalar.
//
// At entry to the function, X contains the values of B, and the result is
// stored in-place into X.
//
// No check is made that A is invertible.
//
// Float32 implementations are autogenerated and not directly tested.
func (Implementation) Strsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) {
	if s != blas.Left && s != blas.Right {
		panic(badSide)
	}
	if ul != blas.Lower && ul != blas.Upper {
		panic(badUplo)
	}
	if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans {
		panic(badTranspose)
	}
	if d != blas.NonUnit && d != blas.Unit {
		panic(badDiag)
	}
	if m < 0 {
		panic(mLT0)
	}
	if n < 0 {
		panic(nLT0)
	}
	// k is the order of the triangular matrix A for the chosen side.
	k := n
	if s == blas.Left {
		k = m
	}
	if lda < max(1, k) {
		panic(badLdA)
	}
	if ldb < max(1, n) {
		panic(badLdB)
	}

	// Quick return if possible.
	if m == 0 || n == 0 {
		return
	}

	// For zero matrix size the following slice length checks are trivially satisfied.
	if len(a) < lda*(k-1)+k {
		panic(shortA)
	}
	if len(b) < ldb*(m-1)+n {
		panic(shortB)
	}

	// alpha == 0 makes the right-hand side zero, so the solution is zero.
	if alpha == 0 {
		for i := 0; i < m; i++ {
			btmp := b[i*ldb : i*ldb+n]
			for j := range btmp {
				btmp[j] = 0
			}
		}
		return
	}
	nonUnit := d == blas.NonUnit
	if s == blas.Left {
		if tA == blas.NoTrans {
			if ul == blas.Upper {
				// Back substitution: solve the last row first, then move
				// upward; row i uses the already-solved rows k > i.
				for i := m - 1; i >= 0; i-- {
					btmp := b[i*ldb : i*ldb+n]
					if alpha != 1 {
						f32.ScalUnitary(alpha, btmp)
					}
					for ka, va := range a[i*lda+i+1 : i*lda+m] {
						if va != 0 {
							k := ka + i + 1
							f32.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp)
						}
					}
					if nonUnit {
						tmp := 1 / a[i*lda+i]
						f32.ScalUnitary(tmp, btmp)
					}
				}
				return
			}
			// Forward substitution: rows are solved top to bottom.
			for i := 0; i < m; i++ {
				btmp := b[i*ldb : i*ldb+n]
				if alpha != 1 {
					f32.ScalUnitary(alpha, btmp)
				}
				for k, va := range a[i*lda : i*lda+i] {
					if va != 0 {
						f32.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp)
					}
				}
				if nonUnit {
					tmp := 1 / a[i*lda+i]
					f32.ScalUnitary(tmp, btmp)
				}
			}
			return
		}
		// Cases where a is transposed
		if ul == blas.Upper {
			for k := 0; k < m; k++ {
				btmpk := b[k*ldb : k*ldb+n]
				if nonUnit {
					tmp := 1 / a[k*lda+k]
					f32.ScalUnitary(tmp, btmpk)
				}
				for ia, va := range a[k*lda+k+1 : k*lda+m] {
					if va != 0 {
						i := ia + k + 1
						f32.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n])
					}
				}
				if alpha != 1 {
					f32.ScalUnitary(alpha, btmpk)
				}
			}
			return
		}
		for k := m - 1; k >= 0; k-- {
			btmpk := b[k*ldb : k*ldb+n]
			if nonUnit {
				tmp := 1 / a[k*lda+k]
				f32.ScalUnitary(tmp, btmpk)
			}
			for i, va := range a[k*lda : k*lda+k] {
				if va != 0 {
					f32.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n])
				}
			}
			if alpha != 1 {
				f32.ScalUnitary(alpha, btmpk)
			}
		}
		return
	}
	// Cases where a is to the right of X. Each row of B is independent.
	if tA == blas.NoTrans {
		if ul == blas.Upper {
			for i := 0; i < m; i++ {
				btmp := b[i*ldb : i*ldb+n]
				if alpha != 1 {
					f32.ScalUnitary(alpha, btmp)
				}
				for k, vb := range btmp {
					if vb == 0 {
						continue
					}
					if nonUnit {
						btmp[k] /= a[k*lda+k]
					}
					f32.AxpyUnitary(-btmp[k], a[k*lda+k+1:k*lda+n], btmp[k+1:n])
				}
			}
			return
		}
		for i := 0; i < m; i++ {
			btmp := b[i*ldb : i*ldb+n]
			if alpha != 1 {
				f32.ScalUnitary(alpha, btmp)
			}
			for k := n - 1; k >= 0; k-- {
				if btmp[k] == 0 {
					continue
				}
				if nonUnit {
					btmp[k] /= a[k*lda+k]
				}
				f32.AxpyUnitary(-btmp[k], a[k*lda:k*lda+k], btmp[:k])
			}
		}
		return
	}
	// Cases where a is transposed.
	if ul == blas.Upper {
		for i := 0; i < m; i++ {
			btmp := b[i*ldb : i*ldb+n]
			for j := n - 1; j >= 0; j-- {
				tmp := alpha*btmp[j] - f32.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:])
				if nonUnit {
					tmp /= a[j*lda+j]
				}
				btmp[j] = tmp
			}
		}
		return
	}
	for i := 0; i < m; i++ {
		btmp := b[i*ldb : i*ldb+n]
		for j := 0; j < n; j++ {
			tmp := alpha*btmp[j] - f32.DotUnitary(a[j*lda:j*lda+j], btmp[:j])
			if nonUnit {
				tmp /= a[j*lda+j]
			}
			btmp[j] = tmp
		}
	}
}

// Ssymm performs one of the matrix-matrix operations
//  C = alpha * A * B + beta * C  if side == blas.Left
//  C = alpha * B * A + beta * C  if side == blas.Right
// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha
// is a scalar.
//
// Float32 implementations are autogenerated and not directly tested.
func (Implementation) Ssymm(s blas.Side, ul blas.Uplo, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) {
	if s != blas.Right && s != blas.Left {
		panic(badSide)
	}
	if ul != blas.Lower && ul != blas.Upper {
		panic(badUplo)
	}
	if m < 0 {
		panic(mLT0)
	}
	if n < 0 {
		panic(nLT0)
	}
	// k is the order of the symmetric matrix A for the chosen side.
	k := n
	if s == blas.Left {
		k = m
	}
	if lda < max(1, k) {
		panic(badLdA)
	}
	if ldb < max(1, n) {
		panic(badLdB)
	}
	if ldc < max(1, n) {
		panic(badLdC)
	}

	// Quick return if possible.
	if m == 0 || n == 0 {
		return
	}

	// For zero matrix size the following slice length checks are trivially satisfied.
	if len(a) < lda*(k-1)+k {
		panic(shortA)
	}
	if len(b) < ldb*(m-1)+n {
		panic(shortB)
	}
	if len(c) < ldc*(m-1)+n {
		panic(shortC)
	}

	// Quick return if possible.
	if alpha == 0 && beta == 1 {
		return
	}

	// alpha == 0 reduces the operation to C = beta*C.
	if alpha == 0 {
		if beta == 0 {
			for i := 0; i < m; i++ {
				ctmp := c[i*ldc : i*ldc+n]
				for j := range ctmp {
					ctmp[j] = 0
				}
			}
			return
		}
		for i := 0; i < m; i++ {
			ctmp := c[i*ldc : i*ldc+n]
			for j := 0; j < n; j++ {
				ctmp[j] *= beta
			}
		}
		return
	}

	// Only the ul triangle of A is read; the other half is obtained by
	// symmetry (note the swapped indices in the k loops below).
	isUpper := ul == blas.Upper
	if s == blas.Left {
		for i := 0; i < m; i++ {
			atmp := alpha * a[i*lda+i]
			btmp := b[i*ldb : i*ldb+n]
			ctmp := c[i*ldc : i*ldc+n]
			for j, v := range btmp {
				ctmp[j] *= beta
				ctmp[j] += atmp * v
			}

			for k := 0; k < i; k++ {
				var atmp float32
				if isUpper {
					atmp = a[k*lda+i]
				} else {
					atmp = a[i*lda+k]
				}
				atmp *= alpha
				f32.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp)
			}
			for k := i + 1; k < m; k++ {
				var atmp float32
				if isUpper {
					atmp = a[i*lda+k]
				} else {
					atmp = a[k*lda+i]
				}
				atmp *= alpha
				f32.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp)
			}
		}
		return
	}
	if isUpper {
		for i := 0; i < m; i++ {
			for j := n - 1; j >= 0; j-- {
				tmp := alpha * b[i*ldb+j]
				var tmp2 float32
				atmp := a[j*lda+j+1 : j*lda+n]
				btmp := b[i*ldb+j+1 : i*ldb+n]
				ctmp := c[i*ldc+j+1 : i*ldc+n]
				for k, v := range atmp {
					ctmp[k] += tmp * v
					tmp2 += btmp[k] * v
				}
				c[i*ldc+j] *= beta
				c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2
			}
		}
		return
	}
	for i := 0; i < m; i++ {
		for j := 0; j < n; j++ {
			tmp := alpha * b[i*ldb+j]
			var tmp2 float32
			atmp := a[j*lda : j*lda+j]
			btmp := b[i*ldb : i*ldb+j]
			ctmp := c[i*ldc : i*ldc+j]
			for k, v := range atmp {
				ctmp[k] += tmp * v
				tmp2 += btmp[k] * v
			}
			c[i*ldc+j] *= beta
			c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2
		}
	}
}

// Ssyrk performs one of the symmetric rank-k operations
//  C = alpha * A * A^T + beta * C  if tA == blas.NoTrans
//  C = alpha * A^T * A + beta * C  if tA == blas.Trans or tA == blas.ConjTrans
// where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and
// beta are scalars.
//
// Float32 implementations are autogenerated and not directly tested.
func (Implementation) Ssyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, beta float32, c []float32, ldc int) {
	if ul != blas.Lower && ul != blas.Upper {
		panic(badUplo)
	}
	if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans {
		panic(badTranspose)
	}
	if n < 0 {
		panic(nLT0)
	}
	if k < 0 {
		panic(kLT0)
	}
	// row×col is the logical shape of A for the chosen transpose.
	row, col := k, n
	if tA == blas.NoTrans {
		row, col = n, k
	}
	if lda < max(1, col) {
		panic(badLdA)
	}
	if ldc < max(1, n) {
		panic(badLdC)
	}

	// Quick return if possible.
	if n == 0 {
		return
	}

	// For zero matrix size the following slice length checks are trivially satisfied.
	if len(a) < lda*(row-1)+col {
		panic(shortA)
	}
	if len(c) < ldc*(n-1)+n {
		panic(shortC)
	}

	// alpha == 0 reduces the operation to C = beta*C on the ul triangle.
	if alpha == 0 {
		if beta == 0 {
			if ul == blas.Upper {
				for i := 0; i < n; i++ {
					ctmp := c[i*ldc+i : i*ldc+n]
					for j := range ctmp {
						ctmp[j] = 0
					}
				}
				return
			}
			for i := 0; i < n; i++ {
				ctmp := c[i*ldc : i*ldc+i+1]
				for j := range ctmp {
					ctmp[j] = 0
				}
			}
			return
		}
		if ul == blas.Upper {
			for i := 0; i < n; i++ {
				ctmp := c[i*ldc+i : i*ldc+n]
				for j := range ctmp {
					ctmp[j] *= beta
				}
			}
			return
		}
		for i := 0; i < n; i++ {
			ctmp := c[i*ldc : i*ldc+i+1]
			for j := range ctmp {
				ctmp[j] *= beta
			}
		}
		return
	}
	// Only the ul triangle of C is updated.
	if tA == blas.NoTrans {
		if ul == blas.Upper {
			for i := 0; i < n; i++ {
				ctmp := c[i*ldc+i : i*ldc+n]
				atmp := a[i*lda : i*lda+k]
				if beta == 0 {
					for jc := range ctmp {
						j := jc + i
						ctmp[jc] = alpha * f32.DotUnitary(atmp, a[j*lda:j*lda+k])
					}
				} else {
					for jc, vc := range ctmp {
						j := jc + i
						ctmp[jc] = vc*beta + alpha*f32.DotUnitary(atmp, a[j*lda:j*lda+k])
					}
				}
			}
			return
		}
		for i := 0; i < n; i++ {
			ctmp := c[i*ldc : i*ldc+i+1]
			atmp := a[i*lda : i*lda+k]
			if beta == 0 {
				for j := range ctmp {
					ctmp[j] = alpha * f32.DotUnitary(a[j*lda:j*lda+k], atmp)
				}
			} else {
				for j, vc := range ctmp {
					ctmp[j] = vc*beta + alpha*f32.DotUnitary(a[j*lda:j*lda+k], atmp)
				}
			}
		}
		return
	}
	// Cases where a is transposed.
	if ul == blas.Upper {
		for i := 0; i < n; i++ {
			ctmp := c[i*ldc+i : i*ldc+n]
			if beta == 0 {
				for j := range ctmp {
					ctmp[j] = 0
				}
			} else if beta != 1 {
				for j := range ctmp {
					ctmp[j] *= beta
				}
			}
			for l := 0; l < k; l++ {
				tmp := alpha * a[l*lda+i]
				if tmp != 0 {
					f32.AxpyUnitary(tmp, a[l*lda+i:l*lda+n], ctmp)
				}
			}
		}
		return
	}
	for i := 0; i < n; i++ {
		ctmp := c[i*ldc : i*ldc+i+1]
		if beta != 1 {
			for j := range ctmp {
				ctmp[j] *= beta
			}
		}
		for l := 0; l < k; l++ {
			tmp := alpha * a[l*lda+i]
			if tmp != 0 {
				f32.AxpyUnitary(tmp, a[l*lda:l*lda+i+1], ctmp)
			}
		}
	}
}

// Ssyr2k performs one of the symmetric rank 2k operations
//  C = alpha * A * B^T + alpha * B * A^T + beta * C  if tA == blas.NoTrans
//  C = alpha * A^T * B + alpha * B^T * A + beta * C  if tA == blas.Trans or tA == blas.ConjTrans
// where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and
// alpha and beta are scalars.
//
// Float32 implementations are autogenerated and not directly tested.
func (Implementation) Ssyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) {
	if ul != blas.Lower && ul != blas.Upper {
		panic(badUplo)
	}
	if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans {
		panic(badTranspose)
	}
	if n < 0 {
		panic(nLT0)
	}
	if k < 0 {
		panic(kLT0)
	}
	// row×col is the logical shape of A and B for the chosen transpose.
	row, col := k, n
	if tA == blas.NoTrans {
		row, col = n, k
	}
	if lda < max(1, col) {
		panic(badLdA)
	}
	if ldb < max(1, col) {
		panic(badLdB)
	}
	if ldc < max(1, n) {
		panic(badLdC)
	}

	// Quick return if possible.
	if n == 0 {
		return
	}

	// For zero matrix size the following slice length checks are trivially satisfied.
	if len(a) < lda*(row-1)+col {
		panic(shortA)
	}
	if len(b) < ldb*(row-1)+col {
		panic(shortB)
	}
	if len(c) < ldc*(n-1)+n {
		panic(shortC)
	}

	// alpha == 0 reduces the operation to C = beta*C on the ul triangle.
	if alpha == 0 {
		if beta == 0 {
			if ul == blas.Upper {
				for i := 0; i < n; i++ {
					ctmp := c[i*ldc+i : i*ldc+n]
					for j := range ctmp {
						ctmp[j] = 0
					}
				}
				return
			}
			for i := 0; i < n; i++ {
				ctmp := c[i*ldc : i*ldc+i+1]
				for j := range ctmp {
					ctmp[j] = 0
				}
			}
			return
		}
		if ul == blas.Upper {
			for i := 0; i < n; i++ {
				ctmp := c[i*ldc+i : i*ldc+n]
				for j := range ctmp {
					ctmp[j] *= beta
				}
			}
			return
		}
		for i := 0; i < n; i++ {
			ctmp := c[i*ldc : i*ldc+i+1]
			for j := range ctmp {
				ctmp[j] *= beta
			}
		}
		return
	}
	if tA == blas.NoTrans {
		if ul == blas.Upper {
			for i := 0; i < n; i++ {
				atmp := a[i*lda : i*lda+k]
				btmp := b[i*ldb : i*ldb+k]
				ctmp := c[i*ldc+i : i*ldc+n]
				for jc := range ctmp {
					j := i + jc
					// tmp1 = (A*B^T)[i,j], tmp2 = (B*A^T)[i,j], accumulated
					// in one pass over the shared index l.
					var tmp1, tmp2 float32
					binner := b[j*ldb : j*ldb+k]
					for l, v := range a[j*lda : j*lda+k] {
						tmp1 += v * btmp[l]
						tmp2 += atmp[l] * binner[l]
					}
					ctmp[jc] *= beta
					ctmp[jc] += alpha * (tmp1 + tmp2)
				}
			}
			return
		}
		for i := 0; i < n; i++ {
			atmp := a[i*lda : i*lda+k]
			btmp := b[i*ldb : i*ldb+k]
			ctmp := c[i*ldc : i*ldc+i+1]
			for j := 0; j <= i; j++ {
				var tmp1, tmp2 float32
				binner := b[j*ldb : j*ldb+k]
				for l, v := range a[j*lda : j*lda+k] {
					tmp1 += v * btmp[l]
					tmp2 += atmp[l] * binner[l]
				}
				ctmp[j] *= beta
				ctmp[j] += alpha * (tmp1 + tmp2)
			}
		}
		return
	}
	if ul == blas.Upper {
		for i := 0; i < n; i++ {
			ctmp := c[i*ldc+i : i*ldc+n]
			if beta != 1 {
				for j := range ctmp {
					ctmp[j] *= beta
				}
			}
			for l := 0; l < k; l++ {
				tmp1 := alpha * b[l*ldb+i]
				tmp2 := alpha * a[l*lda+i]
				btmp := b[l*ldb+i : l*ldb+n]
				if tmp1 != 0 || tmp2 != 0 {
					for j, v := range a[l*lda+i : l*lda+n] {
						ctmp[j] += v*tmp1 + btmp[j]*tmp2
					}
				}
			}
		}
		return
	}
	for i := 0; i < n; i++ {
		ctmp := c[i*ldc : i*ldc+i+1]
		if beta != 1 {
			for j := range ctmp {
				ctmp[j] *= beta
			}
		}
		for l := 0; l < k; l++ {
			tmp1 := alpha * b[l*ldb+i]
			tmp2 := alpha * a[l*lda+i]
			btmp := b[l*ldb : l*ldb+i+1]
			if tmp1 != 0 || tmp2 != 0 {
				for j, v := range a[l*lda : l*lda+i+1] {
					ctmp[j] += v*tmp1 + btmp[j]*tmp2
				}
			}
		}
	}
}

// Strmm performs one of the matrix-matrix operations
//  B = alpha * A * B    if tA == blas.NoTrans and side == blas.Left
//  B = alpha * A^T * B  if tA == blas.Trans or blas.ConjTrans, and side == blas.Left
//  B = alpha * B * A    if tA == blas.NoTrans and side == blas.Right
//  B = alpha * B * A^T  if tA == blas.Trans or blas.ConjTrans, and side == blas.Right
// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar.
//
// Float32 implementations are autogenerated and not directly tested.
func (Implementation) Strmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) {
	if s != blas.Left && s != blas.Right {
		panic(badSide)
	}
	if ul != blas.Lower && ul != blas.Upper {
		panic(badUplo)
	}
	if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans {
		panic(badTranspose)
	}
	if d != blas.NonUnit && d != blas.Unit {
		panic(badDiag)
	}
	if m < 0 {
		panic(mLT0)
	}
	if n < 0 {
		panic(nLT0)
	}
	// k is the order of the triangular matrix A for the chosen side.
	k := n
	if s == blas.Left {
		k = m
	}
	if lda < max(1, k) {
		panic(badLdA)
	}
	if ldb < max(1, n) {
		panic(badLdB)
	}

	// Quick return if possible.
	if m == 0 || n == 0 {
		return
	}

	// For zero matrix size the following slice length checks are trivially satisfied.
	if len(a) < lda*(k-1)+k {
		panic(shortA)
	}
	if len(b) < ldb*(m-1)+n {
		panic(shortB)
	}

	if alpha == 0 {
		for i := 0; i < m; i++ {
			btmp := b[i*ldb : i*ldb+n]
			for j := range btmp {
				btmp[j] = 0
			}
		}
		return
	}

	nonUnit := d == blas.NonUnit
	if s == blas.Left {
		if tA == blas.NoTrans {
			if ul == blas.Upper {
				// Ascending i: row i of B is rebuilt from rows k > i,
				// which still hold the original B.
				for i := 0; i < m; i++ {
					tmp := alpha
					if nonUnit {
						tmp *= a[i*lda+i]
					}
					btmp := b[i*ldb : i*ldb+n]
					f32.ScalUnitary(tmp, btmp)
					for ka, va := range a[i*lda+i+1 : i*lda+m] {
						k := ka + i + 1
						if va != 0 {
							f32.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp)
						}
					}
				}
				return
			}
			// Descending i: row i is rebuilt from rows k < i, which are
			// not yet overwritten.
			for i := m - 1; i >= 0; i-- {
				tmp := alpha
				if nonUnit {
					tmp *= a[i*lda+i]
				}
				btmp := b[i*ldb : i*ldb+n]
				f32.ScalUnitary(tmp, btmp)
				for k, va := range a[i*lda : i*lda+i] {
					if va != 0 {
						f32.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp)
					}
				}
			}
			return
		}
		// Cases where a is transposed.
		if ul == blas.Upper {
			for k := m - 1; k >= 0; k-- {
				btmpk := b[k*ldb : k*ldb+n]
				for ia, va := range a[k*lda+k+1 : k*lda+m] {
					i := ia + k + 1
					btmp := b[i*ldb : i*ldb+n]
					if va != 0 {
						f32.AxpyUnitary(alpha*va, btmpk, btmp)
					}
				}
				// Scale row k by its diagonal only after it has been used
				// to update the other rows.
				tmp := alpha
				if nonUnit {
					tmp *= a[k*lda+k]
				}
				if tmp != 1 {
					f32.ScalUnitary(tmp, btmpk)
				}
			}
			return
		}
		for k := 0; k < m; k++ {
			btmpk := b[k*ldb : k*ldb+n]
			for i, va := range a[k*lda : k*lda+k] {
				btmp := b[i*ldb : i*ldb+n]
				if va != 0 {
					f32.AxpyUnitary(alpha*va, btmpk, btmp)
				}
			}
			tmp := alpha
			if nonUnit {
				tmp *= a[k*lda+k]
			}
			if tmp != 1 {
				f32.ScalUnitary(tmp, btmpk)
			}
		}
		return
	}
	// Cases where a is on the right; each row of B is independent.
	if tA == blas.NoTrans {
		if ul == blas.Upper {
			for i := 0; i < m; i++ {
				btmp := b[i*ldb : i*ldb+n]
				for k := n - 1; k >= 0; k-- {
					tmp := alpha * btmp[k]
					if tmp == 0 {
						continue
					}
					btmp[k] = tmp
					if nonUnit {
						btmp[k] *= a[k*lda+k]
					}
					f32.AxpyUnitary(tmp, a[k*lda+k+1:k*lda+n], btmp[k+1:n])
				}
			}
			return
		}
		for i := 0; i < m; i++ {
			btmp := b[i*ldb : i*ldb+n]
			for k := 0; k < n; k++ {
				tmp := alpha * btmp[k]
				if tmp == 0 {
					continue
				}
				btmp[k] = tmp
				if nonUnit {
					btmp[k] *= a[k*lda+k]
				}
				f32.AxpyUnitary(tmp, a[k*lda:k*lda+k], btmp[:k])
			}
		}
		return
	}
	// Cases where a is transposed.
	if ul == blas.Upper {
		for i := 0; i < m; i++ {
			btmp := b[i*ldb : i*ldb+n]
			for j, vb := range btmp {
				tmp := vb
				if nonUnit {
					tmp *= a[j*lda+j]
				}
				tmp += f32.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n])
				btmp[j] = alpha * tmp
			}
		}
		return
	}
	for i := 0; i < m; i++ {
		btmp := b[i*ldb : i*ldb+n]
		for j := n - 1; j >= 0; j-- {
			tmp := btmp[j]
			if nonUnit {
				tmp *= a[j*lda+j]
			}
			tmp += f32.DotUnitary(a[j*lda:j*lda+j], btmp[:j])
			btmp[j] = alpha * tmp
		}
	}
}
diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go b/vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go
new file mode 100644
index 0000000000..9eebd90691
--- /dev/null
+++ b/vendor/gonum.org/v1/gonum/blas/gonum/level3float64.go
@@ -0,0 +1,864 @@
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gonum

import (
	"gonum.org/v1/gonum/blas"
	"gonum.org/v1/gonum/internal/asm/f64"
)

// Compile-time assertion that Implementation satisfies the level-3
// double-precision BLAS interface.
var _ blas.Float64Level3 = Implementation{}

// Dtrsm solves one of the matrix equations
//  A * X = alpha * B    if tA == blas.NoTrans and side == blas.Left
//  A^T * X = alpha * B  if tA == blas.Trans or blas.ConjTrans, and side == blas.Left
//  X * A = alpha * B    if tA == blas.NoTrans and side == blas.Right
//  X * A^T = alpha * B  if tA == blas.Trans or blas.ConjTrans, and side == blas.Right
// where A is an n×n or m×m triangular matrix, X and B are m×n matrices, and alpha is a
// scalar.
//
// At entry to the function, X contains the values of B, and the result is
// stored in-place into X.
//
// No check is made that A is invertible.
func (Implementation) Dtrsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) {
	if s != blas.Left && s != blas.Right {
		panic(badSide)
	}
	if ul != blas.Lower && ul != blas.Upper {
		panic(badUplo)
	}
	if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans {
		panic(badTranspose)
	}
	if d != blas.NonUnit && d != blas.Unit {
		panic(badDiag)
	}
	if m < 0 {
		panic(mLT0)
	}
	if n < 0 {
		panic(nLT0)
	}
	// k is the order of the triangular matrix A for the chosen side.
	k := n
	if s == blas.Left {
		k = m
	}
	if lda < max(1, k) {
		panic(badLdA)
	}
	if ldb < max(1, n) {
		panic(badLdB)
	}

	// Quick return if possible.
	if m == 0 || n == 0 {
		return
	}

	// For zero matrix size the following slice length checks are trivially satisfied.
	if len(a) < lda*(k-1)+k {
		panic(shortA)
	}
	if len(b) < ldb*(m-1)+n {
		panic(shortB)
	}

	// alpha == 0 makes the right-hand side zero, so the solution is zero.
	if alpha == 0 {
		for i := 0; i < m; i++ {
			btmp := b[i*ldb : i*ldb+n]
			for j := range btmp {
				btmp[j] = 0
			}
		}
		return
	}
	nonUnit := d == blas.NonUnit
	if s == blas.Left {
		if tA == blas.NoTrans {
			if ul == blas.Upper {
				// Back substitution: solve the last row first, then move
				// upward; row i uses the already-solved rows k > i.
				for i := m - 1; i >= 0; i-- {
					btmp := b[i*ldb : i*ldb+n]
					if alpha != 1 {
						f64.ScalUnitary(alpha, btmp)
					}
					for ka, va := range a[i*lda+i+1 : i*lda+m] {
						if va != 0 {
							k := ka + i + 1
							f64.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp)
						}
					}
					if nonUnit {
						tmp := 1 / a[i*lda+i]
						f64.ScalUnitary(tmp, btmp)
					}
				}
				return
			}
			// Forward substitution: rows are solved top to bottom.
			for i := 0; i < m; i++ {
				btmp := b[i*ldb : i*ldb+n]
				if alpha != 1 {
					f64.ScalUnitary(alpha, btmp)
				}
				for k, va := range a[i*lda : i*lda+i] {
					if va != 0 {
						f64.AxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp)
					}
				}
				if nonUnit {
					tmp := 1 / a[i*lda+i]
					f64.ScalUnitary(tmp, btmp)
				}
			}
			return
		}
		// Cases where a is transposed
		if ul == blas.Upper {
			for k := 0; k < m; k++ {
				btmpk := b[k*ldb : k*ldb+n]
				if nonUnit {
					tmp := 1 / a[k*lda+k]
					f64.ScalUnitary(tmp, btmpk)
				}
				for ia, va := range a[k*lda+k+1 : k*lda+m] {
					if va != 0 {
						i := ia + k + 1
						f64.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n])
					}
				}
				if alpha != 1 {
					f64.ScalUnitary(alpha, btmpk)
				}
			}
			return
		}
		for k := m - 1; k >= 0; k-- {
			btmpk := b[k*ldb : k*ldb+n]
			if nonUnit {
				tmp := 1 / a[k*lda+k]
				f64.ScalUnitary(tmp, btmpk)
			}
			for i, va := range a[k*lda : k*lda+k] {
				if va != 0 {
					f64.AxpyUnitary(-va, btmpk, b[i*ldb:i*ldb+n])
				}
			}
			if alpha != 1 {
				f64.ScalUnitary(alpha, btmpk)
			}
		}
		return
	}
	// Cases where a is to the right of X. Each row of B is independent.
	if tA == blas.NoTrans {
		if ul == blas.Upper {
			for i := 0; i < m; i++ {
				btmp := b[i*ldb : i*ldb+n]
				if alpha != 1 {
					f64.ScalUnitary(alpha, btmp)
				}
				for k, vb := range btmp {
					if vb == 0 {
						continue
					}
					if nonUnit {
						btmp[k] /= a[k*lda+k]
					}
					f64.AxpyUnitary(-btmp[k], a[k*lda+k+1:k*lda+n], btmp[k+1:n])
				}
			}
			return
		}
		for i := 0; i < m; i++ {
			btmp := b[i*ldb : i*ldb+n]
			if alpha != 1 {
				f64.ScalUnitary(alpha, btmp)
			}
			for k := n - 1; k >= 0; k-- {
				if btmp[k] == 0 {
					continue
				}
				if nonUnit {
					btmp[k] /= a[k*lda+k]
				}
				f64.AxpyUnitary(-btmp[k], a[k*lda:k*lda+k], btmp[:k])
			}
		}
		return
	}
	// Cases where a is transposed.
	if ul == blas.Upper {
		for i := 0; i < m; i++ {
			btmp := b[i*ldb : i*ldb+n]
			for j := n - 1; j >= 0; j-- {
				tmp := alpha*btmp[j] - f64.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:])
				if nonUnit {
					tmp /= a[j*lda+j]
				}
				btmp[j] = tmp
			}
		}
		return
	}
	for i := 0; i < m; i++ {
		btmp := b[i*ldb : i*ldb+n]
		for j := 0; j < n; j++ {
			tmp := alpha*btmp[j] - f64.DotUnitary(a[j*lda:j*lda+j], btmp[:j])
			if nonUnit {
				tmp /= a[j*lda+j]
			}
			btmp[j] = tmp
		}
	}
}

// Dsymm performs one of the matrix-matrix operations
//  C = alpha * A * B + beta * C  if side == blas.Left
//  C = alpha * B * A + beta * C  if side == blas.Right
// where A is an n×n or m×m symmetric matrix, B and C are m×n matrices, and alpha
// is a scalar.
+func (Implementation) Dsymm(s blas.Side, ul blas.Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { + if s != blas.Right && s != blas.Left { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + k := n + if s == blas.Left { + k = m + } + if lda < max(1, k) { + panic(badLdA) + } + if ldb < max(1, n) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if len(a) < lda*(k-1)+k { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + if len(c) < ldc*(m-1)+n { + panic(shortC) + } + + // Quick return if possible. + if alpha == 0 && beta == 1 { + return + } + + if alpha == 0 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] *= beta + } + } + return + } + + isUpper := ul == blas.Upper + if s == blas.Left { + for i := 0; i < m; i++ { + atmp := alpha * a[i*lda+i] + btmp := b[i*ldb : i*ldb+n] + ctmp := c[i*ldc : i*ldc+n] + for j, v := range btmp { + ctmp[j] *= beta + ctmp[j] += atmp * v + } + + for k := 0; k < i; k++ { + var atmp float64 + if isUpper { + atmp = a[k*lda+i] + } else { + atmp = a[i*lda+k] + } + atmp *= alpha + f64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) + } + for k := i + 1; k < m; k++ { + var atmp float64 + if isUpper { + atmp = a[i*lda+k] + } else { + atmp = a[k*lda+i] + } + atmp *= alpha + f64.AxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp) + } + } + return + } + if isUpper { + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + tmp := alpha * b[i*ldb+j] + var tmp2 float64 + atmp := a[j*lda+j+1 : j*lda+n] + btmp := b[i*ldb+j+1 : i*ldb+n] + ctmp 
:= c[i*ldc+j+1 : i*ldc+n] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } + return + } + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + tmp := alpha * b[i*ldb+j] + var tmp2 float64 + atmp := a[j*lda : j*lda+j] + btmp := b[i*ldb : i*ldb+j] + ctmp := c[i*ldc : i*ldc+j] + for k, v := range atmp { + ctmp[k] += tmp * v + tmp2 += btmp[k] * v + } + c[i*ldc+j] *= beta + c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 + } + } +} + +// Dsyrk performs one of the symmetric rank-k operations +// C = alpha * A * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and +// beta are scalars. +func (Implementation) Dsyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + row, col := k, n + if tA == blas.NoTrans { + row, col = n, k + } + if lda < max(1, col) { + panic(badLdA) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(row-1)+col { + panic(shortA) + } + if len(c) < ldc*(n-1)+n { + panic(shortC) + } + + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + atmp := a[i*lda : i*lda+k] + if beta == 0 { + for jc := range ctmp { + j := jc + i + ctmp[jc] = alpha * f64.DotUnitary(atmp, a[j*lda:j*lda+k]) + } + } else { + for jc, vc := range ctmp { + j := jc + i + ctmp[jc] = vc*beta + alpha*f64.DotUnitary(atmp, a[j*lda:j*lda+k]) + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + atmp := a[i*lda : i*lda+k] + if beta == 0 { + for j := range ctmp { + ctmp[j] = alpha * f64.DotUnitary(a[j*lda:j*lda+k], atmp) + } + } else { + for j, vc := range ctmp { + ctmp[j] = vc*beta + alpha*f64.DotUnitary(a[j*lda:j*lda+k], atmp) + } + } + } + return + } + // Cases where a is transposed. 
+ if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta == 0 { + for j := range ctmp { + ctmp[j] = 0 + } + } else if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f64.AxpyUnitary(tmp, a[l*lda+i:l*lda+n], ctmp) + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp := alpha * a[l*lda+i] + if tmp != 0 { + f64.AxpyUnitary(tmp, a[l*lda:l*lda+i+1], ctmp) + } + } + } +} + +// Dsyr2k performs one of the symmetric rank 2k operations +// C = alpha * A * B^T + alpha * B * A^T + beta * C if tA == blas.NoTrans +// C = alpha * A^T * B + alpha * B^T * A + beta * C if tA == blas.Trans or tA == blas.ConjTrans +// where A and B are n×k or k×n matrices, C is an n×n symmetric matrix, and +// alpha and beta are scalars. +func (Implementation) Dsyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { + panic(badTranspose) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + row, col := k, n + if tA == blas.NoTrans { + row, col = n, k + } + if lda < max(1, col) { + panic(badLdA) + } + if ldb < max(1, col) { + panic(badLdB) + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(row-1)+col { + panic(shortA) + } + if len(b) < ldb*(row-1)+col { + panic(shortB) + } + if len(c) < ldc*(n-1)+n { + panic(shortC) + } + + if alpha == 0 { + if beta == 0 { + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] = 0 + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + for j := range ctmp { + ctmp[j] *= beta + } + } + return + } + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc+i : i*ldc+n] + for jc := range ctmp { + j := i + jc + var tmp1, tmp2 float64 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[jc] *= beta + ctmp[jc] += alpha * (tmp1 + tmp2) + } + } + return + } + for i := 0; i < n; i++ { + atmp := a[i*lda : i*lda+k] + btmp := b[i*ldb : i*ldb+k] + ctmp := c[i*ldc : i*ldc+i+1] + for j := 0; j <= i; j++ { + var tmp1, tmp2 float64 + binner := b[j*ldb : j*ldb+k] + for l, v := range a[j*lda : j*lda+k] { + tmp1 += v * btmp[l] + tmp2 += atmp[l] * binner[l] + } + ctmp[j] *= beta + ctmp[j] += alpha * (tmp1 + tmp2) + } + } + return + } + if ul == blas.Upper { + for i := 0; i < n; i++ { + ctmp := c[i*ldc+i : i*ldc+n] + if beta != 1 { + for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb+i : l*ldb+n] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda+i : l*lda+n] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } + return + } + for i := 0; i < n; i++ { + ctmp := c[i*ldc : i*ldc+i+1] + if beta != 1 { + 
for j := range ctmp { + ctmp[j] *= beta + } + } + for l := 0; l < k; l++ { + tmp1 := alpha * b[l*ldb+i] + tmp2 := alpha * a[l*lda+i] + btmp := b[l*ldb : l*ldb+i+1] + if tmp1 != 0 || tmp2 != 0 { + for j, v := range a[l*lda : l*lda+i+1] { + ctmp[j] += v*tmp1 + btmp[j]*tmp2 + } + } + } + } +} + +// Dtrmm performs one of the matrix-matrix operations +// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left +// B = alpha * A^T * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left +// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right +// B = alpha * B * A^T if tA == blas.Trans or blas.ConjTrans, and side == blas.Right +// where A is an n×n or m×m triangular matrix, B is an m×n matrix, and alpha is a scalar. +func (Implementation) Dtrmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { + if s != blas.Left && s != blas.Right { + panic(badSide) + } + if ul != blas.Lower && ul != blas.Upper { + panic(badUplo) + } + if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { + panic(badTranspose) + } + if d != blas.NonUnit && d != blas.Unit { + panic(badDiag) + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + k := n + if s == blas.Left { + k = m + } + if lda < max(1, k) { + panic(badLdA) + } + if ldb < max(1, n) { + panic(badLdB) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. 
+ if len(a) < lda*(k-1)+k { + panic(shortA) + } + if len(b) < ldb*(m-1)+n { + panic(shortB) + } + + if alpha == 0 { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := range btmp { + btmp[j] = 0 + } + } + return + } + + nonUnit := d == blas.NonUnit + if s == blas.Left { + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + f64.ScalUnitary(tmp, btmp) + for ka, va := range a[i*lda+i+1 : i*lda+m] { + k := ka + i + 1 + if va != 0 { + f64.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + for i := m - 1; i >= 0; i-- { + tmp := alpha + if nonUnit { + tmp *= a[i*lda+i] + } + btmp := b[i*ldb : i*ldb+n] + f64.ScalUnitary(tmp, btmp) + for k, va := range a[i*lda : i*lda+i] { + if va != 0 { + f64.AxpyUnitary(alpha*va, b[k*ldb:k*ldb+n], btmp) + } + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for k := m - 1; k >= 0; k-- { + btmpk := b[k*ldb : k*ldb+n] + for ia, va := range a[k*lda+k+1 : k*lda+m] { + i := ia + k + 1 + btmp := b[i*ldb : i*ldb+n] + if va != 0 { + f64.AxpyUnitary(alpha*va, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + f64.ScalUnitary(tmp, btmpk) + } + } + return + } + for k := 0; k < m; k++ { + btmpk := b[k*ldb : k*ldb+n] + for i, va := range a[k*lda : k*lda+k] { + btmp := b[i*ldb : i*ldb+n] + if va != 0 { + f64.AxpyUnitary(alpha*va, btmpk, btmp) + } + } + tmp := alpha + if nonUnit { + tmp *= a[k*lda+k] + } + if tmp != 1 { + f64.ScalUnitary(tmp, btmpk) + } + } + return + } + // Cases where a is on the right + if tA == blas.NoTrans { + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := n - 1; k >= 0; k-- { + tmp := alpha * btmp[k] + if tmp == 0 { + continue + } + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + f64.AxpyUnitary(tmp, a[k*lda+k+1:k*lda+n], btmp[k+1:n]) + } + } + return + } + for i := 
0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for k := 0; k < n; k++ { + tmp := alpha * btmp[k] + if tmp == 0 { + continue + } + btmp[k] = tmp + if nonUnit { + btmp[k] *= a[k*lda+k] + } + f64.AxpyUnitary(tmp, a[k*lda:k*lda+k], btmp[:k]) + } + } + return + } + // Cases where a is transposed. + if ul == blas.Upper { + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j, vb := range btmp { + tmp := vb + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f64.DotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n]) + btmp[j] = alpha * tmp + } + } + return + } + for i := 0; i < m; i++ { + btmp := b[i*ldb : i*ldb+n] + for j := n - 1; j >= 0; j-- { + tmp := btmp[j] + if nonUnit { + tmp *= a[j*lda+j] + } + tmp += f64.DotUnitary(a[j*lda:j*lda+j], btmp[:j]) + btmp[j] = alpha * tmp + } + } +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go b/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go new file mode 100644 index 0000000000..e868a1050c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/sgemm.go @@ -0,0 +1,318 @@ +// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT. + +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "runtime" + "sync" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/internal/asm/f32" +) + +// Sgemm performs one of the matrix-matrix operations +// C = alpha * A * B + beta * C +// C = alpha * A^T * B + beta * C +// C = alpha * A * B^T + beta * C +// C = alpha * A^T * B^T + beta * C +// where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is +// an m×n matrix, and alpha and beta are scalars. tA and tB specify whether A or +// B are transposed. +// +// Float32 implementations are autogenerated and not directly tested. 
+func (Implementation) Sgemm(tA, tB blas.Transpose, m, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { + switch tA { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + switch tB { + default: + panic(badTranspose) + case blas.NoTrans, blas.Trans, blas.ConjTrans: + } + if m < 0 { + panic(mLT0) + } + if n < 0 { + panic(nLT0) + } + if k < 0 { + panic(kLT0) + } + aTrans := tA == blas.Trans || tA == blas.ConjTrans + if aTrans { + if lda < max(1, m) { + panic(badLdA) + } + } else { + if lda < max(1, k) { + panic(badLdA) + } + } + bTrans := tB == blas.Trans || tB == blas.ConjTrans + if bTrans { + if ldb < max(1, k) { + panic(badLdB) + } + } else { + if ldb < max(1, n) { + panic(badLdB) + } + } + if ldc < max(1, n) { + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + // For zero matrix size the following slice length checks are trivially satisfied. + if aTrans { + if len(a) < (k-1)*lda+m { + panic(shortA) + } + } else { + if len(a) < (m-1)*lda+k { + panic(shortA) + } + } + if bTrans { + if len(b) < (n-1)*ldb+k { + panic(shortB) + } + } else { + if len(b) < (k-1)*ldb+n { + panic(shortB) + } + } + if len(c) < (m-1)*ldc+n { + panic(shortC) + } + + // Quick return if possible. 
+ if (alpha == 0 || k == 0) && beta == 1 { + return + } + + // scale c + if beta != 1 { + if beta == 0 { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] = 0 + } + } + } else { + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for j := range ctmp { + ctmp[j] *= beta + } + } + } + } + + sgemmParallel(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) +} + +func sgemmParallel(aTrans, bTrans bool, m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // dgemmParallel computes a parallel matrix multiplication by partitioning + // a and b into sub-blocks, and updating c with the multiplication of the sub-block + // In all cases, + // A = [ A_11 A_12 ... A_1j + // A_21 A_22 ... A_2j + // ... + // A_i1 A_i2 ... A_ij] + // + // and same for B. All of the submatrix sizes are blockSize×blockSize except + // at the edges. + // + // In all cases, there is one dimension for each matrix along which + // C must be updated sequentially. + // Cij = \sum_k Aik Bki, (A * B) + // Cij = \sum_k Aki Bkj, (A^T * B) + // Cij = \sum_k Aik Bjk, (A * B^T) + // Cij = \sum_k Aki Bjk, (A^T * B^T) + // + // This code computes one {i, j} block sequentially along the k dimension, + // and computes all of the {i, j} blocks concurrently. This + // partitioning allows Cij to be updated in-place without race-conditions. + // Instead of launching a goroutine for each possible concurrent computation, + // a number of worker goroutines are created and channels are used to pass + // available and completed cases. + // + // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix + // multiplies, though this code does not copy matrices to attempt to eliminate + // cache misses. + + maxKLen := k + parBlocks := blocks(m, blockSize) * blocks(n, blockSize) + if parBlocks < minParBlock { + // The matrix multiplication is small in the dimensions where it can be + // computed concurrently. 
Just do it in serial. + sgemmSerial(aTrans, bTrans, m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + } + + nWorkers := runtime.GOMAXPROCS(0) + if parBlocks < nWorkers { + nWorkers = parBlocks + } + // There is a tradeoff between the workers having to wait for work + // and a large buffer making operations slow. + buf := buffMul * nWorkers + if buf > parBlocks { + buf = parBlocks + } + + sendChan := make(chan subMul, buf) + + // Launch workers. A worker receives an {i, j} submatrix of c, and computes + // A_ik B_ki (or the transposed version) storing the result in c_ij. When the + // channel is finally closed, it signals to the waitgroup that it has finished + // computing. + var wg sync.WaitGroup + for i := 0; i < nWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for sub := range sendChan { + i := sub.i + j := sub.j + leni := blockSize + if i+leni > m { + leni = m - i + } + lenj := blockSize + if j+lenj > n { + lenj = n - j + } + + cSub := sliceView32(c, ldc, i, j, leni, lenj) + + // Compute A_ik B_kj for all k + for k := 0; k < maxKLen; k += blockSize { + lenk := blockSize + if k+lenk > maxKLen { + lenk = maxKLen - k + } + var aSub, bSub []float32 + if aTrans { + aSub = sliceView32(a, lda, k, i, lenk, leni) + } else { + aSub = sliceView32(a, lda, i, k, leni, lenk) + } + if bTrans { + bSub = sliceView32(b, ldb, j, k, lenj, lenk) + } else { + bSub = sliceView32(b, ldb, k, j, lenk, lenj) + } + sgemmSerial(aTrans, bTrans, leni, lenj, lenk, aSub, lda, bSub, ldb, cSub, ldc, alpha) + } + } + }() + } + + // Send out all of the {i, j} subblocks for computation. 
+ for i := 0; i < m; i += blockSize { + for j := 0; j < n; j += blockSize { + sendChan <- subMul{ + i: i, + j: j, + } + } + } + close(sendChan) + wg.Wait() +} + +// sgemmSerial is serial matrix multiply +func sgemmSerial(aTrans, bTrans bool, m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + switch { + case !aTrans && !bTrans: + sgemmSerialNotNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && !bTrans: + sgemmSerialTransNot(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case !aTrans && bTrans: + sgemmSerialNotTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + case aTrans && bTrans: + sgemmSerialTransTrans(m, n, k, a, lda, b, ldb, c, ldc, alpha) + return + default: + panic("unreachable") + } +} + +// sgemmSerial where neither a nor b are transposed +func sgemmSerialNotNot(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + ctmp := c[i*ldc : i*ldc+n] + for l, v := range a[i*lda : i*lda+k] { + tmp := alpha * v + if tmp != 0 { + f32.AxpyUnitary(tmp, b[l*ldb:l*ldb+n], ctmp) + } + } + } +} + +// sgemmSerial where neither a is transposed and b is not +func sgemmSerialTransNot(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. 
+ for l := 0; l < k; l++ { + btmp := b[l*ldb : l*ldb+n] + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f32.AxpyUnitary(tmp, btmp, ctmp) + } + } + } +} + +// sgemmSerial where neither a is not transposed and b is +func sgemmSerialNotTrans(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for i := 0; i < m; i++ { + atmp := a[i*lda : i*lda+k] + ctmp := c[i*ldc : i*ldc+n] + for j := 0; j < n; j++ { + ctmp[j] += alpha * f32.DotUnitary(atmp, b[j*ldb:j*ldb+k]) + } + } +} + +// sgemmSerial where both are transposed +func sgemmSerialTransTrans(m, n, k int, a []float32, lda int, b []float32, ldb int, c []float32, ldc int, alpha float32) { + // This style is used instead of the literal [i*stride +j]) is used because + // approximately 5 times faster as of go 1.3. + for l := 0; l < k; l++ { + for i, v := range a[l*lda : l*lda+m] { + tmp := alpha * v + if tmp != 0 { + ctmp := c[i*ldc : i*ldc+n] + f32.AxpyInc(tmp, b[l:], ctmp, uintptr(n), uintptr(ldb), 1, 0, 0) + } + } + } +} + +func sliceView32(a []float32, lda, i, j, r, c int) []float32 { + return a[i*lda+j : (i+r-1)*lda+j+c] +} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash b/vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash new file mode 100644 index 0000000000..53db63a7f0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash @@ -0,0 +1,218 @@ +#!/usr/bin/env bash + +# Copyright ©2015 The Gonum Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +WARNINGF32='//\ +// Float32 implementations are autogenerated and not directly tested.\ +' +WARNINGC64='//\ +// Complex64 implementations are autogenerated and not directly tested.\ +' + +# Level1 routines. 
+ +echo Generating level1float32.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1float32.go +cat level1float64.go \ +| gofmt -r 'blas.Float64Level1 -> blas.Float32Level1' \ +\ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'blas.DrotmParams -> blas.SrotmParams' \ +\ +| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +| gofmt -r 'f64.ScalInc -> f32.ScalInc' \ +| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e "s_^\(func (Implementation) \)Id\(.*\)\$_$WARNINGF32\1Is\2_" \ + -e 's_^// Id_// Is_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +>> level1float32.go + +echo Generating level1cmplx64.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1cmplx64.go +cat level1cmplx128.go \ +| gofmt -r 'blas.Complex128Level1 -> blas.Complex64Level1' \ +\ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'complex128 -> complex64' \ +\ +| gofmt -r 'c128.AxpyInc -> c64.AxpyInc' \ +| gofmt -r 'c128.AxpyUnitary -> c64.AxpyUnitary' \ +| gofmt -r 'c128.DotcInc -> c64.DotcInc' \ +| gofmt -r 'c128.DotcUnitary -> c64.DotcUnitary' \ +| gofmt -r 'c128.DotuInc -> c64.DotuInc' \ +| gofmt -r 'c128.DotuUnitary -> c64.DotuUnitary' \ +| gofmt -r 'c128.ScalInc -> c64.ScalInc' \ +| gofmt -r 'c128.ScalUnitary -> c64.ScalUnitary' \ +| gofmt -r 'dcabs1 -> scabs1' \ +\ +| sed -e "s_^\(func (Implementation) \)Zdot\(.*\)\$_$WARNINGC64\1Cdot\2_" \ + -e 's_^// Zdot_// Cdot_' \ + -e "s_^\(func (Implementation) \)Zdscal\(.*\)\$_$WARNINGC64\1Csscal\2_" \ + -e 's_^// Zdscal_// Csscal_' \ + -e "s_^\(func (Implementation) \)Z\(.*\)\$_$WARNINGC64\1C\2_" \ + -e 's_^// Z_// C_' \ + -e "s_^\(func (Implementation) 
\)Iz\(.*\)\$_$WARNINGC64\1Ic\2_" \ + -e 's_^// Iz_// Ic_' \ + -e "s_^\(func (Implementation) \)Dz\(.*\)\$_$WARNINGC64\1Sc\2_" \ + -e 's_^// Dz_// Sc_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/c128"_"gonum.org/v1/gonum/internal/asm/c64"_' \ + -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \ +>> level1cmplx64.go + +echo Generating level1float32_sdot.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1float32_sdot.go +cat level1float64_ddot.go \ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.DotInc -> f32.DotInc' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level1float32_sdot.go + +echo Generating level1float32_dsdot.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1float32_dsdot.go +cat level1float64_ddot.go \ +| gofmt -r '[]float64 -> []float32' \ +\ +| gofmt -r 'f64.DotInc -> f32.DdotInc' \ +| gofmt -r 'f64.DotUnitary -> f32.DdotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1Ds\2_" \ + -e 's_^// D_// Ds_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level1float32_dsdot.go + +echo Generating level1float32_sdsdot.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level1float32_sdsdot.go +cat level1float64_ddot.go \ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.DotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)) -> alpha + float32(f32.DdotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)))' \ +| gofmt -r 'f64.DotUnitary(a, b) -> alpha + float32(f32.DdotUnitary(a, b))' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1Sds\2_" \ + -e 's_^// D\(.*\)$_// Sds\1 plus a constant_' \ + -e 's_\\sum_alpha + \\sum_' \ + 
-e 's/n int/n int, alpha float32/' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level1float32_sdsdot.go + + +# Level2 routines. + +echo Generating level2float32.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level2float32.go +cat level2float64.go \ +| gofmt -r 'blas.Float64Level2 -> blas.Float32Level2' \ +\ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \ +| gofmt -r 'f64.AxpyIncTo -> f32.AxpyIncTo' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \ +| gofmt -r 'f64.DotInc -> f32.DotInc' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +| gofmt -r 'f64.ScalInc -> f32.ScalInc' \ +| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \ +| gofmt -r 'f64.Ger -> f32.Ger' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level2float32.go + +echo Generating level2cmplx64.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level2cmplx64.go +cat level2cmplx128.go \ +| gofmt -r 'blas.Complex128Level2 -> blas.Complex64Level2' \ +\ +| gofmt -r 'complex128 -> complex64' \ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'c128.AxpyInc -> c64.AxpyInc' \ +| gofmt -r 'c128.AxpyUnitary -> c64.AxpyUnitary' \ +| gofmt -r 'c128.DotuInc -> c64.DotuInc' \ +| gofmt -r 'c128.DotuUnitary -> c64.DotuUnitary' \ +| gofmt -r 'c128.ScalInc -> c64.ScalInc' \ +| gofmt -r 'c128.ScalUnitary -> c64.ScalUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)Z\(.*\)\$_$WARNINGC64\1C\2_" \ + -e 's_^// Z_// C_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/c128"_"gonum.org/v1/gonum/internal/asm/c64"_' \ + -e 's_"math/cmplx"_cmplx "gonum.org/v1/gonum/internal/cmplx64"_' \ +>> level2cmplx64.go + +# Level3 routines. 
+ +echo Generating level3float32.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level3float32.go +cat level3float64.go \ +| gofmt -r 'blas.Float64Level3 -> blas.Float32Level3' \ +\ +| gofmt -r 'float64 -> float32' \ +\ +| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> level3float32.go + +echo Generating sgemm.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > sgemm.go +cat dgemm.go \ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'sliceView64 -> sliceView32' \ +\ +| gofmt -r 'dgemmParallel -> sgemmParallel' \ +| gofmt -r 'computeNumBlocks64 -> computeNumBlocks32' \ +| gofmt -r 'dgemmSerial -> sgemmSerial' \ +| gofmt -r 'dgemmSerialNotNot -> sgemmSerialNotNot' \ +| gofmt -r 'dgemmSerialTransNot -> sgemmSerialTransNot' \ +| gofmt -r 'dgemmSerialNotTrans -> sgemmSerialNotTrans' \ +| gofmt -r 'dgemmSerialTransTrans -> sgemmSerialTransTrans' \ +\ +| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \ +| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \ +| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \ + -e 's_^// D_// S_' \ + -e 's_^// d_// s_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \ +>> sgemm.go + +echo Generating level3cmplx64.go +echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum”; DO NOT EDIT.\n' > level3cmplx64.go +cat level3cmplx128.go \ +| gofmt -r 'blas.Complex128Level3 -> blas.Complex64Level3' \ +\ +| gofmt -r 'float64 -> float32' \ +| gofmt -r 'complex128 -> complex64' \ +\ +| gofmt -r 
'c128.ScalUnitary -> c64.ScalUnitary' \ +| gofmt -r 'c128.DscalUnitary -> c64.SscalUnitary' \ +| gofmt -r 'c128.DotcUnitary -> c64.DotcUnitary' \ +| gofmt -r 'c128.AxpyUnitary -> c64.AxpyUnitary' \ +| gofmt -r 'c128.DotuUnitary -> c64.DotuUnitary' \ +\ +| sed -e "s_^\(func (Implementation) \)Z\(.*\)\$_$WARNINGC64\1C\2_" \ + -e 's_^// Z_// C_' \ + -e 's_"gonum.org/v1/gonum/internal/asm/c128"_"gonum.org/v1/gonum/internal/asm/c64"_' \ + -e 's_"math/cmplx"_cmplx "gonum.org/v1/gonum/internal/cmplx64"_' \ +>> level3cmplx64.go diff --git a/vendor/gonum.org/v1/gonum/floats/README.md b/vendor/gonum.org/v1/gonum/floats/README.md new file mode 100644 index 0000000000..ee867bb7bf --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/README.md @@ -0,0 +1,4 @@ +# Gonum floats [![GoDoc](https://godoc.org/gonum.org/v1/gonum/floats?status.svg)](https://godoc.org/gonum.org/v1/gonum/floats) + +Package floats provides a set of helper routines for dealing with slices of float64. +The functions avoid allocations to allow for use within tight loops without garbage collection overhead. diff --git a/vendor/gonum.org/v1/gonum/floats/doc.go b/vendor/gonum.org/v1/gonum/floats/doc.go new file mode 100644 index 0000000000..bfe05c1918 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/doc.go @@ -0,0 +1,11 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package floats provides a set of helper routines for dealing with slices +// of float64. The functions avoid allocations to allow for use within tight +// loops without garbage collection overhead. +// +// The convention used is that when a slice is being modified in place, it has +// the name dst. 
+package floats // import "gonum.org/v1/gonum/floats" diff --git a/vendor/gonum.org/v1/gonum/floats/floats.go b/vendor/gonum.org/v1/gonum/floats/floats.go new file mode 100644 index 0000000000..ae004a6215 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/floats/floats.go @@ -0,0 +1,933 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this code is governed by a BSD-style +// license that can be found in the LICENSE file + +package floats + +import ( + "errors" + "math" + "sort" + "strconv" + + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Add adds, element-wise, the elements of s and dst, and stores in dst. +// Panics if the lengths of dst and s do not match. +func Add(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: length of the slices do not match") + } + f64.AxpyUnitaryTo(dst, 1, s, dst) +} + +// AddTo adds, element-wise, the elements of s and t and +// stores the result in dst. Panics if the lengths of s, t and dst do not match. +func AddTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic("floats: length of adders do not match") + } + if len(dst) != len(s) { + panic("floats: length of destination does not match length of adder") + } + f64.AxpyUnitaryTo(dst, 1, s, t) + return dst +} + +// AddConst adds the scalar c to all of the values in dst. +func AddConst(c float64, dst []float64) { + f64.AddConst(c, dst) +} + +// AddScaled performs dst = dst + alpha * s. +// It panics if the lengths of dst and s are not equal. +func AddScaled(dst []float64, alpha float64, s []float64) { + if len(dst) != len(s) { + panic("floats: length of destination and source to not match") + } + f64.AxpyUnitaryTo(dst, alpha, s, dst) +} + +// AddScaledTo performs dst = y + alpha * s, where alpha is a scalar, +// and dst, y and s are all slices. +// It panics if the lengths of dst, y, and s are not equal. 
+// +// At the return of the function, dst[i] = y[i] + alpha * s[i] +func AddScaledTo(dst, y []float64, alpha float64, s []float64) []float64 { + if len(dst) != len(s) || len(dst) != len(y) { + panic("floats: lengths of slices do not match") + } + f64.AxpyUnitaryTo(dst, alpha, s, y) + return dst +} + +// argsort is a helper that implements sort.Interface, as used by +// Argsort. +type argsort struct { + s []float64 + inds []int +} + +func (a argsort) Len() int { + return len(a.s) +} + +func (a argsort) Less(i, j int) bool { + return a.s[i] < a.s[j] +} + +func (a argsort) Swap(i, j int) { + a.s[i], a.s[j] = a.s[j], a.s[i] + a.inds[i], a.inds[j] = a.inds[j], a.inds[i] +} + +// Argsort sorts the elements of dst while tracking their original order. +// At the conclusion of Argsort, dst will contain the original elements of dst +// but sorted in increasing order, and inds will contain the original position +// of the elements in the slice such that dst[i] = origDst[inds[i]]. +// It panics if the lengths of dst and inds do not match. +func Argsort(dst []float64, inds []int) { + if len(dst) != len(inds) { + panic("floats: length of inds does not match length of slice") + } + for i := range dst { + inds[i] = i + } + + a := argsort{s: dst, inds: inds} + sort.Sort(a) +} + +// Count applies the function f to every element of s and returns the number +// of times the function returned true. +func Count(f func(float64) bool, s []float64) int { + var n int + for _, val := range s { + if f(val) { + n++ + } + } + return n +} + +// CumProd finds the cumulative product of the first i elements in +// s and puts them in place into the ith element of the +// destination dst. A panic will occur if the lengths of arguments +// do not match. +// +// At the return of the function, dst[i] = s[i] * s[i-1] * s[i-2] * ... 
+func CumProd(dst, s []float64) []float64 {
+	if len(dst) != len(s) {
+		panic("floats: length of destination does not match length of the source")
+	}
+	if len(dst) == 0 {
+		return dst
+	}
+	return f64.CumProd(dst, s)
+}
+
+// CumSum finds the cumulative sum of the first i elements in
+// s and puts them in place into the ith element of the
+// destination dst. A panic will occur if the lengths of arguments
+// do not match.
+//
+// At the return of the function, dst[i] = s[i] + s[i-1] + s[i-2] + ...
+func CumSum(dst, s []float64) []float64 {
+	if len(dst) != len(s) {
+		panic("floats: length of destination does not match length of the source")
+	}
+	if len(dst) == 0 {
+		return dst
+	}
+	return f64.CumSum(dst, s)
+}
+
+// Distance computes the L-norm of s - t. See Norm for special cases.
+// A panic will occur if the lengths of s and t do not match.
+func Distance(s, t []float64, L float64) float64 {
+	if len(s) != len(t) {
+		panic("floats: slice lengths do not match")
+	}
+	if len(s) == 0 {
+		return 0
+	}
+	var norm float64
+	if L == 2 {
+		for i, v := range s {
+			diff := t[i] - v
+			norm = math.Hypot(norm, diff)
+		}
+		return norm
+	}
+	if L == 1 {
+		for i, v := range s {
+			norm += math.Abs(t[i] - v)
+		}
+		return norm
+	}
+	if math.IsInf(L, 1) {
+		for i, v := range s {
+			absDiff := math.Abs(t[i] - v)
+			if absDiff > norm {
+				norm = absDiff
+			}
+		}
+		return norm
+	}
+	for i, v := range s {
+		norm += math.Pow(math.Abs(t[i]-v), L)
+	}
+	return math.Pow(norm, 1/L)
+}
+
+// Div performs element-wise division dst / s
+// and stores the value in dst. It panics if the
+// lengths of dst and s are not equal.
+func Div(dst, s []float64) {
+	if len(dst) != len(s) {
+		panic("floats: slice lengths do not match")
+	}
+	f64.Div(dst, s)
+}
+
+// DivTo performs element-wise division s / t
+// and stores the value in dst. It panics if the
+// lengths of s, t, and dst are not equal.
+func DivTo(dst, s, t []float64) []float64 { + if len(s) != len(t) || len(dst) != len(t) { + panic("floats: slice lengths do not match") + } + return f64.DivTo(dst, s, t) +} + +// Dot computes the dot product of s1 and s2, i.e. +// sum_{i = 1}^N s1[i]*s2[i]. +// A panic will occur if lengths of arguments do not match. +func Dot(s1, s2 []float64) float64 { + if len(s1) != len(s2) { + panic("floats: lengths of the slices do not match") + } + return f64.DotUnitary(s1, s2) +} + +// Equal returns true if the slices have equal lengths and +// all elements are numerically identical. +func Equal(s1, s2 []float64) bool { + if len(s1) != len(s2) { + return false + } + for i, val := range s1 { + if s2[i] != val { + return false + } + } + return true +} + +// EqualApprox returns true if the slices have equal lengths and +// all element pairs have an absolute tolerance less than tol or a +// relative tolerance less than tol. +func EqualApprox(s1, s2 []float64, tol float64) bool { + if len(s1) != len(s2) { + return false + } + for i, a := range s1 { + if !EqualWithinAbsOrRel(a, s2[i], tol, tol) { + return false + } + } + return true +} + +// EqualFunc returns true if the slices have the same lengths +// and the function returns true for all element pairs. +func EqualFunc(s1, s2 []float64, f func(float64, float64) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, val := range s1 { + if !f(val, s2[i]) { + return false + } + } + return true +} + +// EqualWithinAbs returns true if a and b have an absolute +// difference of less than tol. +func EqualWithinAbs(a, b, tol float64) bool { + return a == b || math.Abs(a-b) <= tol +} + +const minNormalFloat64 = 2.2250738585072014e-308 + +// EqualWithinRel returns true if the difference between a and b +// is not greater than tol times the greater value. 
+func EqualWithinRel(a, b, tol float64) bool { + if a == b { + return true + } + delta := math.Abs(a - b) + if delta <= minNormalFloat64 { + return delta <= tol*minNormalFloat64 + } + // We depend on the division in this relationship to identify + // infinities (we rely on the NaN to fail the test) otherwise + // we compare Infs of the same sign and evaluate Infs as equal + // independent of sign. + return delta/math.Max(math.Abs(a), math.Abs(b)) <= tol +} + +// EqualWithinAbsOrRel returns true if a and b are equal to within +// the absolute tolerance. +func EqualWithinAbsOrRel(a, b, absTol, relTol float64) bool { + if EqualWithinAbs(a, b, absTol) { + return true + } + return EqualWithinRel(a, b, relTol) +} + +// EqualWithinULP returns true if a and b are equal to within +// the specified number of floating point units in the last place. +func EqualWithinULP(a, b float64, ulp uint) bool { + if a == b { + return true + } + if math.IsNaN(a) || math.IsNaN(b) { + return false + } + if math.Signbit(a) != math.Signbit(b) { + return math.Float64bits(math.Abs(a))+math.Float64bits(math.Abs(b)) <= uint64(ulp) + } + return ulpDiff(math.Float64bits(a), math.Float64bits(b)) <= uint64(ulp) +} + +func ulpDiff(a, b uint64) uint64 { + if a > b { + return a - b + } + return b - a +} + +// EqualLengths returns true if all of the slices have equal length, +// and false otherwise. Returns true if there are no input slices. +func EqualLengths(slices ...[]float64) bool { + // This length check is needed: http://play.golang.org/p/sdty6YiLhM + if len(slices) == 0 { + return true + } + l := len(slices[0]) + for i := 1; i < len(slices); i++ { + if len(slices[i]) != l { + return false + } + } + return true +} + +// Find applies f to every element of s and returns the indices of the first +// k elements for which the f returns true, or all such elements +// if k < 0. +// Find will reslice inds to have 0 length, and will append +// found indices to inds. 
+// If k > 0 and there are fewer than k elements in s satisfying f, +// all of the found elements will be returned along with an error. +// At the return of the function, the input inds will be in an undetermined state. +func Find(inds []int, f func(float64) bool, s []float64, k int) ([]int, error) { + // inds is also returned to allow for calling with nil + + // Reslice inds to have zero length + inds = inds[:0] + + // If zero elements requested, can just return + if k == 0 { + return inds, nil + } + + // If k < 0, return all of the found indices + if k < 0 { + for i, val := range s { + if f(val) { + inds = append(inds, i) + } + } + return inds, nil + } + + // Otherwise, find the first k elements + nFound := 0 + for i, val := range s { + if f(val) { + inds = append(inds, i) + nFound++ + if nFound == k { + return inds, nil + } + } + } + // Finished iterating over the loop, which means k elements were not found + return inds, errors.New("floats: insufficient elements found") +} + +// HasNaN returns true if the slice s has any values that are NaN and false +// otherwise. +func HasNaN(s []float64) bool { + for _, v := range s { + if math.IsNaN(v) { + return true + } + } + return false +} + +// LogSpan returns a set of n equally spaced points in log space between, +// l and u where N is equal to len(dst). The first element of the +// resulting dst will be l and the final element of dst will be u. +// Panics if len(dst) < 2 +// Note that this call will return NaNs if either l or u are negative, and +// will return all zeros if l or u is zero. +// Also returns the mutated slice dst, so that it can be used in range, like: +// +// for i, x := range LogSpan(dst, l, u) { ... } +func LogSpan(dst []float64, l, u float64) []float64 { + Span(dst, math.Log(l), math.Log(u)) + for i := range dst { + dst[i] = math.Exp(dst[i]) + } + return dst +} + +// LogSumExp returns the log of the sum of the exponentials of the values in s. +// Panics if s is an empty slice. 
+func LogSumExp(s []float64) float64 {
+	// Want to do this in a numerically stable way which avoids
+	// overflow and underflow
+	// First, find the maximum value in the slice.
+	maxval := Max(s)
+	if math.IsInf(maxval, 0) {
+		// If it's infinity either way, the logsumexp will be infinity as well
+		// returning now avoids NaNs
+		return maxval
+	}
+	var lse float64
+	// Compute the sumexp part
+	for _, val := range s {
+		lse += math.Exp(val - maxval)
+	}
+	// Take the log and add back on the constant taken out
+	return math.Log(lse) + maxval
+}
+
+// Max returns the maximum value in the input slice. If the slice is empty, Max will panic.
+func Max(s []float64) float64 {
+	return s[MaxIdx(s)]
+}
+
+// MaxIdx returns the index of the maximum value in the input slice. If several
+// entries have the maximum value, the first such index is returned. If the slice
+// is empty, MaxIdx will panic.
+func MaxIdx(s []float64) int {
+	if len(s) == 0 {
+		panic("floats: zero slice length")
+	}
+	max := math.NaN()
+	var ind int
+	for i, v := range s {
+		if math.IsNaN(v) {
+			continue
+		}
+		if v > max || math.IsNaN(max) {
+			max = v
+			ind = i
+		}
+	}
+	return ind
+}
+
+// Min returns the minimum value in the input slice. If the slice is empty, Min will panic.
+func Min(s []float64) float64 {
+	return s[MinIdx(s)]
+}
+
+// MinIdx returns the index of the minimum value in the input slice. If several
+// entries have the minimum value, the first such index is returned. If the slice
+// is empty, MinIdx will panic.
+func MinIdx(s []float64) int {
+	if len(s) == 0 {
+		panic("floats: zero slice length")
+	}
+	min := math.NaN()
+	var ind int
+	for i, v := range s {
+		if math.IsNaN(v) {
+			continue
+		}
+		if v < min || math.IsNaN(min) {
+			min = v
+			ind = i
+		}
+	}
+	return ind
+}
+
+// Mul performs element-wise multiplication between dst
+// and s and stores the value in dst. Panics if the
+// lengths of dst and s are not equal.
+func Mul(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: slice lengths do not match") + } + for i, val := range s { + dst[i] *= val + } +} + +// MulTo performs element-wise multiplication between s +// and t and stores the value in dst. Panics if the +// lengths of s, t, and dst are not equal. +func MulTo(dst, s, t []float64) []float64 { + if len(s) != len(t) || len(dst) != len(t) { + panic("floats: slice lengths do not match") + } + for i, val := range t { + dst[i] = val * s[i] + } + return dst +} + +const ( + nanBits = 0x7ff8000000000000 + nanMask = 0xfff8000000000000 +) + +// NaNWith returns an IEEE 754 "quiet not-a-number" value with the +// payload specified in the low 51 bits of payload. +// The NaN returned by math.NaN has a bit pattern equal to NaNWith(1). +func NaNWith(payload uint64) float64 { + return math.Float64frombits(nanBits | (payload &^ nanMask)) +} + +// NaNPayload returns the lowest 51 bits payload of an IEEE 754 "quiet +// not-a-number". For values of f other than quiet-NaN, NaNPayload +// returns zero and false. +func NaNPayload(f float64) (payload uint64, ok bool) { + b := math.Float64bits(f) + if b&nanBits != nanBits { + return 0, false + } + return b &^ nanMask, true +} + +// NearestIdx returns the index of the element in s +// whose value is nearest to v. If several such +// elements exist, the lowest index is returned. +// NearestIdx panics if len(s) == 0. +func NearestIdx(s []float64, v float64) int { + if len(s) == 0 { + panic("floats: zero length slice") + } + switch { + case math.IsNaN(v): + return 0 + case math.IsInf(v, 1): + return MaxIdx(s) + case math.IsInf(v, -1): + return MinIdx(s) + } + var ind int + dist := math.NaN() + for i, val := range s { + newDist := math.Abs(v - val) + // A NaN distance will not be closer. 
+ if math.IsNaN(newDist) { + continue + } + if newDist < dist || math.IsNaN(dist) { + dist = newDist + ind = i + } + } + return ind +} + +// NearestIdxForSpan return the index of a hypothetical vector created +// by Span with length n and bounds l and u whose value is closest +// to v. That is, NearestIdxForSpan(n, l, u, v) is equivalent to +// Nearest(Span(make([]float64, n),l,u),v) without an allocation. +// NearestIdxForSpan panics if n is less than two. +func NearestIdxForSpan(n int, l, u float64, v float64) int { + if n <= 1 { + panic("floats: span must have length >1") + } + if math.IsNaN(v) { + return 0 + } + + // Special cases for Inf and NaN. + switch { + case math.IsNaN(l) && !math.IsNaN(u): + return n - 1 + case math.IsNaN(u): + return 0 + case math.IsInf(l, 0) && math.IsInf(u, 0): + if l == u { + return 0 + } + if n%2 == 1 { + if !math.IsInf(v, 0) { + return n / 2 + } + if math.Copysign(1, v) == math.Copysign(1, l) { + return 0 + } + return n/2 + 1 + } + if math.Copysign(1, v) == math.Copysign(1, l) { + return 0 + } + return n / 2 + case math.IsInf(l, 0): + if v == l { + return 0 + } + return n - 1 + case math.IsInf(u, 0): + if v == u { + return n - 1 + } + return 0 + case math.IsInf(v, -1): + if l <= u { + return 0 + } + return n - 1 + case math.IsInf(v, 1): + if u <= l { + return 0 + } + return n - 1 + } + + // Special cases for v outside (l, u) and (u, l). + switch { + case l < u: + if v <= l { + return 0 + } + if v >= u { + return n - 1 + } + case l > u: + if v >= l { + return 0 + } + if v <= u { + return n - 1 + } + default: + return 0 + } + + // Can't guarantee anything about exactly halfway between + // because of floating point weirdness. + return int((float64(n)-1)/(u-l)*(v-l) + 0.5) +} + +// Norm returns the L norm of the slice S, defined as +// (sum_{i=1}^N s[i]^L)^{1/L} +// Special cases: +// L = math.Inf(1) gives the maximum absolute value. +// Does not correctly compute the zero norm (use Count). 
+func Norm(s []float64, L float64) float64 { + // Should this complain if L is not positive? + // Should this be done in log space for better numerical stability? + // would be more cost + // maybe only if L is high? + if len(s) == 0 { + return 0 + } + if L == 2 { + twoNorm := math.Abs(s[0]) + for i := 1; i < len(s); i++ { + twoNorm = math.Hypot(twoNorm, s[i]) + } + return twoNorm + } + var norm float64 + if L == 1 { + for _, val := range s { + norm += math.Abs(val) + } + return norm + } + if math.IsInf(L, 1) { + for _, val := range s { + norm = math.Max(norm, math.Abs(val)) + } + return norm + } + for _, val := range s { + norm += math.Pow(math.Abs(val), L) + } + return math.Pow(norm, 1/L) +} + +// ParseWithNA converts the string s to a float64 in v. +// If s equals missing, w is returned as 0, otherwise 1. +func ParseWithNA(s, missing string) (v, w float64, err error) { + if s == missing { + return 0, 0, nil + } + v, err = strconv.ParseFloat(s, 64) + if err == nil { + w = 1 + } + return v, w, err +} + +// Prod returns the product of the elements of the slice. +// Returns 1 if len(s) = 0. +func Prod(s []float64) float64 { + prod := 1.0 + for _, val := range s { + prod *= val + } + return prod +} + +// Reverse reverses the order of elements in the slice. +func Reverse(s []float64) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +// Round returns the half away from zero rounded value of x with prec precision. +// +// Special cases are: +// Round(±0) = +0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64, prec int) float64 { + if x == 0 { + // Make sure zero is returned + // without the negative bit set. + return 0 + } + // Fast path for positive precision on integers. 
+ if prec >= 0 && x == math.Trunc(x) { + return x + } + pow := math.Pow10(prec) + intermed := x * pow + if math.IsInf(intermed, 0) { + return x + } + if x < 0 { + x = math.Ceil(intermed - 0.5) + } else { + x = math.Floor(intermed + 0.5) + } + + if x == 0 { + return 0 + } + + return x / pow +} + +// RoundEven returns the half even rounded value of x with prec precision. +// +// Special cases are: +// RoundEven(±0) = +0 +// RoundEven(±Inf) = ±Inf +// RoundEven(NaN) = NaN +func RoundEven(x float64, prec int) float64 { + if x == 0 { + // Make sure zero is returned + // without the negative bit set. + return 0 + } + // Fast path for positive precision on integers. + if prec >= 0 && x == math.Trunc(x) { + return x + } + pow := math.Pow10(prec) + intermed := x * pow + if math.IsInf(intermed, 0) { + return x + } + if isHalfway(intermed) { + correction, _ := math.Modf(math.Mod(intermed, 2)) + intermed += correction + if intermed > 0 { + x = math.Floor(intermed) + } else { + x = math.Ceil(intermed) + } + } else { + if x < 0 { + x = math.Ceil(intermed - 0.5) + } else { + x = math.Floor(intermed + 0.5) + } + } + + if x == 0 { + return 0 + } + + return x / pow +} + +func isHalfway(x float64) bool { + _, frac := math.Modf(x) + frac = math.Abs(frac) + return frac == 0.5 || (math.Nextafter(frac, math.Inf(-1)) < 0.5 && math.Nextafter(frac, math.Inf(1)) > 0.5) +} + +// Same returns true if the input slices have the same length and the all elements +// have the same value with NaN treated as the same. +func Same(s, t []float64) bool { + if len(s) != len(t) { + return false + } + for i, v := range s { + w := t[i] + if v != w && !(math.IsNaN(v) && math.IsNaN(w)) { + return false + } + } + return true +} + +// Scale multiplies every element in dst by the scalar c. +func Scale(c float64, dst []float64) { + if len(dst) > 0 { + f64.ScalUnitary(c, dst) + } +} + +// ScaleTo multiplies the elements in s by c and stores the result in dst. 
+func ScaleTo(dst []float64, c float64, s []float64) []float64 { + if len(dst) != len(s) { + panic("floats: lengths of slices do not match") + } + if len(dst) > 0 { + f64.ScalUnitaryTo(dst, c, s) + } + return dst +} + +// Span returns a set of N equally spaced points between l and u, where N +// is equal to the length of the destination. The first element of the destination +// is l, the final element of the destination is u. +// +// Panics if len(dst) < 2. +// +// Span also returns the mutated slice dst, so that it can be used in range expressions, +// like: +// +// for i, x := range Span(dst, l, u) { ... } +func Span(dst []float64, l, u float64) []float64 { + n := len(dst) + if n < 2 { + panic("floats: destination must have length >1") + } + + // Special cases for Inf and NaN. + switch { + case math.IsNaN(l): + for i := range dst[:len(dst)-1] { + dst[i] = math.NaN() + } + dst[len(dst)-1] = u + return dst + case math.IsNaN(u): + for i := range dst[1:] { + dst[i+1] = math.NaN() + } + dst[0] = l + return dst + case math.IsInf(l, 0) && math.IsInf(u, 0): + for i := range dst[:len(dst)/2] { + dst[i] = l + dst[len(dst)-i-1] = u + } + if len(dst)%2 == 1 { + if l != u { + dst[len(dst)/2] = 0 + } else { + dst[len(dst)/2] = l + } + } + return dst + case math.IsInf(l, 0): + for i := range dst[:len(dst)-1] { + dst[i] = l + } + dst[len(dst)-1] = u + return dst + case math.IsInf(u, 0): + for i := range dst[1:] { + dst[i+1] = u + } + dst[0] = l + return dst + } + + step := (u - l) / float64(n-1) + for i := range dst { + dst[i] = l + step*float64(i) + } + return dst +} + +// Sub subtracts, element-wise, the elements of s from dst. Panics if +// the lengths of dst and s do not match. +func Sub(dst, s []float64) { + if len(dst) != len(s) { + panic("floats: length of the slices do not match") + } + f64.AxpyUnitaryTo(dst, -1, s, dst) +} + +// SubTo subtracts, element-wise, the elements of t from s and +// stores the result in dst. Panics if the lengths of s, t and dst do not match. 
+func SubTo(dst, s, t []float64) []float64 { + if len(s) != len(t) { + panic("floats: length of subtractor and subtractee do not match") + } + if len(dst) != len(s) { + panic("floats: length of destination does not match length of subtractor") + } + f64.AxpyUnitaryTo(dst, -1, t, s) + return dst +} + +// Sum returns the sum of the elements of the slice. +func Sum(s []float64) float64 { + return f64.Sum(s) +} + +// Within returns the first index i where s[i] <= v < s[i+1]. Within panics if: +// - len(s) < 2 +// - s is not sorted +func Within(s []float64, v float64) int { + if len(s) < 2 { + panic("floats: slice length less than 2") + } + if !sort.Float64sAreSorted(s) { + panic("floats: input slice not sorted") + } + if v < s[0] || v >= s[len(s)-1] || math.IsNaN(v) { + return -1 + } + for i, f := range s[1:] { + if v < f { + return i + } + } + return -1 +} diff --git a/vendor/gonum.org/v1/gonum/graph/.gitignore b/vendor/gonum.org/v1/gonum/graph/.gitignore new file mode 100644 index 0000000000..86e0d24044 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/.gitignore @@ -0,0 +1 @@ +test.out \ No newline at end of file diff --git a/vendor/gonum.org/v1/gonum/graph/README.md b/vendor/gonum.org/v1/gonum/graph/README.md new file mode 100644 index 0000000000..f0a9505ed0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/README.md @@ -0,0 +1,3 @@ +# Gonum graph [![GoDoc](https://godoc.org/gonum.org/v1/gonum/graph?status.svg)](https://godoc.org/gonum.org/v1/gonum/graph) + +This is a generalized graph package for the Go language. diff --git a/vendor/gonum.org/v1/gonum/graph/doc.go b/vendor/gonum.org/v1/gonum/graph/doc.go new file mode 100644 index 0000000000..7eedd09ce8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/doc.go @@ -0,0 +1,9 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package graph defines graph interfaces. 
+// +// Routines to test contract compliance by user implemented graph types +// are available in gonum.org/v1/gonum/graph/testgraph. +package graph // import "gonum.org/v1/gonum/graph" diff --git a/vendor/gonum.org/v1/gonum/graph/graph.go b/vendor/gonum.org/v1/gonum/graph/graph.go new file mode 100644 index 0000000000..c973583d87 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/graph.go @@ -0,0 +1,282 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Node is a graph node. It returns a graph-unique integer ID. +type Node interface { + ID() int64 +} + +// Edge is a graph edge. In directed graphs, the direction of the +// edge is given from -> to, otherwise the edge is semantically +// unordered. +type Edge interface { + // From returns the from node of the edge. + From() Node + + // To returns the to node of the edge. + To() Node + + // ReversedEdge returns an edge that has + // the end points of the receiver swapped. + ReversedEdge() Edge +} + +// WeightedEdge is a weighted graph edge. In directed graphs, the direction +// of the edge is given from -> to, otherwise the edge is semantically +// unordered. +type WeightedEdge interface { + Edge + Weight() float64 +} + +// Graph is a generalized graph. +type Graph interface { + // Node returns the node with the given ID if it exists + // in the graph, and nil otherwise. + Node(id int64) Node + + // Nodes returns all the nodes in the graph. + // + // Nodes must not return nil. + Nodes() Nodes + + // From returns all nodes that can be reached directly + // from the node with the given ID. + // + // From must not return nil. + From(id int64) Nodes + + // HasEdgeBetween returns whether an edge exists between + // nodes with IDs xid and yid without considering direction. 
+ HasEdgeBetween(xid, yid int64) bool + + // Edge returns the edge from u to v, with IDs uid and vid, + // if such an edge exists and nil otherwise. The node v + // must be directly reachable from u as defined by the + // From method. + Edge(uid, vid int64) Edge +} + +// Weighted is a weighted graph. +type Weighted interface { + Graph + + // WeightedEdge returns the weighted edge from u to v + // with IDs uid and vid if such an edge exists and + // nil otherwise. The node v must be directly + // reachable from u as defined by the From method. + WeightedEdge(uid, vid int64) WeightedEdge + + // Weight returns the weight for the edge between + // x and y with IDs xid and yid if Edge(xid, yid) + // returns a non-nil Edge. + // If x and y are the same node or there is no + // joining edge between the two nodes the weight + // value returned is implementation dependent. + // Weight returns true if an edge exists between + // x and y or if x and y have the same ID, false + // otherwise. + Weight(xid, yid int64) (w float64, ok bool) +} + +// Undirected is an undirected graph. +type Undirected interface { + Graph + + // EdgeBetween returns the edge between nodes x and y + // with IDs xid and yid. + EdgeBetween(xid, yid int64) Edge +} + +// WeightedUndirected is a weighted undirected graph. +type WeightedUndirected interface { + Weighted + + // WeightedEdgeBetween returns the edge between nodes + // x and y with IDs xid and yid. + WeightedEdgeBetween(xid, yid int64) WeightedEdge +} + +// Directed is a directed graph. +type Directed interface { + Graph + + // HasEdgeFromTo returns whether an edge exists + // in the graph from u to v with IDs uid and vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + // + // To must not return nil. + To(id int64) Nodes +} + +// WeightedDirected is a weighted directed graph. 
+type WeightedDirected interface { + Weighted + + // HasEdgeFromTo returns whether an edge exists + // in the graph from u to v with the IDs uid and + // vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + // + // To must not return nil. + To(id int64) Nodes +} + +// NodeAdder is an interface for adding arbitrary nodes to a graph. +type NodeAdder interface { + // NewNode returns a new Node with a unique + // arbitrary ID. + NewNode() Node + + // AddNode adds a node to the graph. AddNode panics if + // the added node ID matches an existing node ID. + AddNode(Node) +} + +// NodeRemover is an interface for removing nodes from a graph. +type NodeRemover interface { + // RemoveNode removes the node with the given ID + // from the graph, as well as any edges attached + // to it. If the node is not in the graph it is + // a no-op. + RemoveNode(id int64) +} + +// EdgeAdder is an interface for adding edges to a graph. +type EdgeAdder interface { + // NewEdge returns a new Edge from the source to the destination node. + NewEdge(from, to Node) Edge + + // SetEdge adds an edge from one node to another. + // If the graph supports node addition the nodes + // will be added if they do not exist, otherwise + // SetEdge will panic. + // The behavior of an EdgeAdder when the IDs + // returned by e.From() and e.To() are equal is + // implementation-dependent. + // Whether e, e.From() and e.To() are stored + // within the graph is implementation dependent. + SetEdge(e Edge) +} + +// WeightedEdgeAdder is an interface for adding edges to a graph. +type WeightedEdgeAdder interface { + // NewWeightedEdge returns a new WeightedEdge from + // the source to the destination node. + NewWeightedEdge(from, to Node, weight float64) WeightedEdge + + // SetWeightedEdge adds an edge from one node to + // another. 
If the graph supports node addition + // the nodes will be added if they do not exist, + // otherwise SetWeightedEdge will panic. + // The behavior of a WeightedEdgeAdder when the IDs + // returned by e.From() and e.To() are equal is + // implementation-dependent. + // Whether e, e.From() and e.To() are stored + // within the graph is implementation dependent. + SetWeightedEdge(e WeightedEdge) +} + +// EdgeRemover is an interface for removing nodes from a graph. +type EdgeRemover interface { + // RemoveEdge removes the edge with the given end + // IDs, leaving the terminal nodes. If the edge + // does not exist it is a no-op. + RemoveEdge(fid, tid int64) +} + +// Builder is a graph that can have nodes and edges added. +type Builder interface { + NodeAdder + EdgeAdder +} + +// WeightedBuilder is a graph that can have nodes and weighted edges added. +type WeightedBuilder interface { + NodeAdder + WeightedEdgeAdder +} + +// UndirectedBuilder is an undirected graph builder. +type UndirectedBuilder interface { + Undirected + Builder +} + +// UndirectedWeightedBuilder is an undirected weighted graph builder. +type UndirectedWeightedBuilder interface { + Undirected + WeightedBuilder +} + +// DirectedBuilder is a directed graph builder. +type DirectedBuilder interface { + Directed + Builder +} + +// DirectedWeightedBuilder is a directed weighted graph builder. +type DirectedWeightedBuilder interface { + Directed + WeightedBuilder +} + +// Copy copies nodes and edges as undirected edges from the source to the destination +// without first clearing the destination. Copy will panic if a node ID in the source +// graph matches a node ID in the destination. +// +// If the source is undirected and the destination is directed both directions will +// be present in the destination after the copy is complete. 
+func Copy(dst Builder, src Graph) { + nodes := src.Nodes() + for nodes.Next() { + dst.AddNode(nodes.Node()) + } + nodes.Reset() + for nodes.Next() { + u := nodes.Node() + uid := u.ID() + to := src.From(uid) + for to.Next() { + v := to.Node() + dst.SetEdge(src.Edge(uid, v.ID())) + } + } +} + +// CopyWeighted copies nodes and edges as undirected edges from the source to the destination +// without first clearing the destination. Copy will panic if a node ID in the source +// graph matches a node ID in the destination. +// +// If the source is undirected and the destination is directed both directions will +// be present in the destination after the copy is complete. +// +// If the source is a directed graph, the destination is undirected, and a fundamental +// cycle exists with two nodes where the edge weights differ, the resulting destination +// graph's edge weight between those nodes is undefined. If there is a defined function +// to resolve such conflicts, an UndirectWeighted may be used to do this. +func CopyWeighted(dst WeightedBuilder, src Weighted) { + nodes := src.Nodes() + for nodes.Next() { + dst.AddNode(nodes.Node()) + } + nodes.Reset() + for nodes.Next() { + u := nodes.Node() + uid := u.ID() + to := src.From(uid) + for to.Next() { + v := to.Node() + dst.SetWeightedEdge(src.WeightedEdge(uid, v.ID())) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go new file mode 100644 index 0000000000..88d1cb80af --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/linear/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package linear provides common linear data structures. 
+package linear // import "gonum.org/v1/gonum/graph/internal/linear" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go b/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go new file mode 100644 index 0000000000..62e19db6a9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/linear/linear.go @@ -0,0 +1,73 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package linear + +import ( + "gonum.org/v1/gonum/graph" +) + +// NodeStack implements a LIFO stack of graph.Node. +type NodeStack []graph.Node + +// Len returns the number of graph.Nodes on the stack. +func (s *NodeStack) Len() int { return len(*s) } + +// Pop returns the last graph.Node on the stack and removes it +// from the stack. +func (s *NodeStack) Pop() graph.Node { + v := *s + v, n := v[:len(v)-1], v[len(v)-1] + *s = v + return n +} + +// Push adds the node n to the stack at the last position. +func (s *NodeStack) Push(n graph.Node) { *s = append(*s, n) } + +// NodeQueue implements a FIFO queue. +type NodeQueue struct { + head int + data []graph.Node +} + +// Len returns the number of graph.Nodes in the queue. +func (q *NodeQueue) Len() int { return len(q.data) - q.head } + +// Enqueue adds the node n to the back of the queue. +func (q *NodeQueue) Enqueue(n graph.Node) { + if len(q.data) == cap(q.data) && q.head > 0 { + l := q.Len() + copy(q.data, q.data[q.head:]) + q.head = 0 + q.data = append(q.data[:l], n) + } else { + q.data = append(q.data, n) + } +} + +// Dequeue returns the graph.Node at the front of the queue and +// removes it from the queue. +func (q *NodeQueue) Dequeue() graph.Node { + if q.Len() == 0 { + panic("queue: empty queue") + } + + var n graph.Node + n, q.data[q.head] = q.data[q.head], nil + q.head++ + + if q.Len() == 0 { + q.head = 0 + q.data = q.data[:0] + } + + return n +} + +// Reset clears the queue for reuse. 
+func (q *NodeQueue) Reset() { + q.head = 0 + q.data = q.data[:0] +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go new file mode 100644 index 0000000000..563df6f2e6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ordered provides common sort ordering types. +package ordered // import "gonum.org/v1/gonum/graph/internal/ordered" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go new file mode 100644 index 0000000000..a7250d1f37 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go @@ -0,0 +1,93 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ordered + +import "gonum.org/v1/gonum/graph" + +// ByID implements the sort.Interface sorting a slice of graph.Node +// by ID. +type ByID []graph.Node + +func (n ByID) Len() int { return len(n) } +func (n ByID) Less(i, j int) bool { return n[i].ID() < n[j].ID() } +func (n ByID) Swap(i, j int) { n[i], n[j] = n[j], n[i] } + +// BySliceValues implements the sort.Interface sorting a slice of +// []int64 lexically by the values of the []int64. 
+type BySliceValues [][]int64 + +func (c BySliceValues) Len() int { return len(c) } +func (c BySliceValues) Less(i, j int) bool { + a, b := c[i], c[j] + l := len(a) + if len(b) < l { + l = len(b) + } + for k, v := range a[:l] { + if v < b[k] { + return true + } + if v > b[k] { + return false + } + } + return len(a) < len(b) +} +func (c BySliceValues) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +// BySliceIDs implements the sort.Interface sorting a slice of +// []graph.Node lexically by the IDs of the []graph.Node. +type BySliceIDs [][]graph.Node + +func (c BySliceIDs) Len() int { return len(c) } +func (c BySliceIDs) Less(i, j int) bool { + a, b := c[i], c[j] + l := len(a) + if len(b) < l { + l = len(b) + } + for k, v := range a[:l] { + if v.ID() < b[k].ID() { + return true + } + if v.ID() > b[k].ID() { + return false + } + } + return len(a) < len(b) +} +func (c BySliceIDs) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +// Int64s implements the sort.Interface sorting a slice of +// int64. +type Int64s []int64 + +func (s Int64s) Len() int { return len(s) } +func (s Int64s) Less(i, j int) bool { return s[i] < s[j] } +func (s Int64s) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Reverse reverses the order of nodes. +func Reverse(nodes []graph.Node) { + for i, j := 0, len(nodes)-1; i < j; i, j = i+1, j-1 { + nodes[i], nodes[j] = nodes[j], nodes[i] + } +} + +// LinesByIDs implements the sort.Interface sorting a slice of graph.LinesByIDs +// lexically by the From IDs, then by the To IDs, finally by the Line IDs. 
+type LinesByIDs []graph.Line + +func (n LinesByIDs) Len() int { return len(n) } +func (n LinesByIDs) Less(i, j int) bool { + a, b := n[i], n[j] + if a.From().ID() != b.From().ID() { + return a.From().ID() < b.From().ID() + } + if a.To().ID() != b.To().ID() { + return a.To().ID() < b.To().ID() + } + return n[i].ID() < n[j].ID() +} +func (n LinesByIDs) Swap(i, j int) { n[i], n[j] = n[j], n[i] } diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go new file mode 100644 index 0000000000..86f2afc4e9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package set provides integer and graph.Node sets. +package set // import "gonum.org/v1/gonum/graph/internal/set" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same.go new file mode 100644 index 0000000000..f95a4e1287 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/same.go @@ -0,0 +1,36 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!safe + +package set + +import "unsafe" + +// same determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use unsafe to get +// the maps' pointer values to compare. +func same(a, b Nodes) bool { + return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) +} + +// intsSame determines whether two sets are backed by the same store. 
In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use unsafe to get +// the maps' pointer values to compare. +func intsSame(a, b Ints) bool { + return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) +} + +// int64sSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use unsafe to get +// the maps' pointer values to compare. +func int64sSame(a, b Int64s) bool { + return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go new file mode 100644 index 0000000000..4ff4f4ed22 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go @@ -0,0 +1,36 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine safe + +package set + +import "reflect" + +// same determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use reflect to get +// the maps' pointer values to compare. +func same(a, b Nodes) bool { + return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() +} + +// intsSame determines whether two sets are backed by the same store. 
In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use reflect to get +// the maps' pointer values to compare. +func intsSame(a, b Ints) bool { + return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() +} + +// int64sSame determines whether two sets are backed by the same store. In the +// current implementation using hash maps it makes use of the fact that +// hash maps are passed as a pointer to a runtime Hmap struct. A map is +// not seen by the runtime as a pointer though, so we use reflect to get +// the maps' pointer values to compare. +func int64sSame(a, b Int64s) bool { + return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/set.go b/vendor/gonum.org/v1/gonum/graph/internal/set/set.go new file mode 100644 index 0000000000..0506b8e97d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/set.go @@ -0,0 +1,228 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package set + +import "gonum.org/v1/gonum/graph" + +// Ints is a set of int identifiers. +type Ints map[int]struct{} + +// The simple accessor methods for Ints are provided to allow ease of +// implementation change should the need arise. + +// Add inserts an element into the set. +func (s Ints) Add(e int) { + s[e] = struct{}{} +} + +// Has reports the existence of the element in the set. +func (s Ints) Has(e int) bool { + _, ok := s[e] + return ok +} + +// Remove deletes the specified element from the set. +func (s Ints) Remove(e int) { + delete(s, e) +} + +// Count reports the number of elements stored in the set. +func (s Ints) Count() int { + return len(s) +} + +// IntsEqual reports set equality between the parameters. 
Sets are equal if +// and only if they have the same elements. +func IntsEqual(a, b Ints) bool { + if intsSame(a, b) { + return true + } + + if len(a) != len(b) { + return false + } + + for e := range a { + if _, ok := b[e]; !ok { + return false + } + } + + return true +} + +// Int64s is a set of int64 identifiers. +type Int64s map[int64]struct{} + +// The simple accessor methods for Ints are provided to allow ease of +// implementation change should the need arise. + +// Add inserts an element into the set. +func (s Int64s) Add(e int64) { + s[e] = struct{}{} +} + +// Has reports the existence of the element in the set. +func (s Int64s) Has(e int64) bool { + _, ok := s[e] + return ok +} + +// Remove deletes the specified element from the set. +func (s Int64s) Remove(e int64) { + delete(s, e) +} + +// Count reports the number of elements stored in the set. +func (s Int64s) Count() int { + return len(s) +} + +// Int64sEqual reports set equality between the parameters. Sets are equal if +// and only if they have the same elements. +func Int64sEqual(a, b Int64s) bool { + if int64sSame(a, b) { + return true + } + + if len(a) != len(b) { + return false + } + + for e := range a { + if _, ok := b[e]; !ok { + return false + } + } + + return true +} + +// Nodes is a set of nodes keyed in their integer identifiers. +type Nodes map[int64]graph.Node + +// NewNodes returns a new Nodes. +func NewNodes() Nodes { + return make(Nodes) +} + +// NewNodes returns a new Nodes with the given size hint, n. +func NewNodesSize(n int) Nodes { + return make(Nodes, n) +} + +// The simple accessor methods for Nodes are provided to allow ease of +// implementation change should the need arise. + +// Add inserts an element into the set. +func (s Nodes) Add(n graph.Node) { + s[n.ID()] = n +} + +// Remove deletes the specified element from the set. +func (s Nodes) Remove(e graph.Node) { + delete(s, e.ID()) +} + +// Count returns the number of element in the set. 
+func (s Nodes) Count() int { + return len(s) +} + +// Has reports the existence of the elements in the set. +func (s Nodes) Has(n graph.Node) bool { + _, ok := s[n.ID()] + return ok +} + +// CloneNodes returns a clone of src. +func CloneNodes(src Nodes) Nodes { + dst := make(Nodes, len(src)) + for e, n := range src { + dst[e] = n + } + return dst +} + +// Equal reports set equality between the parameters. Sets are equal if +// and only if they have the same elements. +func Equal(a, b Nodes) bool { + if same(a, b) { + return true + } + + if len(a) != len(b) { + return false + } + + for e := range a { + if _, ok := b[e]; !ok { + return false + } + } + + return true +} + +// UnionOfNodes returns the union of a and b. +// +// The union of two sets, a and b, is the set containing all the +// elements of each, for instance: +// +// {a,b,c} UNION {d,e,f} = {a,b,c,d,e,f} +// +// Since sets may not have repetition, unions of two sets that overlap +// do not contain repeat elements, that is: +// +// {a,b,c} UNION {b,c,d} = {a,b,c,d} +// +func UnionOfNodes(a, b Nodes) Nodes { + if same(a, b) { + return CloneNodes(a) + } + + dst := make(Nodes) + for e, n := range a { + dst[e] = n + } + for e, n := range b { + dst[e] = n + } + + return dst +} + +// IntersectionOfNodes returns the intersection of a and b. 
+// +// The intersection of two sets, a and b, is the set containing all +// the elements shared between the two sets, for instance: +// +// {a,b,c} INTERSECT {b,c,d} = {b,c} +// +// The intersection between a set and itself is itself, and thus +// effectively a copy operation: +// +// {a,b,c} INTERSECT {a,b,c} = {a,b,c} +// +// The intersection between two sets that share no elements is the empty +// set: +// +// {a,b,c} INTERSECT {d,e,f} = {} +// +func IntersectionOfNodes(a, b Nodes) Nodes { + if same(a, b) { + return CloneNodes(a) + } + dst := make(Nodes) + if len(a) > len(b) { + a, b = b, a + } + for e, n := range a { + if _, ok := b[e]; ok { + dst[e] = n + } + } + return dst +} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go b/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go new file mode 100644 index 0000000000..5f503c13da --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/internal/uid/uid.go @@ -0,0 +1,54 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uid implements unique ID provision for graphs. +package uid + +import "gonum.org/v1/gonum/graph/internal/set" + +// Max is the maximum value of int64. +const Max = int64(^uint64(0) >> 1) + +// Set implements available ID storage. +type Set struct { + maxID int64 + used, free set.Int64s +} + +// NewSet returns a new Set. The returned value should not be passed except by pointer. +func NewSet() Set { + return Set{maxID: -1, used: make(set.Int64s), free: make(set.Int64s)} +} + +// NewID returns a new unique ID. The ID returned is not considered used +// until passed in a call to use. 
+func (s *Set) NewID() int64 { + for id := range s.free { + return id + } + if s.maxID != Max { + return s.maxID + 1 + } + for id := int64(0); id <= s.maxID+1; id++ { + if !s.used.Has(id) { + return id + } + } + panic("unreachable") +} + +// Use adds the id to the used IDs in the Set. +func (s *Set) Use(id int64) { + s.used.Add(id) + s.free.Remove(id) + if id > s.maxID { + s.maxID = id + } +} + +// Release frees the id for reuse. +func (s *Set) Release(id int64) { + s.free.Add(id) + s.used.Remove(id) +} diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/doc.go b/vendor/gonum.org/v1/gonum/graph/iterator/doc.go new file mode 100644 index 0000000000..0983bc7c36 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/iterator/doc.go @@ -0,0 +1,9 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package iterator provides node, edge and line iterators. +// +// The iterators provided satisfy the graph.Nodes, graph.Edges and +// graph.Lines interfaces. +package iterator diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/edges.go b/vendor/gonum.org/v1/gonum/graph/iterator/edges.go new file mode 100644 index 0000000000..21ef0433e8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/iterator/edges.go @@ -0,0 +1,131 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iterator + +import "gonum.org/v1/gonum/graph" + +// OrderedEdges implements the graph.Edges and graph.EdgeSlicer interfaces. +// The iteration order of OrderedEdges is the order of edges passed to +// NewEdgeIterator. +type OrderedEdges struct { + idx int + edges []graph.Edge +} + +// NewOrderedEdges returns an OrderedEdges initialized with the provided edges. 
+func NewOrderedEdges(edges []graph.Edge) *OrderedEdges { + return &OrderedEdges{idx: -1, edges: edges} +} + +// Len returns the remaining number of edges to be iterated over. +func (e *OrderedEdges) Len() int { + if e.idx >= len(e.edges) { + return 0 + } + if e.idx <= 0 { + return len(e.edges) + } + return len(e.edges[e.idx:]) +} + +// Next returns whether the next call of Edge will return a valid edge. +func (e *OrderedEdges) Next() bool { + if uint(e.idx)+1 < uint(len(e.edges)) { + e.idx++ + return true + } + e.idx = len(e.edges) + return false +} + +// Edge returns the current edge of the iterator. Next must have been +// called prior to a call to Edge. +func (e *OrderedEdges) Edge() graph.Edge { + if e.idx >= len(e.edges) || e.idx < 0 { + return nil + } + return e.edges[e.idx] +} + +// EdgeSlice returns all the remaining edges in the iterator and advances +// the iterator. +func (e *OrderedEdges) EdgeSlice() []graph.Edge { + if e.idx >= len(e.edges) { + return nil + } + idx := e.idx + if idx == -1 { + idx = 0 + } + e.idx = len(e.edges) + return e.edges[idx:] +} + +// Reset returns the iterator to its initial state. +func (e *OrderedEdges) Reset() { + e.idx = -1 +} + +// OrderedWeightedEdges implements the graph.Edges and graph.EdgeSlicer interfaces. +// The iteration order of OrderedWeightedEdges is the order of edges passed to +// NewEdgeIterator. +type OrderedWeightedEdges struct { + idx int + edges []graph.WeightedEdge +} + +// NewOrderedWeightedEdges returns an OrderedWeightedEdges initialized with the provided edges. +func NewOrderedWeightedEdges(edges []graph.WeightedEdge) *OrderedWeightedEdges { + return &OrderedWeightedEdges{idx: -1, edges: edges} +} + +// Len returns the remaining number of edges to be iterated over. 
+func (e *OrderedWeightedEdges) Len() int { + if e.idx >= len(e.edges) { + return 0 + } + if e.idx <= 0 { + return len(e.edges) + } + return len(e.edges[e.idx:]) +} + +// Next returns whether the next call of WeightedEdge will return a valid edge. +func (e *OrderedWeightedEdges) Next() bool { + if uint(e.idx)+1 < uint(len(e.edges)) { + e.idx++ + return true + } + e.idx = len(e.edges) + return false +} + +// WeightedEdge returns the current edge of the iterator. Next must have been +// called prior to a call to WeightedEdge. +func (e *OrderedWeightedEdges) WeightedEdge() graph.WeightedEdge { + if e.idx >= len(e.edges) || e.idx < 0 { + return nil + } + return e.edges[e.idx] +} + +// WeightedEdgeSlice returns all the remaining edges in the iterator and advances +// the iterator. +func (e *OrderedWeightedEdges) WeightedEdgeSlice() []graph.WeightedEdge { + if e.idx >= len(e.edges) { + return nil + } + idx := e.idx + if idx == -1 { + idx = 0 + } + e.idx = len(e.edges) + return e.edges[idx:] +} + +// Reset returns the iterator to its initial state. +func (e *OrderedWeightedEdges) Reset() { + e.idx = -1 +} diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/lines.go b/vendor/gonum.org/v1/gonum/graph/iterator/lines.go new file mode 100644 index 0000000000..ed655df016 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/iterator/lines.go @@ -0,0 +1,131 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iterator + +import "gonum.org/v1/gonum/graph" + +// OrderedLines implements the graph.Lines and graph.LineSlicer interfaces. +// The iteration order of OrderedLines is the order of lines passed to +// NewLineIterator. +type OrderedLines struct { + idx int + lines []graph.Line +} + +// NewOrderedLines returns an OrderedLines initialized with the provided lines. 
+func NewOrderedLines(lines []graph.Line) *OrderedLines { + return &OrderedLines{idx: -1, lines: lines} +} + +// Len returns the remaining number of lines to be iterated over. +func (e *OrderedLines) Len() int { + if e.idx >= len(e.lines) { + return 0 + } + if e.idx <= 0 { + return len(e.lines) + } + return len(e.lines[e.idx:]) +} + +// Next returns whether the next call of Line will return a valid line. +func (e *OrderedLines) Next() bool { + if uint(e.idx)+1 < uint(len(e.lines)) { + e.idx++ + return true + } + e.idx = len(e.lines) + return false +} + +// Line returns the current line of the iterator. Next must have been +// called prior to a call to Line. +func (e *OrderedLines) Line() graph.Line { + if e.idx >= len(e.lines) || e.idx < 0 { + return nil + } + return e.lines[e.idx] +} + +// LineSlice returns all the remaining lines in the iterator and advances +// the iterator. +func (e *OrderedLines) LineSlice() []graph.Line { + if e.idx >= len(e.lines) { + return nil + } + idx := e.idx + if idx == -1 { + idx = 0 + } + e.idx = len(e.lines) + return e.lines[idx:] +} + +// Reset returns the iterator to its initial state. +func (e *OrderedLines) Reset() { + e.idx = -1 +} + +// OrderedWeightedLines implements the graph.Lines and graph.LineSlicer interfaces. +// The iteration order of OrderedWeightedLines is the order of lines passed to +// NewLineIterator. +type OrderedWeightedLines struct { + idx int + lines []graph.WeightedLine +} + +// NewWeightedLineIterator returns an OrderedWeightedLines initialized with the provided lines. +func NewOrderedWeightedLines(lines []graph.WeightedLine) *OrderedWeightedLines { + return &OrderedWeightedLines{idx: -1, lines: lines} +} + +// Len returns the remaining number of lines to be iterated over. 
+func (e *OrderedWeightedLines) Len() int { + if e.idx >= len(e.lines) { + return 0 + } + if e.idx <= 0 { + return len(e.lines) + } + return len(e.lines[e.idx:]) +} + +// Next returns whether the next call of WeightedLine will return a valid line. +func (e *OrderedWeightedLines) Next() bool { + if uint(e.idx)+1 < uint(len(e.lines)) { + e.idx++ + return true + } + e.idx = len(e.lines) + return false +} + +// WeightedLine returns the current line of the iterator. Next must have been +// called prior to a call to WeightedLine. +func (e *OrderedWeightedLines) WeightedLine() graph.WeightedLine { + if e.idx >= len(e.lines) || e.idx < 0 { + return nil + } + return e.lines[e.idx] +} + +// WeightedLineSlice returns all the remaining lines in the iterator and advances +// the iterator. +func (e *OrderedWeightedLines) WeightedLineSlice() []graph.WeightedLine { + if e.idx >= len(e.lines) { + return nil + } + idx := e.idx + if idx == -1 { + idx = 0 + } + e.idx = len(e.lines) + return e.lines[idx:] +} + +// Reset returns the iterator to its initial state. +func (e *OrderedWeightedLines) Reset() { + e.idx = -1 +} diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/nodes.go b/vendor/gonum.org/v1/gonum/graph/iterator/nodes.go new file mode 100644 index 0000000000..952dd770f2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/iterator/nodes.go @@ -0,0 +1,125 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iterator + +import "gonum.org/v1/gonum/graph" + +// OrderedNodes implements the graph.Nodes and graph.NodeSlicer interfaces. +// The iteration order of OrderedNodes is the order of nodes passed to +// NewNodeIterator. +type OrderedNodes struct { + idx int + nodes []graph.Node +} + +// NewOrderedNodes returns a OrderedNodes initialized with the provided nodes. 
+func NewOrderedNodes(nodes []graph.Node) *OrderedNodes { + return &OrderedNodes{idx: -1, nodes: nodes} +} + +// Len returns the remaining number of nodes to be iterated over. +func (n *OrderedNodes) Len() int { + if n.idx >= len(n.nodes) { + return 0 + } + if n.idx <= 0 { + return len(n.nodes) + } + return len(n.nodes[n.idx:]) +} + +// Next returns whether the next call of Node will return a valid node. +func (n *OrderedNodes) Next() bool { + if uint(n.idx)+1 < uint(len(n.nodes)) { + n.idx++ + return true + } + n.idx = len(n.nodes) + return false +} + +// Node returns the current node of the iterator. Next must have been +// called prior to a call to Node. +func (n *OrderedNodes) Node() graph.Node { + if n.idx >= len(n.nodes) || n.idx < 0 { + return nil + } + return n.nodes[n.idx] +} + +// NodeSlice returns all the remaining nodes in the iterator and advances +// the iterator. +func (n *OrderedNodes) NodeSlice() []graph.Node { + if n.idx >= len(n.nodes) { + return nil + } + idx := n.idx + if idx == -1 { + idx = 0 + } + n.idx = len(n.nodes) + return n.nodes[idx:] +} + +// Reset returns the iterator to its initial state. +func (n *OrderedNodes) Reset() { + n.idx = -1 +} + +// ImplicitNodes implements the graph.Nodes interface for a set of nodes over +// a contiguous ID range. +type ImplicitNodes struct { + beg, end int + curr int + newNode func(id int) graph.Node +} + +// NewImplicitNodes returns a new implicit node iterator spanning nodes in [beg,end). +// The provided new func maps the id to a graph.Node. NewImplicitNodes will panic +// if beg is greater than end. +func NewImplicitNodes(beg, end int, new func(id int) graph.Node) *ImplicitNodes { + if beg > end { + panic("iterator: invalid range") + } + return &ImplicitNodes{beg: beg, end: end, curr: beg - 1, newNode: new} +} + +// Len returns the remaining number of nodes to be iterated over. 
+func (n *ImplicitNodes) Len() int { + return n.end - n.curr - 1 +} + +// Next returns whether the next call of Node will return a valid node. +func (n *ImplicitNodes) Next() bool { + if n.curr == n.end { + return false + } + n.curr++ + return n.curr < n.end +} + +// Node returns the current node of the iterator. Next must have been +// called prior to a call to Node. +func (n *ImplicitNodes) Node() graph.Node { + if n.Len() == -1 || n.curr < n.beg { + return nil + } + return n.newNode(n.curr) +} + +// Reset returns the iterator to its initial state. +func (n *ImplicitNodes) Reset() { + n.curr = n.beg - 1 +} + +// NodeSlice returns all the remaining nodes in the iterator and advances +// the iterator. +func (n *ImplicitNodes) NodeSlice() []graph.Node { + nodes := make([]graph.Node, 0, n.Len()) + for n.curr++; n.curr < n.end; n.curr++ { + nodes = append(nodes, n.newNode(n.curr)) + } + return nodes +} diff --git a/vendor/gonum.org/v1/gonum/graph/multigraph.go b/vendor/gonum.org/v1/gonum/graph/multigraph.go new file mode 100644 index 0000000000..038a3d515f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/multigraph.go @@ -0,0 +1,198 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Line is an edge in a multigraph. A Line returns an ID that must +// distinguish Lines sharing Node end points. +type Line interface { + // From returns the from node of the edge. + From() Node + + // To returns the to node of the edge. + To() Node + + // ReversedLine returns a line that has the + // end points of the receiver swapped. + ReversedLine() Line + + // ID returns the unique ID for the Line. + ID() int64 +} + +// WeightedLine is a weighted multigraph edge. +type WeightedLine interface { + Line + Weight() float64 +} + +// Multigraph is a generalized multigraph. 
+type Multigraph interface { + // Node returns the node with the given ID if it exists + // in the multigraph, and nil otherwise. + Node(id int64) Node + + // Nodes returns all the nodes in the multigraph. + // + // Nodes must not return nil. + Nodes() Nodes + + // From returns all nodes that can be reached directly + // from the node with the given ID. + // + // From must not return nil. + From(id int64) Nodes + + // HasEdgeBetween returns whether an edge exists between + // nodes with IDs xid and yid without considering direction. + HasEdgeBetween(xid, yid int64) bool + + // Lines returns the lines from u to v, with IDs uid and + // vid, if any such lines exist and nil otherwise. The + // node v must be directly reachable from u as defined by + // the From method. + // + // Lines must not return nil. + Lines(uid, vid int64) Lines +} + +// WeightedMultigraph is a weighted multigraph. +type WeightedMultigraph interface { + Multigraph + + // WeightedLines returns the weighted lines from u to v + // with IDs uid and vid if any such lines exist and nil + // otherwise. The node v must be directly reachable + // from u as defined by the From method. + // + // WeightedLines must not return nil. + WeightedLines(uid, vid int64) WeightedLines +} + +// UndirectedMultigraph is an undirected multigraph. +type UndirectedMultigraph interface { + Multigraph + + // LinesBetween returns the lines between nodes x and y + // with IDs xid and yid. + // + // LinesBetween must not return nil. + LinesBetween(xid, yid int64) Lines +} + +// WeightedUndirectedMultigraph is a weighted undirected multigraph. +type WeightedUndirectedMultigraph interface { + WeightedMultigraph + + // WeightedLinesBetween returns the lines between nodes + // x and y with IDs xid and yid. + // + // WeightedLinesBetween must not return nil. + WeightedLinesBetween(xid, yid int64) WeightedLines +} + +// DirectedMultigraph is a directed multigraph. 
+type DirectedMultigraph interface { + Multigraph + + // HasEdgeFromTo returns whether an edge exists + // in the multigraph from u to v with IDs uid + // and vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + // + // To must not return nil. + To(id int64) Nodes +} + +// WeightedDirectedMultigraph is a weighted directed multigraph. +type WeightedDirectedMultigraph interface { + WeightedMultigraph + + // HasEdgeFromTo returns whether an edge exists + // in the multigraph from u to v with IDs uid + // and vid. + HasEdgeFromTo(uid, vid int64) bool + + // To returns all nodes that can reach directly + // to the node with the given ID. + // + // To must not return nil. + To(id int64) Nodes +} + +// LineAdder is an interface for adding lines to a multigraph. +type LineAdder interface { + // NewLine returns a new Line from the source to the destination node. + NewLine(from, to Node) Line + + // SetLine adds a Line from one node to another. + // If the multigraph supports node addition the nodes + // will be added if they do not exist, otherwise + // SetLine will panic. + // Whether l, l.From() and l.To() are stored + // within the graph is implementation dependent. + SetLine(l Line) +} + +// WeightedLineAdder is an interface for adding lines to a multigraph. +type WeightedLineAdder interface { + // NewWeightedLine returns a new WeightedLine from + // the source to the destination node. + NewWeightedLine(from, to Node, weight float64) WeightedLine + + // SetWeightedLine adds a weighted line from one node + // to another. If the multigraph supports node addition + // the nodes will be added if they do not exist, + // otherwise SetWeightedLine will panic. + // Whether l, l.From() and l.To() are stored + // within the graph is implementation dependent. + SetWeightedLine(l WeightedLine) +} + +// LineRemover is an interface for removing lines from a multigraph. 
+type LineRemover interface { + // RemoveLine removes the line with the given end + // and line IDs, leaving the terminal nodes. If + // the line does not exist it is a no-op. + RemoveLine(fid, tid, id int64) +} + +// MultigraphBuilder is a multigraph that can have nodes and lines added. +type MultigraphBuilder interface { + NodeAdder + LineAdder +} + +// WeightedMultigraphBuilder is a multigraph that can have nodes and weighted lines added. +type WeightedMultigraphBuilder interface { + NodeAdder + WeightedLineAdder +} + +// UndirectedMultgraphBuilder is an undirected multigraph builder. +type UndirectedMultigraphBuilder interface { + UndirectedMultigraph + MultigraphBuilder +} + +// UndirectedWeightedMultigraphBuilder is an undirected weighted multigraph builder. +type UndirectedWeightedMultigraphBuilder interface { + UndirectedMultigraph + WeightedMultigraphBuilder +} + +// DirectedMultigraphBuilder is a directed multigraph builder. +type DirectedMultigraphBuilder interface { + DirectedMultigraph + MultigraphBuilder +} + +// DirectedWeightedMultigraphBuilder is a directed weighted multigraph builder. +type DirectedWeightedMultigraphBuilder interface { + DirectedMultigraph + WeightedMultigraphBuilder +} diff --git a/vendor/gonum.org/v1/gonum/graph/nodes_edges.go b/vendor/gonum.org/v1/gonum/graph/nodes_edges.go new file mode 100644 index 0000000000..3d5dae1fa1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/nodes_edges.go @@ -0,0 +1,300 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Iterator is an item iterator. +type Iterator interface { + // Next advances the iterator and returns whether + // the next call to the item method will return a + // non-nil item. + // + // Next should be called prior to any call to the + // iterator's item retrieval method after the + // iterator has been obtained or reset. 
+ // + // The order of iteration is implementation + // dependent. + Next() bool + + // Len returns the number of items remaining in the + // iterator. + // + // If the number of items in the iterator is unknown, + // too large to materialize or too costly to calculate + // then Len may return a negative value. + // In this case the consuming function must be able + // to operate on the items of the iterator directly + // without materializing the items into a slice. + // The magnitude of a negative length has + // implementation-dependent semantics. + Len() int + + // Reset returns the iterator to its start position. + Reset() +} + +// Nodes is a Node iterator. +type Nodes interface { + Iterator + + // Node returns the current Node from the iterator. + Node() Node +} + +// NodeSlicer wraps the NodeSlice method. +type NodeSlicer interface { + // NodeSlice returns the set of nodes remaining + // to be iterated by a Nodes iterator. + // The holder of the iterator may arbitrarily + // change elements in the returned slice, but + // those changes may be reflected to other + // iterators. + NodeSlice() []Node +} + +// NodesOf returns it.Len() nodes from it. If it is a NodeSlicer, the NodeSlice method +// is used to obtain the nodes. It is safe to pass a nil Nodes to NodesOf. +// +// If the Nodes has an indeterminate length, NodesOf will panic. +func NodesOf(it Nodes) []Node { + if it == nil { + return nil + } + len := it.Len() + switch { + case len == 0: + return nil + case len < 0: + panic("graph: called NodesOf on indeterminate iterator") + } + switch it := it.(type) { + case NodeSlicer: + return it.NodeSlice() + } + n := make([]Node, 0, len) + for it.Next() { + n = append(n, it.Node()) + } + return n +} + +// Edges is an Edge iterator. +type Edges interface { + Iterator + + // Edge returns the current Edge from the iterator. + Edge() Edge +} + +// EdgeSlicer wraps the EdgeSlice method. 
+type EdgeSlicer interface {
+	// EdgeSlice returns the set of edges remaining
+	// to be iterated by an Edges iterator.
+	// The holder of the iterator may arbitrarily
+	// change elements in the returned slice, but
+	// those changes may be reflected to other
+	// iterators.
+	EdgeSlice() []Edge
+}
+
+// EdgesOf returns it.Len() edges from it. If it is an EdgeSlicer, the EdgeSlice method is used
+// to obtain the edges. It is safe to pass a nil Edges to EdgesOf.
+//
+// If the Edges has an indeterminate length, EdgesOf will panic.
+func EdgesOf(it Edges) []Edge {
+	if it == nil {
+		return nil
+	}
+	len := it.Len()
+	switch {
+	case len == 0:
+		return nil
+	case len < 0:
+		panic("graph: called EdgesOf on indeterminate iterator")
+	}
+	switch it := it.(type) {
+	case EdgeSlicer:
+		return it.EdgeSlice()
+	}
+	e := make([]Edge, 0, len)
+	for it.Next() {
+		e = append(e, it.Edge())
+	}
+	return e
+}
+
+// WeightedEdges is a WeightedEdge iterator.
+type WeightedEdges interface {
+	Iterator
+
+	// WeightedEdge returns the current WeightedEdge from the iterator.
+	WeightedEdge() WeightedEdge
+}
+
+// WeightedEdgeSlicer wraps the WeightedEdgeSlice method.
+type WeightedEdgeSlicer interface {
+	// WeightedEdgeSlice returns the set of weighted edges remaining
+	// to be iterated by a WeightedEdges iterator.
+	// The holder of the iterator may arbitrarily
+	// change elements in the returned slice, but
+	// those changes may be reflected to other
+	// iterators.
+	WeightedEdgeSlice() []WeightedEdge
+}
+
+// WeightedEdgesOf returns it.Len() weighted edges from it. If it is a WeightedEdgeSlicer, the
+// WeightedEdgeSlice method is used to obtain the edges. It is safe to pass a nil WeightedEdges
+// to WeightedEdgesOf.
+//
+// If the WeightedEdges has an indeterminate length, WeightedEdgesOf will panic.
+func WeightedEdgesOf(it WeightedEdges) []WeightedEdge {
+	if it == nil {
+		return nil
+	}
+	len := it.Len()
+	switch {
+	case len == 0:
+		return nil
+	case len < 0:
+		panic("graph: called WeightedEdgesOf on indeterminate iterator")
+	}
+	switch it := it.(type) {
+	case WeightedEdgeSlicer:
+		return it.WeightedEdgeSlice()
+	}
+	e := make([]WeightedEdge, 0, len)
+	for it.Next() {
+		e = append(e, it.WeightedEdge())
+	}
+	return e
+}
+
+// Lines is a Line iterator.
+type Lines interface {
+	Iterator
+
+	// Line returns the current Line from the iterator.
+	Line() Line
+}
+
+// LineSlicer wraps the LineSlice method.
+type LineSlicer interface {
+	// LineSlice returns the set of lines remaining
+	// to be iterated by a Lines iterator.
+	// The holder of the iterator may arbitrarily
+	// change elements in the returned slice, but
+	// those changes may be reflected to other
+	// iterators.
+	LineSlice() []Line
+}
+
+// LinesOf returns it.Len() lines from it. If it is a LineSlicer, the LineSlice method is used
+// to obtain the lines. It is safe to pass a nil Lines to LinesOf.
+//
+// If the Lines has an indeterminate length, LinesOf will panic.
+func LinesOf(it Lines) []Line {
+	if it == nil {
+		return nil
+	}
+	len := it.Len()
+	switch {
+	case len == 0:
+		return nil
+	case len < 0:
+		panic("graph: called LinesOf on indeterminate iterator")
+	}
+	switch it := it.(type) {
+	case LineSlicer:
+		return it.LineSlice()
+	}
+	l := make([]Line, 0, len)
+	for it.Next() {
+		l = append(l, it.Line())
+	}
+	return l
+}
+
+// WeightedLines is a WeightedLine iterator.
+type WeightedLines interface {
+	Iterator
+
+	// WeightedLine returns the current WeightedLine from the iterator.
+	WeightedLine() WeightedLine
+}
+
+// WeightedLineSlicer wraps the WeightedLineSlice method.
+type WeightedLineSlicer interface {
+	// WeightedLineSlice returns the set of weighted lines remaining
+	// to be iterated by a WeightedLines iterator.
+	// The holder of the iterator may arbitrarily
+	// change elements in the returned slice, but
+	// those changes may be reflected to other
+	// iterators.
+	WeightedLineSlice() []WeightedLine
+}
+
+// WeightedLinesOf returns it.Len() weighted lines from it. If it is a WeightedLineSlicer, the
+// WeightedLineSlice method is used to obtain the lines. It is safe to pass a nil WeightedLines
+// to WeightedLinesOf.
+//
+// If the WeightedLines has an indeterminate length, WeightedLinesOf will panic.
+func WeightedLinesOf(it WeightedLines) []WeightedLine {
+	if it == nil {
+		return nil
+	}
+	len := it.Len()
+	switch {
+	case len == 0:
+		return nil
+	case len < 0:
+		panic("graph: called WeightedLinesOf on indeterminate iterator")
+	}
+	switch it := it.(type) {
+	case WeightedLineSlicer:
+		return it.WeightedLineSlice()
+	}
+	l := make([]WeightedLine, 0, len)
+	for it.Next() {
+		l = append(l, it.WeightedLine())
+	}
+	return l
+}
+
+// Empty is an empty set of nodes, edges or lines. It should be used when
+// a graph returns a zero-length Iterator. Empty implements the slicer
+// interfaces for nodes, edges and lines, returning nil for each of these.
+const Empty = nothing + +var ( + _ Iterator = Empty + _ Nodes = Empty + _ NodeSlicer = Empty + _ Edges = Empty + _ EdgeSlicer = Empty + _ WeightedEdges = Empty + _ WeightedEdgeSlicer = Empty + _ Lines = Empty + _ LineSlicer = Empty + _ WeightedLines = Empty + _ WeightedLineSlicer = Empty +) + +const nothing = empty(true) + +type empty bool + +func (empty) Next() bool { return false } +func (empty) Len() int { return 0 } +func (empty) Reset() {} +func (empty) Node() Node { return nil } +func (empty) NodeSlice() []Node { return nil } +func (empty) Edge() Edge { return nil } +func (empty) EdgeSlice() []Edge { return nil } +func (empty) WeightedEdge() WeightedEdge { return nil } +func (empty) WeightedEdgeSlice() []WeightedEdge { return nil } +func (empty) Line() Line { return nil } +func (empty) LineSlice() []Line { return nil } +func (empty) WeightedLine() WeightedLine { return nil } +func (empty) WeightedLineSlice() []WeightedLine { return nil } diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go new file mode 100644 index 0000000000..3daca9adec --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go @@ -0,0 +1,301 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/mat" +) + +var ( + dm *DirectedMatrix + + _ graph.Graph = dm + _ graph.Directed = dm + _ edgeSetter = dm + _ weightedEdgeSetter = dm +) + +// DirectedMatrix represents a directed graph using an adjacency +// matrix such that all IDs are in a contiguous block from 0 to n-1. +// Edges are stored implicitly as an edge weight, so edges stored in +// the graph are not recoverable. 
+type DirectedMatrix struct { + mat *mat.Dense + nodes []graph.Node + + self float64 + absent float64 +} + +// NewDirectedMatrix creates a directed dense graph with n nodes. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewDirectedMatrix(n int, init, self, absent float64) *DirectedMatrix { + matrix := make([]float64, n*n) + if init != 0 { + for i := range matrix { + matrix[i] = init + } + } + for i := 0; i < len(matrix); i += n + 1 { + matrix[i] = self + } + return &DirectedMatrix{ + mat: mat.NewDense(n, n, matrix), + self: self, + absent: absent, + } +} + +// NewDirectedMatrixFrom creates a directed dense graph with the given nodes. +// The IDs of the nodes must be contiguous from 0 to len(nodes)-1, but may +// be in any order. If IDs are not contiguous NewDirectedMatrixFrom will panic. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewDirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *DirectedMatrix { + sort.Sort(ordered.ByID(nodes)) + for i, n := range nodes { + if int64(i) != n.ID() { + panic("simple: non-contiguous node IDs") + } + } + g := NewDirectedMatrix(len(nodes), init, self, absent) + g.nodes = nodes + return g +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *DirectedMatrix) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdge(uid, vid) +} + +// Edges returns all the edges in the graph. 
+func (g *DirectedMatrix) Edges() graph.Edges { + var edges []graph.Edge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := 0; j < r; j++ { + if i == j { + continue + } + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *DirectedMatrix) From(id int64) graph.Nodes { + if !g.has(id) { + return graph.Empty + } + var nodes []graph.Node + _, c := g.mat.Dims() + for j := 0; j < c; j++ { + if int64(j) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(int(id), j), g.absent) { + nodes = append(nodes, g.Node(int64(j))) + } + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y without +// considering direction. +func (g *DirectedMatrix) HasEdgeBetween(xid, yid int64) bool { + if !g.has(xid) { + return false + } + if !g.has(yid) { + return false + } + // xid and yid are not greater than maximum int by this point. + return xid != yid && (!isSame(g.mat.At(int(xid), int(yid)), g.absent) || !isSame(g.mat.At(int(yid), int(xid)), g.absent)) +} + +// HasEdgeFromTo returns whether an edge exists in the graph from u to v. +func (g *DirectedMatrix) HasEdgeFromTo(uid, vid int64) bool { + if !g.has(uid) { + return false + } + if !g.has(vid) { + return false + } + // uid and vid are not greater than maximum int by this point. + return uid != vid && !isSame(g.mat.At(int(uid), int(vid)), g.absent) +} + +// Matrix returns the mat.Matrix representation of the graph. The orientation +// of the matrix is such that the matrix entry at G_{ij} is the weight of the edge +// from node i to node j. 
+func (g *DirectedMatrix) Matrix() mat.Matrix { + // Prevent alteration of dimensions of the returned matrix. + m := *g.mat + return &m +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *DirectedMatrix) Node(id int64) graph.Node { + if !g.has(id) { + return nil + } + if g.nodes == nil { + return Node(id) + } + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. +func (g *DirectedMatrix) Nodes() graph.Nodes { + if g.nodes != nil { + nodes := make([]graph.Node, len(g.nodes)) + copy(nodes, g.nodes) + return iterator.NewOrderedNodes(nodes) + } + r, _ := g.mat.Dims() + // Matrix graphs must have at least one node. + return iterator.NewImplicitNodes(0, r, newSimpleNode) +} + +// RemoveEdge removes the edge with the given end point nodes from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *DirectedMatrix) RemoveEdge(fid, tid int64) { + if !g.has(fid) { + return + } + if !g.has(tid) { + return + } + // fid and tid are not greater than maximum int by this point. + g.mat.Set(int(fid), int(tid), g.absent) +} + +// SetEdge sets e, an edge from one node to another with unit weight. If the ends of the edge +// are not in g or the edge is a self loop, SetEdge panics. SetEdge will store the nodes of +// e in the graph if it was initialized with NewDirectedMatrixFrom. +func (g *DirectedMatrix) SetEdge(e graph.Edge) { + g.setWeightedEdge(e, 1) +} + +// SetWeightedEdge sets e, an edge from one node to another. If the ends of the edge are not in g +// or the edge is a self loop, SetWeightedEdge panics. SetWeightedEdge will store the nodes of +// e in the graph if it was initialized with NewDirectedMatrixFrom. 
+func (g *DirectedMatrix) SetWeightedEdge(e graph.WeightedEdge) { + g.setWeightedEdge(e, e.Weight()) +} + +func (g *DirectedMatrix) setWeightedEdge(e graph.Edge, weight float64) { + from := e.From() + fid := from.ID() + to := e.To() + tid := to.ID() + if fid == tid { + panic("simple: set illegal edge") + } + if int64(int(fid)) != fid { + panic("simple: unavailable from node ID for dense graph") + } + if int64(int(tid)) != tid { + panic("simple: unavailable to node ID for dense graph") + } + if g.nodes != nil { + g.nodes[fid] = from + g.nodes[tid] = to + } + // fid and tid are not greater than maximum int by this point. + g.mat.Set(int(fid), int(tid), weight) +} + +// To returns all nodes in g that can reach directly to n. +func (g *DirectedMatrix) To(id int64) graph.Nodes { + if !g.has(id) { + return graph.Empty + } + var nodes []graph.Node + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + if int64(i) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(i, int(id)), g.absent) { + nodes = append(nodes, g.Node(int64(i))) + } + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *DirectedMatrix) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if g.HasEdgeFromTo(xid, yid) { + // xid and yid are not greater than maximum int by this point. + return g.mat.At(int(xid), int(yid)), true + } + return g.absent, false +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. 
+// The node v must be directly reachable from u as defined by the From method. +func (g *DirectedMatrix) WeightedEdge(uid, vid int64) graph.WeightedEdge { + if g.HasEdgeFromTo(uid, vid) { + // xid and yid are not greater than maximum int by this point. + return WeightedEdge{F: g.Node(uid), T: g.Node(vid), W: g.mat.At(int(uid), int(vid))} + } + return nil +} + +// WeightedEdges returns all the edges in the graph. +func (g *DirectedMatrix) WeightedEdges() graph.WeightedEdges { + var edges []graph.WeightedEdge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := 0; j < r; j++ { + if i == j { + continue + } + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedWeightedEdges(edges) +} + +func (g *DirectedMatrix) has(id int64) bool { + r, _ := g.mat.Dims() + return 0 <= id && id < int64(r) +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go new file mode 100644 index 0000000000..f51debb4fc --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go @@ -0,0 +1,268 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/mat" +) + +var ( + um *UndirectedMatrix + + _ graph.Graph = um + _ graph.Undirected = um + _ edgeSetter = um + _ weightedEdgeSetter = um +) + +// UndirectedMatrix represents an undirected graph using an adjacency +// matrix such that all IDs are in a contiguous block from 0 to n-1. 
+// Edges are stored implicitly as an edge weight, so edges stored in +// the graph are not recoverable. +type UndirectedMatrix struct { + mat *mat.SymDense + nodes []graph.Node + + self float64 + absent float64 +} + +// NewUndirectedMatrix creates an undirected dense graph with n nodes. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewUndirectedMatrix(n int, init, self, absent float64) *UndirectedMatrix { + matrix := make([]float64, n*n) + if init != 0 { + for i := range matrix { + matrix[i] = init + } + } + for i := 0; i < len(matrix); i += n + 1 { + matrix[i] = self + } + return &UndirectedMatrix{ + mat: mat.NewSymDense(n, matrix), + self: self, + absent: absent, + } +} + +// NewUndirectedMatrixFrom creates an undirected dense graph with the given nodes. +// The IDs of the nodes must be contiguous from 0 to len(nodes)-1, but may +// be in any order. If IDs are not contiguous NewUndirectedMatrixFrom will panic. +// All edges are initialized with the weight given by init. The self parameter +// specifies the cost of self connection, and absent specifies the weight +// returned for absent edges. +func NewUndirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *UndirectedMatrix { + sort.Sort(ordered.ByID(nodes)) + for i, n := range nodes { + if int64(i) != n.ID() { + panic("simple: non-contiguous node IDs") + } + } + g := NewUndirectedMatrix(len(nodes), init, self, absent) + g.nodes = nodes + return g +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *UndirectedMatrix) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. 
+func (g *UndirectedMatrix) EdgeBetween(uid, vid int64) graph.Edge { + return g.WeightedEdgeBetween(uid, vid) +} + +// Edges returns all the edges in the graph. +func (g *UndirectedMatrix) Edges() graph.Edges { + var edges []graph.Edge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := i + 1; j < r; j++ { + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *UndirectedMatrix) From(id int64) graph.Nodes { + if !g.has(id) { + return graph.Empty + } + var nodes []graph.Node + r := g.mat.Symmetric() + for i := 0; i < r; i++ { + if int64(i) == id { + continue + } + // id is not greater than maximum int by this point. + if !isSame(g.mat.At(int(id), i), g.absent) { + nodes = append(nodes, g.Node(int64(i))) + } + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g *UndirectedMatrix) HasEdgeBetween(uid, vid int64) bool { + if !g.has(uid) { + return false + } + if !g.has(vid) { + return false + } + // uid and vid are not greater than maximum int by this point. + return uid != vid && !isSame(g.mat.At(int(uid), int(vid)), g.absent) +} + +// Matrix returns the mat.Matrix representation of the graph. +func (g *UndirectedMatrix) Matrix() mat.Matrix { + // Prevent alteration of dimensions of the returned matrix. + m := *g.mat + return &m +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *UndirectedMatrix) Node(id int64) graph.Node { + if !g.has(id) { + return nil + } + if g.nodes == nil { + return Node(id) + } + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. 
+func (g *UndirectedMatrix) Nodes() graph.Nodes { + if g.nodes != nil { + nodes := make([]graph.Node, len(g.nodes)) + copy(nodes, g.nodes) + return iterator.NewOrderedNodes(nodes) + } + r := g.mat.Symmetric() + // Matrix graphs must have at least one node. + return iterator.NewImplicitNodes(0, r, newSimpleNode) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *UndirectedMatrix) RemoveEdge(fid, tid int64) { + if !g.has(fid) { + return + } + if !g.has(tid) { + return + } + // fid and tid are not greater than maximum int by this point. + g.mat.SetSym(int(fid), int(tid), g.absent) +} + +// SetEdge sets e, an edge from one node to another with unit weight. If the ends of the edge are +// not in g or the edge is a self loop, SetEdge panics. SetEdge will store the nodes of +// e in the graph if it was initialized with NewUndirectedMatrixFrom. +func (g *UndirectedMatrix) SetEdge(e graph.Edge) { + g.setWeightedEdge(e, 1) +} + +// SetWeightedEdge sets e, an edge from one node to another. If the ends of the edge are not in g +// or the edge is a self loop, SetWeightedEdge panics. SetWeightedEdge will store the nodes of +// e in the graph if it was initialized with NewUndirectedMatrixFrom. +func (g *UndirectedMatrix) SetWeightedEdge(e graph.WeightedEdge) { + g.setWeightedEdge(e, e.Weight()) +} + +func (g *UndirectedMatrix) setWeightedEdge(e graph.Edge, weight float64) { + from := e.From() + fid := from.ID() + to := e.To() + tid := to.ID() + if fid == tid { + panic("simple: set illegal edge") + } + if int64(int(fid)) != fid { + panic("simple: unavailable from node ID for dense graph") + } + if int64(int(tid)) != tid { + panic("simple: unavailable to node ID for dense graph") + } + if g.nodes != nil { + g.nodes[fid] = from + g.nodes[tid] = to + } + // fid and tid are not greater than maximum int by this point. 
+ g.mat.SetSym(int(fid), int(tid), weight) +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *UndirectedMatrix) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if g.HasEdgeBetween(xid, yid) { + // xid and yid are not greater than maximum int by this point. + return g.mat.At(int(xid), int(yid)), true + } + return g.absent, false +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *UndirectedMatrix) WeightedEdge(uid, vid int64) graph.WeightedEdge { + return g.WeightedEdgeBetween(uid, vid) +} + +// WeightedEdgeBetween returns the weighted edge between nodes x and y. +func (g *UndirectedMatrix) WeightedEdgeBetween(uid, vid int64) graph.WeightedEdge { + if g.HasEdgeBetween(uid, vid) { + // uid and vid are not greater than maximum int by this point. + return WeightedEdge{F: g.Node(uid), T: g.Node(vid), W: g.mat.At(int(uid), int(vid))} + } + return nil +} + +// WeightedEdges returns all the edges in the graph. 
+func (g *UndirectedMatrix) WeightedEdges() graph.WeightedEdges { + var edges []graph.WeightedEdge + r, _ := g.mat.Dims() + for i := 0; i < r; i++ { + for j := i + 1; j < r; j++ { + if w := g.mat.At(i, j); !isSame(w, g.absent) { + edges = append(edges, WeightedEdge{F: g.Node(int64(i)), T: g.Node(int64(j)), W: w}) + } + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedWeightedEdges(edges) +} + +func (g *UndirectedMatrix) has(id int64) bool { + r := g.mat.Symmetric() + return 0 <= id && id < int64(r) +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/directed.go b/vendor/gonum.org/v1/gonum/graph/simple/directed.go new file mode 100644 index 0000000000..f19efbd0a2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/directed.go @@ -0,0 +1,235 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" + "gonum.org/v1/gonum/graph/iterator" +) + +var ( + dg *DirectedGraph + + _ graph.Graph = dg + _ graph.Directed = dg + _ graph.NodeAdder = dg + _ graph.NodeRemover = dg + _ graph.EdgeAdder = dg + _ graph.EdgeRemover = dg +) + +// DirectedGraph implements a generalized directed graph. +type DirectedGraph struct { + nodes map[int64]graph.Node + from map[int64]map[int64]graph.Edge + to map[int64]map[int64]graph.Edge + + nodeIDs uid.Set +} + +// NewDirectedGraph returns a DirectedGraph. +func NewDirectedGraph() *DirectedGraph { + return &DirectedGraph{ + nodes: make(map[int64]graph.Node), + from: make(map[int64]map[int64]graph.Edge), + to: make(map[int64]map[int64]graph.Edge), + + nodeIDs: uid.NewSet(), + } +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. 
+func (g *DirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.from[n.ID()] = make(map[int64]graph.Edge) + g.to[n.ID()] = make(map[int64]graph.Edge) + g.nodeIDs.Use(n.ID()) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *DirectedGraph) Edge(uid, vid int64) graph.Edge { + edge, ok := g.from[uid][vid] + if !ok { + return nil + } + return edge +} + +// Edges returns all the edges in the graph. +func (g *DirectedGraph) Edges() graph.Edges { + var edges []graph.Edge + for _, u := range g.nodes { + for _, e := range g.from[u.ID()] { + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *DirectedGraph) From(id int64) graph.Nodes { + if _, ok := g.from[id]; !ok { + return graph.Empty + } + + from := make([]graph.Node, len(g.from[id])) + i := 0 + for vid := range g.from[id] { + from[i] = g.nodes[vid] + i++ + } + if len(from) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(from) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y without +// considering direction. +func (g *DirectedGraph) HasEdgeBetween(xid, yid int64) bool { + if _, ok := g.from[xid][yid]; ok { + return true + } + _, ok := g.from[yid][xid] + return ok +} + +// HasEdgeFromTo returns whether an edge exists in the graph from u to v. +func (g *DirectedGraph) HasEdgeFromTo(uid, vid int64) bool { + if _, ok := g.from[uid][vid]; !ok { + return false + } + return true +} + +// NewEdge returns a new Edge from the source to the destination node. 
+func (g *DirectedGraph) NewEdge(from, to graph.Node) graph.Edge { + return &Edge{F: from, T: to} +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *DirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *DirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. +func (g *DirectedGraph) Nodes() graph.Nodes { + if len(g.nodes) == 0 { + return graph.Empty + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return iterator.NewOrderedNodes(nodes) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *DirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.from[fid], tid) + delete(g.to[tid], fid) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *DirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.from[id] { + delete(g.to[from], id) + } + delete(g.from, id) + + for to := range g.to[id] { + delete(g.from[to], id) + } + delete(g.to, id) + + g.nodeIDs.Release(id) +} + +// SetEdge adds e, an edge from one node to another. If the nodes do not exist, they are added +// and are set to the nodes of the edge otherwise. +// It will panic if the IDs of the e.From and e.To are equal. 
+func (g *DirectedGraph) SetEdge(e graph.Edge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if _, ok := g.nodes[fid]; !ok { + g.AddNode(from) + } else { + g.nodes[fid] = from + } + if _, ok := g.nodes[tid]; !ok { + g.AddNode(to) + } else { + g.nodes[tid] = to + } + + g.from[fid][tid] = e + g.to[tid][fid] = e +} + +// To returns all nodes in g that can reach directly to n. +func (g *DirectedGraph) To(id int64) graph.Nodes { + if _, ok := g.from[id]; !ok { + return graph.Empty + } + + to := make([]graph.Node, len(g.to[id])) + i := 0 + for uid := range g.to[id] { + to[i] = g.nodes[uid] + i++ + } + if len(to) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(to) +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/doc.go b/vendor/gonum.org/v1/gonum/graph/simple/doc.go new file mode 100644 index 0000000000..dc3f24c54f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/doc.go @@ -0,0 +1,9 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package simple provides a suite of simple graph implementations satisfying +// the gonum/graph interfaces. +// +// All types in simple return the graph.Empty value for empty iterators. +package simple // import "gonum.org/v1/gonum/graph/simple" diff --git a/vendor/gonum.org/v1/gonum/graph/simple/simple.go b/vendor/gonum.org/v1/gonum/graph/simple/simple.go new file mode 100644 index 0000000000..3b45765877 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/simple.go @@ -0,0 +1,72 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "math" + + "gonum.org/v1/gonum/graph" +) + +// Node is a simple graph node. 
+type Node int64
+
+// ID returns the ID number of the node.
+func (n Node) ID() int64 {
+	return int64(n)
+}
+
+func newSimpleNode(id int) graph.Node {
+	return Node(id)
+}
+
+// Edge is a simple graph edge.
+type Edge struct {
+	F, T graph.Node
+}
+
+// From returns the from-node of the edge.
+func (e Edge) From() graph.Node { return e.F }
+
+// To returns the to-node of the edge.
+func (e Edge) To() graph.Node { return e.T }
+
+// ReversedEdge returns a new Edge with the F and T fields
+// swapped.
+func (e Edge) ReversedEdge() graph.Edge { return Edge{F: e.T, T: e.F} }
+
+// WeightedEdge is a simple weighted graph edge.
+type WeightedEdge struct {
+	F, T graph.Node
+	W    float64
+}
+
+// From returns the from-node of the edge.
+func (e WeightedEdge) From() graph.Node { return e.F }
+
+// To returns the to-node of the edge.
+func (e WeightedEdge) To() graph.Node { return e.T }
+
+// ReversedEdge returns a new Edge with the F and T fields
+// swapped. The weight of the new Edge is the same as
+// the weight of the receiver.
+func (e WeightedEdge) ReversedEdge() graph.Edge { return WeightedEdge{F: e.T, T: e.F, W: e.W} }
+
+// Weight returns the weight of the edge.
+func (e WeightedEdge) Weight() float64 { return e.W }
+
+// isSame returns whether two float64 values are the same where NaN values
+// are equalable.
+func isSame(a, b float64) bool {
+	return a == b || (math.IsNaN(a) && math.IsNaN(b))
+}
+
+type edgeSetter interface {
+	SetEdge(e graph.Edge)
+}
+
+type weightedEdgeSetter interface {
+	SetWeightedEdge(e graph.WeightedEdge)
+}
diff --git a/vendor/gonum.org/v1/gonum/graph/simple/undirected.go b/vendor/gonum.org/v1/gonum/graph/simple/undirected.go
new file mode 100644
index 0000000000..841a8e380c
--- /dev/null
+++ b/vendor/gonum.org/v1/gonum/graph/simple/undirected.go
@@ -0,0 +1,216 @@
+// Copyright ©2014 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" + "gonum.org/v1/gonum/graph/iterator" +) + +var ( + ug *UndirectedGraph + + _ graph.Graph = ug + _ graph.Undirected = ug + _ graph.NodeAdder = ug + _ graph.NodeRemover = ug + _ graph.EdgeAdder = ug + _ graph.EdgeRemover = ug +) + +// UndirectedGraph implements a generalized undirected graph. +type UndirectedGraph struct { + nodes map[int64]graph.Node + edges map[int64]map[int64]graph.Edge + + nodeIDs uid.Set +} + +// NewUndirectedGraph returns an UndirectedGraph. +func NewUndirectedGraph() *UndirectedGraph { + return &UndirectedGraph{ + nodes: make(map[int64]graph.Node), + edges: make(map[int64]map[int64]graph.Edge), + + nodeIDs: uid.NewSet(), + } +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *UndirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.edges[n.ID()] = make(map[int64]graph.Edge) + g.nodeIDs.Use(n.ID()) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *UndirectedGraph) Edge(uid, vid int64) graph.Edge { + return g.EdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. +func (g *UndirectedGraph) EdgeBetween(xid, yid int64) graph.Edge { + edge, ok := g.edges[xid][yid] + if !ok { + return nil + } + if edge.From().ID() == xid { + return edge + } + return edge.ReversedEdge() +} + +// Edges returns all the edges in the graph. 
+func (g *UndirectedGraph) Edges() graph.Edges { + if len(g.edges) == 0 { + return graph.Empty + } + var edges []graph.Edge + seen := make(map[[2]int64]struct{}) + for _, u := range g.edges { + for _, e := range u { + uid := e.From().ID() + vid := e.To().ID() + if _, ok := seen[[2]int64{uid, vid}]; ok { + continue + } + seen[[2]int64{uid, vid}] = struct{}{} + seen[[2]int64{vid, uid}] = struct{}{} + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *UndirectedGraph) From(id int64) graph.Nodes { + if _, ok := g.nodes[id]; !ok { + return graph.Empty + } + + nodes := make([]graph.Node, len(g.edges[id])) + i := 0 + for from := range g.edges[id] { + nodes[i] = g.nodes[from] + i++ + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g *UndirectedGraph) HasEdgeBetween(xid, yid int64) bool { + _, ok := g.edges[xid][yid] + return ok +} + +// NewEdge returns a new Edge from the source to the destination node. +func (g *UndirectedGraph) NewEdge(from, to graph.Node) graph.Edge { + return &Edge{F: from, T: to} +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *UndirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *UndirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. 
+func (g *UndirectedGraph) Nodes() graph.Nodes { + if len(g.nodes) == 0 { + return graph.Empty + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return iterator.NewOrderedNodes(nodes) +} + +// RemoveEdge removes the edge with the given end IDs from the graph, leaving the terminal nodes. +// If the edge does not exist it is a no-op. +func (g *UndirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.edges[fid], tid) + delete(g.edges[tid], fid) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *UndirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.edges[id] { + delete(g.edges[from], id) + } + delete(g.edges, id) + + g.nodeIDs.Release(id) +} + +// SetEdge adds e, an edge from one node to another. If the nodes do not exist, they are added +// and are set to the nodes of the edge otherwise. +// It will panic if the IDs of the e.From and e.To are equal. +func (g *UndirectedGraph) SetEdge(e graph.Edge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if _, ok := g.nodes[fid]; !ok { + g.AddNode(from) + } else { + g.nodes[fid] = from + } + if _, ok := g.nodes[tid]; !ok { + g.AddNode(to) + } else { + g.nodes[tid] = to + } + + g.edges[fid][tid] = e + g.edges[tid][fid] = e +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go b/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go new file mode 100644 index 0000000000..92bd2842fd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/weighted_directed.go @@ -0,0 +1,279 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" + "gonum.org/v1/gonum/graph/iterator" +) + +var ( + wdg *WeightedDirectedGraph + + _ graph.Graph = wdg + _ graph.Weighted = wdg + _ graph.Directed = wdg + _ graph.WeightedDirected = wdg + _ graph.NodeAdder = wdg + _ graph.NodeRemover = wdg + _ graph.WeightedEdgeAdder = wdg + _ graph.EdgeRemover = wdg +) + +// WeightedDirectedGraph implements a generalized weighted directed graph. +type WeightedDirectedGraph struct { + nodes map[int64]graph.Node + from map[int64]map[int64]graph.WeightedEdge + to map[int64]map[int64]graph.WeightedEdge + + self, absent float64 + + nodeIDs uid.Set +} + +// NewWeightedDirectedGraph returns a WeightedDirectedGraph with the specified self and absent +// edge weight values. +func NewWeightedDirectedGraph(self, absent float64) *WeightedDirectedGraph { + return &WeightedDirectedGraph{ + nodes: make(map[int64]graph.Node), + from: make(map[int64]map[int64]graph.WeightedEdge), + to: make(map[int64]map[int64]graph.WeightedEdge), + + self: self, + absent: absent, + + nodeIDs: uid.NewSet(), + } +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *WeightedDirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.from[n.ID()] = make(map[int64]graph.WeightedEdge) + g.to[n.ID()] = make(map[int64]graph.WeightedEdge) + g.nodeIDs.Use(n.ID()) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedDirectedGraph) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdge(uid, vid) +} + +// Edges returns all the edges in the graph. 
+func (g *WeightedDirectedGraph) Edges() graph.Edges { + var edges []graph.Edge + for _, u := range g.nodes { + for _, e := range g.from[u.ID()] { + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *WeightedDirectedGraph) From(id int64) graph.Nodes { + if _, ok := g.from[id]; !ok { + return graph.Empty + } + + from := make([]graph.Node, len(g.from[id])) + i := 0 + for vid := range g.from[id] { + from[i] = g.nodes[vid] + i++ + } + if len(from) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(from) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y without +// considering direction. +func (g *WeightedDirectedGraph) HasEdgeBetween(xid, yid int64) bool { + if _, ok := g.from[xid][yid]; ok { + return true + } + _, ok := g.from[yid][xid] + return ok +} + +// HasEdgeFromTo returns whether an edge exists in the graph from u to v. +func (g *WeightedDirectedGraph) HasEdgeFromTo(uid, vid int64) bool { + if _, ok := g.from[uid][vid]; !ok { + return false + } + return true +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *WeightedDirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// NewWeightedEdge returns a new weighted edge from the source to the destination node. +func (g *WeightedDirectedGraph) NewWeightedEdge(from, to graph.Node, weight float64) graph.WeightedEdge { + return &WeightedEdge{F: from, T: to, W: weight} +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. 
+func (g *WeightedDirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. +func (g *WeightedDirectedGraph) Nodes() graph.Nodes { + if len(g.from) == 0 { + return graph.Empty + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return iterator.NewOrderedNodes(nodes) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *WeightedDirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.from[fid], tid) + delete(g.to[tid], fid) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *WeightedDirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.from[id] { + delete(g.to[from], id) + } + delete(g.from, id) + + for to := range g.to[id] { + delete(g.from[to], id) + } + delete(g.to, id) + + g.nodeIDs.Release(id) +} + +// SetWeightedEdge adds a weighted edge from one node to another. If the nodes do not exist, they are added +// and are set to the nodes of the edge otherwise. +// It will panic if the IDs of the e.From and e.To are equal. +func (g *WeightedDirectedGraph) SetWeightedEdge(e graph.WeightedEdge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if _, ok := g.nodes[fid]; !ok { + g.AddNode(from) + } else { + g.nodes[fid] = from + } + if _, ok := g.nodes[tid]; !ok { + g.AddNode(to) + } else { + g.nodes[tid] = to + } + + g.from[fid][tid] = e + g.to[tid][fid] = e +} + +// To returns all nodes in g that can reach directly to n. 
+func (g *WeightedDirectedGraph) To(id int64) graph.Nodes { + if _, ok := g.from[id]; !ok { + return graph.Empty + } + + to := make([]graph.Node, len(g.to[id])) + i := 0 + for uid := range g.to[id] { + to[i] = g.nodes[uid] + i++ + } + if len(to) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(to) +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *WeightedDirectedGraph) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if to, ok := g.from[xid]; ok { + if e, ok := to[yid]; ok { + return e.Weight(), true + } + } + return g.absent, false +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedDirectedGraph) WeightedEdge(uid, vid int64) graph.WeightedEdge { + edge, ok := g.from[uid][vid] + if !ok { + return nil + } + return edge +} + +// WeightedEdges returns all the weighted edges in the graph. +func (g *WeightedDirectedGraph) WeightedEdges() graph.WeightedEdges { + var edges []graph.WeightedEdge + for _, u := range g.nodes { + for _, e := range g.from[u.ID()] { + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedWeightedEdges(edges) +} diff --git a/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go b/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go new file mode 100644 index 0000000000..5932576832 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/simple/weighted_undirected.go @@ -0,0 +1,273 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package simple + +import ( + "fmt" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/uid" + "gonum.org/v1/gonum/graph/iterator" +) + +var ( + wug *WeightedUndirectedGraph + + _ graph.Graph = wug + _ graph.Weighted = wug + _ graph.Undirected = wug + _ graph.WeightedUndirected = wug + _ graph.NodeAdder = wug + _ graph.NodeRemover = wug + _ graph.WeightedEdgeAdder = wug + _ graph.EdgeRemover = wug +) + +// WeightedUndirectedGraph implements a generalized weighted undirected graph. +type WeightedUndirectedGraph struct { + nodes map[int64]graph.Node + edges map[int64]map[int64]graph.WeightedEdge + + self, absent float64 + + nodeIDs uid.Set +} + +// NewWeightedUndirectedGraph returns an WeightedUndirectedGraph with the specified self and absent +// edge weight values. +func NewWeightedUndirectedGraph(self, absent float64) *WeightedUndirectedGraph { + return &WeightedUndirectedGraph{ + nodes: make(map[int64]graph.Node), + edges: make(map[int64]map[int64]graph.WeightedEdge), + + self: self, + absent: absent, + + nodeIDs: uid.NewSet(), + } +} + +// AddNode adds n to the graph. It panics if the added node ID matches an existing node ID. +func (g *WeightedUndirectedGraph) AddNode(n graph.Node) { + if _, exists := g.nodes[n.ID()]; exists { + panic(fmt.Sprintf("simple: node ID collision: %d", n.ID())) + } + g.nodes[n.ID()] = n + g.edges[n.ID()] = make(map[int64]graph.WeightedEdge) + g.nodeIDs.Use(n.ID()) +} + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedUndirectedGraph) Edge(uid, vid int64) graph.Edge { + return g.WeightedEdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. 
+func (g *WeightedUndirectedGraph) EdgeBetween(xid, yid int64) graph.Edge { + return g.WeightedEdgeBetween(xid, yid) +} + +// Edges returns all the edges in the graph. +func (g *WeightedUndirectedGraph) Edges() graph.Edges { + if len(g.edges) == 0 { + return graph.Empty + } + var edges []graph.Edge + seen := make(map[[2]int64]struct{}) + for _, u := range g.edges { + for _, e := range u { + uid := e.From().ID() + vid := e.To().ID() + if _, ok := seen[[2]int64{uid, vid}]; ok { + continue + } + seen[[2]int64{uid, vid}] = struct{}{} + seen[[2]int64{vid, uid}] = struct{}{} + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedEdges(edges) +} + +// From returns all nodes in g that can be reached directly from n. +func (g *WeightedUndirectedGraph) From(id int64) graph.Nodes { + if _, ok := g.nodes[id]; !ok { + return graph.Empty + } + + nodes := make([]graph.Node, len(g.edges[id])) + i := 0 + for from := range g.edges[id] { + nodes[i] = g.nodes[from] + i++ + } + if len(nodes) == 0 { + return graph.Empty + } + return iterator.NewOrderedNodes(nodes) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g *WeightedUndirectedGraph) HasEdgeBetween(xid, yid int64) bool { + _, ok := g.edges[xid][yid] + return ok +} + +// NewNode returns a new unique Node to be added to g. The Node's ID does +// not become valid in g until the Node is added to g. +func (g *WeightedUndirectedGraph) NewNode() graph.Node { + if len(g.nodes) == 0 { + return Node(0) + } + if int64(len(g.nodes)) == uid.Max { + panic("simple: cannot allocate node: no slot") + } + return Node(g.nodeIDs.NewID()) +} + +// NewWeightedEdge returns a new weighted edge from the source to the destination node. 
+func (g *WeightedUndirectedGraph) NewWeightedEdge(from, to graph.Node, weight float64) graph.WeightedEdge { + return &WeightedEdge{F: from, T: to, W: weight} +} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g *WeightedUndirectedGraph) Node(id int64) graph.Node { + return g.nodes[id] +} + +// Nodes returns all the nodes in the graph. +func (g *WeightedUndirectedGraph) Nodes() graph.Nodes { + if len(g.nodes) == 0 { + return graph.Empty + } + nodes := make([]graph.Node, len(g.nodes)) + i := 0 + for _, n := range g.nodes { + nodes[i] = n + i++ + } + return iterator.NewOrderedNodes(nodes) +} + +// RemoveEdge removes the edge with the given end point IDs from the graph, leaving the terminal +// nodes. If the edge does not exist it is a no-op. +func (g *WeightedUndirectedGraph) RemoveEdge(fid, tid int64) { + if _, ok := g.nodes[fid]; !ok { + return + } + if _, ok := g.nodes[tid]; !ok { + return + } + + delete(g.edges[fid], tid) + delete(g.edges[tid], fid) +} + +// RemoveNode removes the node with the given ID from the graph, as well as any edges attached +// to it. If the node is not in the graph it is a no-op. +func (g *WeightedUndirectedGraph) RemoveNode(id int64) { + if _, ok := g.nodes[id]; !ok { + return + } + delete(g.nodes, id) + + for from := range g.edges[id] { + delete(g.edges[from], id) + } + delete(g.edges, id) + + g.nodeIDs.Release(id) +} + +// SetWeightedEdge adds a weighted edge from one node to another. If the nodes do not exist, they are added +// and are set to the nodes of the edge otherwise. +// It will panic if the IDs of the e.From and e.To are equal. 
+func (g *WeightedUndirectedGraph) SetWeightedEdge(e graph.WeightedEdge) { + var ( + from = e.From() + fid = from.ID() + to = e.To() + tid = to.ID() + ) + + if fid == tid { + panic("simple: adding self edge") + } + + if _, ok := g.nodes[fid]; !ok { + g.AddNode(from) + } else { + g.nodes[fid] = from + } + if _, ok := g.nodes[tid]; !ok { + g.AddNode(to) + } else { + g.nodes[tid] = to + } + + g.edges[fid][tid] = e + g.edges[tid][fid] = e +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node or there is no joining edge between the two nodes the weight +// value returned is either the graph's absent or self value. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g *WeightedUndirectedGraph) Weight(xid, yid int64) (w float64, ok bool) { + if xid == yid { + return g.self, true + } + if n, ok := g.edges[xid]; ok { + if e, ok := n[yid]; ok { + return e.Weight(), true + } + } + return g.absent, false +} + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +func (g *WeightedUndirectedGraph) WeightedEdge(uid, vid int64) graph.WeightedEdge { + return g.WeightedEdgeBetween(uid, vid) +} + +// WeightedEdgeBetween returns the weighted edge between nodes x and y. +func (g *WeightedUndirectedGraph) WeightedEdgeBetween(xid, yid int64) graph.WeightedEdge { + edge, ok := g.edges[xid][yid] + if !ok { + return nil + } + if edge.From().ID() == xid { + return edge + } + return edge.ReversedEdge().(graph.WeightedEdge) +} + +// WeightedEdges returns all the weighted edges in the graph. 
+func (g *WeightedUndirectedGraph) WeightedEdges() graph.WeightedEdges { + var edges []graph.WeightedEdge + seen := make(map[[2]int64]struct{}) + for _, u := range g.edges { + for _, e := range u { + uid := e.From().ID() + vid := e.To().ID() + if _, ok := seen[[2]int64{uid, vid}]; ok { + continue + } + seen[[2]int64{uid, vid}] = struct{}{} + seen[[2]int64{vid, uid}] = struct{}{} + edges = append(edges, e) + } + } + if len(edges) == 0 { + return graph.Empty + } + return iterator.NewOrderedWeightedEdges(edges) +} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go b/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go new file mode 100644 index 0000000000..83fdb5bdf8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go @@ -0,0 +1,250 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" +) + +// DegeneracyOrdering returns the degeneracy ordering and the k-cores of +// the undirected graph g. +func DegeneracyOrdering(g graph.Undirected) (order []graph.Node, cores [][]graph.Node) { + order, offsets := degeneracyOrdering(g) + + ordered.Reverse(order) + cores = make([][]graph.Node, len(offsets)) + offset := len(order) + for i, n := range offsets { + cores[i] = order[offset-n : offset] + offset -= n + } + return order, cores +} + +// KCore returns the k-core of the undirected graph g with nodes in an +// optimal ordering for the coloring number. +func KCore(k int, g graph.Undirected) []graph.Node { + order, offsets := degeneracyOrdering(g) + + var offset int + for _, n := range offsets[:k] { + offset += n + } + core := make([]graph.Node, len(order)-offset) + copy(core, order[offset:]) + return core +} + +// degeneracyOrdering is the common code for DegeneracyOrdering and KCore. 
It +// returns l, the nodes of g in optimal ordering for coloring number and +// s, a set of relative offsets into l for each k-core, where k is an index +// into s. +func degeneracyOrdering(g graph.Undirected) (l []graph.Node, s []int) { + nodes := graph.NodesOf(g.Nodes()) + + // The algorithm used here is essentially as described at + // http://en.wikipedia.org/w/index.php?title=Degeneracy_%28graph_theory%29&oldid=640308710 + + // Initialize an output list L in return parameters. + + // Compute a number d_v for each vertex v in G, + // the number of neighbors of v that are not already in L. + // Initially, these numbers are just the degrees of the vertices. + dv := make(map[int64]int, len(nodes)) + var ( + maxDegree int + neighbours = make(map[int64][]graph.Node) + ) + for _, n := range nodes { + id := n.ID() + adj := graph.NodesOf(g.From(id)) + neighbours[id] = adj + dv[id] = len(adj) + if len(adj) > maxDegree { + maxDegree = len(adj) + } + } + + // Initialize an array D such that D[i] contains a list of the + // vertices v that are not already in L for which d_v = i. + d := make([][]graph.Node, maxDegree+1) + for _, n := range nodes { + deg := dv[n.ID()] + d[deg] = append(d[deg], n) + } + + // Initialize k to 0. + k := 0 + // Repeat n times: + s = []int{0} + for range nodes { + // Scan the array cells D[0], D[1], ... until + // finding an i for which D[i] is nonempty. + var ( + i int + di []graph.Node + ) + for i, di = range d { + if len(di) != 0 { + break + } + } + + // Set k to max(k,i). + if i > k { + k = i + s = append(s, make([]int, k-len(s)+1)...) + } + + // Select a vertex v from D[i]. Add v to the + // beginning of L and remove it from D[i]. + var v graph.Node + v, d[i] = di[len(di)-1], di[:len(di)-1] + l = append(l, v) + s[k]++ + delete(dv, v.ID()) + + // For each neighbor w of v not already in L, + // subtract one from d_w and move w to the + // cell of D corresponding to the new value of d_w. 
+ for _, w := range neighbours[v.ID()] { + dw, ok := dv[w.ID()] + if !ok { + continue + } + for i, n := range d[dw] { + if n.ID() == w.ID() { + d[dw][i], d[dw] = d[dw][len(d[dw])-1], d[dw][:len(d[dw])-1] + dw-- + d[dw] = append(d[dw], w) + break + } + } + dv[w.ID()] = dw + } + } + + return l, s +} + +// BronKerbosch returns the set of maximal cliques of the undirected graph g. +func BronKerbosch(g graph.Undirected) [][]graph.Node { + nodes := graph.NodesOf(g.Nodes()) + + // The algorithm used here is essentially BronKerbosch3 as described at + // http://en.wikipedia.org/w/index.php?title=Bron%E2%80%93Kerbosch_algorithm&oldid=656805858 + + p := set.NewNodesSize(len(nodes)) + for _, n := range nodes { + p.Add(n) + } + x := set.NewNodes() + var bk bronKerbosch + order, _ := degeneracyOrdering(g) + ordered.Reverse(order) + for _, v := range order { + neighbours := graph.NodesOf(g.From(v.ID())) + nv := set.NewNodesSize(len(neighbours)) + for _, n := range neighbours { + nv.Add(n) + } + bk.maximalCliquePivot(g, []graph.Node{v}, set.IntersectionOfNodes(p, nv), set.IntersectionOfNodes(x, nv)) + p.Remove(v) + x.Add(v) + } + return bk +} + +type bronKerbosch [][]graph.Node + +func (bk *bronKerbosch) maximalCliquePivot(g graph.Undirected, r []graph.Node, p, x set.Nodes) { + if len(p) == 0 && len(x) == 0 { + *bk = append(*bk, r) + return + } + + neighbours := bk.choosePivotFrom(g, p, x) + nu := set.NewNodesSize(len(neighbours)) + for _, n := range neighbours { + nu.Add(n) + } + for _, v := range p { + if nu.Has(v) { + continue + } + vid := v.ID() + neighbours := graph.NodesOf(g.From(vid)) + nv := set.NewNodesSize(len(neighbours)) + for _, n := range neighbours { + nv.Add(n) + } + + var found bool + for _, n := range r { + if n.ID() == vid { + found = true + break + } + } + var sr []graph.Node + if !found { + sr = append(r[:len(r):len(r)], v) + } + + bk.maximalCliquePivot(g, sr, set.IntersectionOfNodes(p, nv), set.IntersectionOfNodes(x, nv)) + p.Remove(v) + x.Add(v) + } +} + 
+func (*bronKerbosch) choosePivotFrom(g graph.Undirected, p, x set.Nodes) (neighbors []graph.Node) {
+	// TODO(kortschak): Investigate the impact of pivot choice that maximises
+	// |p ⋂ neighbours(u)| as a function of input size. Until then, leave as
+	// compile time option.
+	if !tomitaTanakaTakahashi {
+		for _, n := range p {
+			return graph.NodesOf(g.From(n.ID()))
+		}
+		for _, n := range x {
+			return graph.NodesOf(g.From(n.ID()))
+		}
+		panic("bronKerbosch: empty set")
+	}
+
+	var (
+		max   = -1
+		pivot graph.Node
+	)
+	maxNeighbors := func(s set.Nodes) {
+	outer:
+		for _, u := range s {
+			nb := graph.NodesOf(g.From(u.ID()))
+			c := len(nb)
+			if c <= max {
+				continue
+			}
+			for _, n := range nb {
+				if _, ok := p[n.ID()]; ok {
+					continue
+				}
+				c--
+				if c <= max {
+					continue outer
+				}
+			}
+			max = c
+			pivot = u
+			neighbors = nb
+		}
+	}
+	maxNeighbors(p)
+	maxNeighbors(x)
+	if pivot == nil {
+		panic("bronKerbosch: empty set")
+	}
+	return neighbors
+}
diff --git a/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go b/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go
new file mode 100644
index 0000000000..28f1b96ee7
--- /dev/null
+++ b/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go
@@ -0,0 +1,111 @@
+// Copyright ©2017 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package topo
+
+import (
+	"sort"
+
+	"gonum.org/v1/gonum/graph"
+	"gonum.org/v1/gonum/graph/internal/ordered"
+	"gonum.org/v1/gonum/graph/internal/set"
+)
+
+// Builder is a pure topological graph construction type.
+type Builder interface {
+	AddNode(graph.Node)
+	SetEdge(graph.Edge)
+}
+
+// CliqueGraph builds the clique graph of g in dst using Clique and CliqueGraphEdge
+// nodes and edges. The nodes returned by calls to Nodes on the nodes and edges of
+// the constructed graph are the cliques and the common nodes between cliques
+// respectively. The dst graph is not cleared.
+func CliqueGraph(dst Builder, g graph.Undirected) { + cliques := BronKerbosch(g) + + // Construct a consistent view of cliques in g. Sorting costs + // us a little, but not as much as the cliques themselves. + for _, c := range cliques { + sort.Sort(ordered.ByID(c)) + } + sort.Sort(ordered.BySliceIDs(cliques)) + + cliqueNodes := make(cliqueNodeSets, len(cliques)) + for id, c := range cliques { + s := set.NewNodesSize(len(c)) + for _, n := range c { + s.Add(n) + } + ns := &nodeSet{Clique: Clique{id: int64(id), nodes: c}, nodes: s} + dst.AddNode(ns.Clique) + for _, n := range c { + nid := n.ID() + cliqueNodes[nid] = append(cliqueNodes[nid], ns) + } + } + + for _, cliques := range cliqueNodes { + for i, uc := range cliques { + for _, vc := range cliques[i+1:] { + // Retain the nodes that contribute to the + // edge between the cliques. + var edgeNodes []graph.Node + switch 1 { + case len(uc.Clique.nodes): + edgeNodes = []graph.Node{uc.Clique.nodes[0]} + case len(vc.Clique.nodes): + edgeNodes = []graph.Node{vc.Clique.nodes[0]} + default: + for _, n := range set.IntersectionOfNodes(uc.nodes, vc.nodes) { + edgeNodes = append(edgeNodes, n) + } + sort.Sort(ordered.ByID(edgeNodes)) + } + + dst.SetEdge(CliqueGraphEdge{from: uc.Clique, to: vc.Clique, nodes: edgeNodes}) + } + } + } +} + +type cliqueNodeSets map[int64][]*nodeSet + +type nodeSet struct { + Clique + nodes set.Nodes +} + +// Clique is a node in a clique graph. +type Clique struct { + id int64 + nodes []graph.Node +} + +// ID returns the node ID. +func (n Clique) ID() int64 { return n.id } + +// Nodes returns the nodes in the clique. +func (n Clique) Nodes() []graph.Node { return n.nodes } + +// CliqueGraphEdge is an edge in a clique graph. +type CliqueGraphEdge struct { + from, to Clique + nodes []graph.Node +} + +// From returns the from node of the edge. +func (e CliqueGraphEdge) From() graph.Node { return e.from } + +// To returns the to node of the edge. 
+func (e CliqueGraphEdge) To() graph.Node { return e.to } + +// ReversedEdge returns a new CliqueGraphEdge with +// the edge end points swapped. The nodes of the +// new edge are shared with the receiver. +func (e CliqueGraphEdge) ReversedEdge() graph.Edge { e.from, e.to = e.to, e.from; return e } + +// Nodes returns the common nodes in the cliques of the underlying graph +// corresponding to the from and to nodes in the clique graph. +func (e CliqueGraphEdge) Nodes() []graph.Node { return e.nodes } diff --git a/vendor/gonum.org/v1/gonum/graph/topo/doc.go b/vendor/gonum.org/v1/gonum/graph/topo/doc.go new file mode 100644 index 0000000000..cbcdff1e70 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package topo provides graph topology analysis functions. +package topo // import "gonum.org/v1/gonum/graph/topo" diff --git a/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go new file mode 100644 index 0000000000..8a78ba2f39 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go @@ -0,0 +1,285 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" + "gonum.org/v1/gonum/graph/iterator" +) + +// johnson implements Johnson's "Finding all the elementary +// circuits of a directed graph" algorithm. SIAM J. Comput. 4(1):1975. +// +// Comments in the johnson methods are kept in sync with the comments +// and labels from the paper. +type johnson struct { + adjacent johnsonGraph // SCC adjacency list. 
+ b []set.Ints // Johnson's "B-list". + blocked []bool + s int + + stack []graph.Node + + result [][]graph.Node +} + +// DirectedCyclesIn returns the set of elementary cycles in the graph g. +func DirectedCyclesIn(g graph.Directed) [][]graph.Node { + jg := johnsonGraphFrom(g) + j := johnson{ + adjacent: jg, + b: make([]set.Ints, len(jg.orig)), + blocked: make([]bool, len(jg.orig)), + } + + // len(j.nodes) is the order of g. + for j.s < len(j.adjacent.orig)-1 { + // We use the previous SCC adjacency to reduce the work needed. + sccs := TarjanSCC(j.adjacent.subgraph(j.s)) + // A_k = adjacency structure of strong component K with least + // vertex in subgraph of G induced by {s, s+1, ... ,n}. + j.adjacent = j.adjacent.sccSubGraph(sccs, 2) // Only allow SCCs with >= 2 vertices. + if j.adjacent.order() == 0 { + break + } + + // s = least vertex in V_k + if s := j.adjacent.leastVertexIndex(); s < j.s { + j.s = s + } + for i, v := range j.adjacent.orig { + if !j.adjacent.nodes.Has(v.ID()) { + continue + } + if len(j.adjacent.succ[v.ID()]) > 0 { + j.blocked[i] = false + j.b[i] = make(set.Ints) + } + } + //L3: + _ = j.circuit(j.s) + j.s++ + } + + return j.result +} + +// circuit is the CIRCUIT sub-procedure in the paper. +func (j *johnson) circuit(v int) bool { + f := false + n := j.adjacent.orig[v] + j.stack = append(j.stack, n) + j.blocked[v] = true + + //L1: + for w := range j.adjacent.succ[n.ID()] { + w := j.adjacent.indexOf(w) + if w == j.s { + // Output circuit composed of stack followed by s. + r := make([]graph.Node, len(j.stack)+1) + copy(r, j.stack) + r[len(r)-1] = j.adjacent.orig[j.s] + j.result = append(j.result, r) + f = true + } else if !j.blocked[w] { + if j.circuit(w) { + f = true + } + } + } + + //L2: + if f { + j.unblock(v) + } else { + for w := range j.adjacent.succ[n.ID()] { + j.b[j.adjacent.indexOf(w)].Add(v) + } + } + j.stack = j.stack[:len(j.stack)-1] + + return f +} + +// unblock is the UNBLOCK sub-procedure in the paper. 
+func (j *johnson) unblock(u int) { + j.blocked[u] = false + for w := range j.b[u] { + j.b[u].Remove(w) + if j.blocked[w] { + j.unblock(w) + } + } +} + +// johnsonGraph is an edge list representation of a graph with helpers +// necessary for Johnson's algorithm +type johnsonGraph struct { + // Keep the original graph nodes and a + // look-up to into the non-sparse + // collection of potentially sparse IDs. + orig []graph.Node + index map[int64]int + + nodes set.Int64s + succ map[int64]set.Int64s +} + +// johnsonGraphFrom returns a deep copy of the graph g. +func johnsonGraphFrom(g graph.Directed) johnsonGraph { + nodes := graph.NodesOf(g.Nodes()) + sort.Sort(ordered.ByID(nodes)) + c := johnsonGraph{ + orig: nodes, + index: make(map[int64]int, len(nodes)), + + nodes: make(set.Int64s, len(nodes)), + succ: make(map[int64]set.Int64s), + } + for i, u := range nodes { + uid := u.ID() + c.index[uid] = i + for _, v := range graph.NodesOf(g.From(uid)) { + if c.succ[uid] == nil { + c.succ[uid] = make(set.Int64s) + c.nodes.Add(uid) + } + c.nodes.Add(v.ID()) + c.succ[uid].Add(v.ID()) + } + } + return c +} + +// order returns the order of the graph. +func (g johnsonGraph) order() int { return g.nodes.Count() } + +// indexOf returns the index of the retained node for the given node ID. +func (g johnsonGraph) indexOf(id int64) int { + return g.index[id] +} + +// leastVertexIndex returns the index into orig of the least vertex. +func (g johnsonGraph) leastVertexIndex() int { + for _, v := range g.orig { + if g.nodes.Has(v.ID()) { + return g.indexOf(v.ID()) + } + } + panic("johnsonCycles: empty set") +} + +// subgraph returns a subgraph of g induced by {s, s+1, ... , n}. The +// subgraph is destructively generated in g. 
+func (g johnsonGraph) subgraph(s int) johnsonGraph { + sn := g.orig[s].ID() + for u, e := range g.succ { + if u < sn { + g.nodes.Remove(u) + delete(g.succ, u) + continue + } + for v := range e { + if v < sn { + g.succ[u].Remove(v) + } + } + } + return g +} + +// sccSubGraph returns the graph of the tarjan's strongly connected +// components with each SCC containing at least min vertices. +// sccSubGraph returns nil if there is no SCC with at least min +// members. +func (g johnsonGraph) sccSubGraph(sccs [][]graph.Node, min int) johnsonGraph { + if len(g.nodes) == 0 { + g.nodes = nil + g.succ = nil + return g + } + sub := johnsonGraph{ + orig: g.orig, + index: g.index, + nodes: make(set.Int64s), + succ: make(map[int64]set.Int64s), + } + + var n int + for _, scc := range sccs { + if len(scc) < min { + continue + } + n++ + for _, u := range scc { + for _, v := range scc { + if _, ok := g.succ[u.ID()][v.ID()]; ok { + if sub.succ[u.ID()] == nil { + sub.succ[u.ID()] = make(set.Int64s) + sub.nodes.Add(u.ID()) + } + sub.nodes.Add(v.ID()) + sub.succ[u.ID()].Add(v.ID()) + } + } + } + } + if n == 0 { + g.nodes = nil + g.succ = nil + return g + } + + return sub +} + +// Nodes is required to satisfy Tarjan. +func (g johnsonGraph) Nodes() graph.Nodes { + n := make([]graph.Node, 0, len(g.nodes)) + for id := range g.nodes { + n = append(n, johnsonGraphNode(id)) + } + return iterator.NewOrderedNodes(n) +} + +// Successors is required to satisfy Tarjan. 
+func (g johnsonGraph) From(id int64) graph.Nodes { + adj := g.succ[id] + if len(adj) == 0 { + return graph.Empty + } + succ := make([]graph.Node, 0, len(adj)) + for id := range adj { + succ = append(succ, johnsonGraphNode(id)) + } + return iterator.NewOrderedNodes(succ) +} + +func (johnsonGraph) Has(int64) bool { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) Node(int64) graph.Node { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) HasEdgeBetween(_, _ int64) bool { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) Edge(_, _ int64) graph.Edge { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) HasEdgeFromTo(_, _ int64) bool { + panic("topo: unintended use of johnsonGraph") +} +func (johnsonGraph) To(int64) graph.Nodes { + panic("topo: unintended use of johnsonGraph") +} + +type johnsonGraphNode int64 + +func (n johnsonGraphNode) ID() int64 { return int64(n) } diff --git a/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go b/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go new file mode 100644 index 0000000000..36171d6fed --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/non_tomita_choice.go @@ -0,0 +1,9 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !tomita + +package topo + +const tomitaTanakaTakahashi = false diff --git a/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go new file mode 100644 index 0000000000..44b362a6fd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go @@ -0,0 +1,83 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package topo + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/linear" + "gonum.org/v1/gonum/graph/internal/set" +) + +// UndirectedCyclesIn returns a set of cycles that forms a cycle basis in the graph g. +// Any cycle in g can be constructed as a symmetric difference of its elements. +func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { + // From "An algorithm for finding a fundamental set of cycles of a graph" + // https://doi.org/10.1145/363219.363232 + + var cycles [][]graph.Node + done := make(set.Int64s) + var tree linear.NodeStack + nodes := g.Nodes() + for nodes.Next() { + n := nodes.Node() + id := n.ID() + if done.Has(id) { + continue + } + done.Add(id) + + tree = tree[:0] + tree.Push(n) + from := sets{id: set.Int64s{}} + to := map[int64]graph.Node{id: n} + + for tree.Len() != 0 { + u := tree.Pop() + uid := u.ID() + adj := from[uid] + for _, v := range graph.NodesOf(g.From(uid)) { + vid := v.ID() + switch { + case uid == vid: + cycles = append(cycles, []graph.Node{u}) + case !from.has(vid): + done.Add(vid) + to[vid] = u + tree.Push(v) + from.add(uid, vid) + case !adj.Has(vid): + c := []graph.Node{v, u} + adj := from[vid] + p := to[uid] + for !adj.Has(p.ID()) { + c = append(c, p) + p = to[p.ID()] + } + c = append(c, p, c[0]) + cycles = append(cycles, c) + adj.Add(uid) + } + } + } + } + + return cycles +} + +type sets map[int64]set.Int64s + +func (s sets) add(uid, vid int64) { + e, ok := s[vid] + if !ok { + e = make(set.Int64s) + s[vid] = e + } + e.Add(uid) +} + +func (s sets) has(uid int64) bool { + _, ok := s[uid] + return ok +} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go b/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go new file mode 100644 index 0000000000..6471292758 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go @@ -0,0 +1,199 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "fmt" + "sort" + + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/ordered" + "gonum.org/v1/gonum/graph/internal/set" +) + +// Unorderable is an error containing sets of unorderable graph.Nodes. +type Unorderable [][]graph.Node + +// Error satisfies the error interface. +func (e Unorderable) Error() string { + const maxNodes = 10 + var n int + for _, c := range e { + n += len(c) + } + if n > maxNodes { + // Don't return errors that are too long. + return fmt.Sprintf("topo: no topological ordering: %d nodes in %d cyclic components", n, len(e)) + } + return fmt.Sprintf("topo: no topological ordering: cyclic components: %v", [][]graph.Node(e)) +} + +func lexical(nodes []graph.Node) { sort.Sort(ordered.ByID(nodes)) } + +// Sort performs a topological sort of the directed graph g returning the 'from' to 'to' +// sort order. If a topological ordering is not possible, an Unorderable error is returned +// listing cyclic components in g with each cyclic component's members sorted by ID. When +// an Unorderable error is returned, each cyclic component's topological position within +// the sorted nodes is marked with a nil graph.Node. +func Sort(g graph.Directed) (sorted []graph.Node, err error) { + sccs := TarjanSCC(g) + return sortedFrom(sccs, lexical) +} + +// SortStabilized performs a topological sort of the directed graph g returning the 'from' +// to 'to' sort order, or the order defined by the in place order sort function where there +// is no unambiguous topological ordering. If a topological ordering is not possible, an +// Unorderable error is returned listing cyclic components in g with each cyclic component's +// members sorted by the provided order function. If order is nil, nodes are ordered lexically +// by node ID. 
When an Unorderable error is returned, each cyclic component's topological +// position within the sorted nodes is marked with a nil graph.Node. +func SortStabilized(g graph.Directed, order func([]graph.Node)) (sorted []graph.Node, err error) { + if order == nil { + order = lexical + } + sccs := tarjanSCCstabilized(g, order) + return sortedFrom(sccs, order) +} + +func sortedFrom(sccs [][]graph.Node, order func([]graph.Node)) ([]graph.Node, error) { + sorted := make([]graph.Node, 0, len(sccs)) + var sc Unorderable + for _, s := range sccs { + if len(s) != 1 { + order(s) + sc = append(sc, s) + sorted = append(sorted, nil) + continue + } + sorted = append(sorted, s[0]) + } + var err error + if sc != nil { + for i, j := 0, len(sc)-1; i < j; i, j = i+1, j-1 { + sc[i], sc[j] = sc[j], sc[i] + } + err = sc + } + ordered.Reverse(sorted) + return sorted, err +} + +// TarjanSCC returns the strongly connected components of the graph g using Tarjan's algorithm. +// +// A strongly connected component of a graph is a set of vertices where it's possible to reach any +// vertex in the set from any other (meaning there's a cycle between them.) +// +// Generally speaking, a directed graph where the number of strongly connected components is equal +// to the number of nodes is acyclic, unless you count reflexive edges as a cycle (which requires +// only a little extra testing.) 
+// +func TarjanSCC(g graph.Directed) [][]graph.Node { + return tarjanSCCstabilized(g, nil) +} + +func tarjanSCCstabilized(g graph.Directed, order func([]graph.Node)) [][]graph.Node { + nodes := graph.NodesOf(g.Nodes()) + var succ func(id int64) []graph.Node + if order == nil { + succ = func(id int64) []graph.Node { + return graph.NodesOf(g.From(id)) + } + } else { + order(nodes) + ordered.Reverse(nodes) + + succ = func(id int64) []graph.Node { + to := graph.NodesOf(g.From(id)) + order(to) + ordered.Reverse(to) + return to + } + } + + t := tarjan{ + succ: succ, + + indexTable: make(map[int64]int, len(nodes)), + lowLink: make(map[int64]int, len(nodes)), + onStack: make(set.Int64s), + } + for _, v := range nodes { + if t.indexTable[v.ID()] == 0 { + t.strongconnect(v) + } + } + return t.sccs +} + +// tarjan implements Tarjan's strongly connected component finding +// algorithm. The implementation is from the pseudocode at +// +// http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm?oldid=642744644 +// +type tarjan struct { + succ func(id int64) []graph.Node + + index int + indexTable map[int64]int + lowLink map[int64]int + onStack set.Int64s + + stack []graph.Node + + sccs [][]graph.Node +} + +// strongconnect is the strongconnect function described in the +// wikipedia article. +func (t *tarjan) strongconnect(v graph.Node) { + vID := v.ID() + + // Set the depth index for v to the smallest unused index. + t.index++ + t.indexTable[vID] = t.index + t.lowLink[vID] = t.index + t.stack = append(t.stack, v) + t.onStack.Add(vID) + + // Consider successors of v. + for _, w := range t.succ(vID) { + wID := w.ID() + if t.indexTable[wID] == 0 { + // Successor w has not yet been visited; recur on it. + t.strongconnect(w) + t.lowLink[vID] = min(t.lowLink[vID], t.lowLink[wID]) + } else if t.onStack.Has(wID) { + // Successor w is in stack s and hence in the current SCC. 
+ t.lowLink[vID] = min(t.lowLink[vID], t.indexTable[wID]) + } + } + + // If v is a root node, pop the stack and generate an SCC. + if t.lowLink[vID] == t.indexTable[vID] { + // Start a new strongly connected component. + var ( + scc []graph.Node + w graph.Node + ) + for { + w, t.stack = t.stack[len(t.stack)-1], t.stack[:len(t.stack)-1] + t.onStack.Remove(w.ID()) + // Add w to current strongly connected component. + scc = append(scc, w) + if w.ID() == vID { + break + } + } + // Output the current strongly connected component. + t.sccs = append(t.sccs, scc) + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go b/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go new file mode 100644 index 0000000000..f85a0d6c0f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/tomita_choice.go @@ -0,0 +1,9 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build tomita + +package topo + +const tomitaTanakaTakahashi = true diff --git a/vendor/gonum.org/v1/gonum/graph/topo/topo.go b/vendor/gonum.org/v1/gonum/graph/topo/topo.go new file mode 100644 index 0000000000..bece61a6ca --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/topo/topo.go @@ -0,0 +1,68 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package topo + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/traverse" +) + +// IsPathIn returns whether path is a path in g. +// +// As special cases, IsPathIn returns true for a zero length path or for +// a path of length 1 when the node in path exists in the graph. 
+func IsPathIn(g graph.Graph, path []graph.Node) bool { + switch len(path) { + case 0: + return true + case 1: + return g.Node(path[0].ID()) != nil + default: + var canReach func(uid, vid int64) bool + switch g := g.(type) { + case graph.Directed: + canReach = g.HasEdgeFromTo + default: + canReach = g.HasEdgeBetween + } + + for i, u := range path[:len(path)-1] { + if !canReach(u.ID(), path[i+1].ID()) { + return false + } + } + return true + } +} + +// PathExistsIn returns whether there is a path in g starting at from extending +// to to. +// +// PathExistsIn exists as a helper function. If many tests for path existence +// are being performed, other approaches will be more efficient. +func PathExistsIn(g graph.Graph, from, to graph.Node) bool { + var t traverse.BreadthFirst + return t.Walk(g, from, func(n graph.Node, _ int) bool { return n.ID() == to.ID() }) != nil +} + +// ConnectedComponents returns the connected components of the undirected graph g. +func ConnectedComponents(g graph.Undirected) [][]graph.Node { + var ( + w traverse.DepthFirst + c []graph.Node + cc [][]graph.Node + ) + during := func(n graph.Node) { + c = append(c, n) + } + after := func() { + cc = append(cc, []graph.Node(nil)) + cc[len(cc)-1] = append(cc[len(cc)-1], c...) + c = c[:0] + } + w.WalkAll(g, nil, after, during) + + return cc +} diff --git a/vendor/gonum.org/v1/gonum/graph/traverse/doc.go b/vendor/gonum.org/v1/gonum/graph/traverse/doc.go new file mode 100644 index 0000000000..dc98bbf437 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/traverse/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package traverse provides basic graph traversal primitives. 
+package traverse // import "gonum.org/v1/gonum/graph/traverse" diff --git a/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go b/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go new file mode 100644 index 0000000000..125b16114c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go @@ -0,0 +1,231 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package traverse + +import ( + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/internal/linear" + "gonum.org/v1/gonum/graph/internal/set" +) + +var _ Graph = graph.Graph(nil) + +// Graph is the subset of graph.Graph necessary for graph traversal. +type Graph interface { + // From returns all nodes that can be reached directly + // from the node with the given ID. + From(id int64) graph.Nodes + + // Edge returns the edge from u to v, with IDs uid and vid, + // if such an edge exists and nil otherwise. The node v + // must be directly reachable from u as defined by + // the From method. + Edge(uid, vid int64) graph.Edge +} + +// BreadthFirst implements stateful breadth-first graph traversal. +type BreadthFirst struct { + // Visit is called on all nodes on their first visit. + Visit func(graph.Node) + + // Traverse is called on all edges that may be traversed + // during the walk. This includes edges that would hop to + // an already visited node. + // + // The value returned by Traverse determines whether + // an edge can be traversed during the walk. + Traverse func(graph.Edge) bool + + queue linear.NodeQueue + visited set.Int64s +} + +// Walk performs a breadth-first traversal of the graph g starting from the given node, +// depending on the Traverse field and the until parameter if they are non-nil. +// The traversal follows edges for which Traverse(edge) is true and returns the first node +// for which until(node, depth) is true. 
During the traversal, if the Visit field is +// non-nil, it is called with each node the first time it is visited. +func (b *BreadthFirst) Walk(g Graph, from graph.Node, until func(n graph.Node, d int) bool) graph.Node { + if b.visited == nil { + b.visited = make(set.Int64s) + } + b.queue.Enqueue(from) + if b.Visit != nil && !b.visited.Has(from.ID()) { + b.Visit(from) + } + b.visited.Add(from.ID()) + + var ( + depth int + children int + untilNext = 1 + ) + for b.queue.Len() > 0 { + t := b.queue.Dequeue() + if until != nil && until(t, depth) { + return t + } + tid := t.ID() + to := g.From(tid) + for to.Next() { + n := to.Node() + nid := n.ID() + if b.Traverse != nil && !b.Traverse(g.Edge(tid, nid)) { + continue + } + if b.visited.Has(nid) { + continue + } + if b.Visit != nil { + b.Visit(n) + } + b.visited.Add(nid) + children++ + b.queue.Enqueue(n) + } + if untilNext--; untilNext == 0 { + depth++ + untilNext = children + children = 0 + } + } + + return nil +} + +// WalkAll calls Walk for each unvisited node of the graph g using edges independent +// of their direction. The functions before and after are called prior to commencing +// and after completing each walk if they are non-nil respectively. The function +// during is called on each node as it is traversed. +func (b *BreadthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) { + b.Reset() + nodes := g.Nodes() + for nodes.Next() { + from := nodes.Node() + if b.Visited(from) { + continue + } + if before != nil { + before() + } + b.Walk(g, from, func(n graph.Node, _ int) bool { + if during != nil { + during(n) + } + return false + }) + if after != nil { + after() + } + } +} + +// Visited returned whether the node n was visited during a traverse. +func (b *BreadthFirst) Visited(n graph.Node) bool { + return b.visited.Has(n.ID()) +} + +// Reset resets the state of the traverser for reuse. 
+func (b *BreadthFirst) Reset() { + b.queue.Reset() + b.visited = nil +} + +// DepthFirst implements stateful depth-first graph traversal. +type DepthFirst struct { + // Visit is called on all nodes on their first visit. + Visit func(graph.Node) + + // Traverse is called on all edges that may be traversed + // during the walk. This includes edges that would hop to + // an already visited node. + // + // The value returned by Traverse determines whether an + // edge can be traversed during the walk. + Traverse func(graph.Edge) bool + + stack linear.NodeStack + visited set.Int64s +} + +// Walk performs a depth-first traversal of the graph g starting from the given node, +// depending on the Traverse field and the until parameter if they are non-nil. +// The traversal follows edges for which Traverse(edge) is true and returns the first node +// for which until(node) is true. During the traversal, if the Visit field is non-nil, it +// is called with each node the first time it is visited. +func (d *DepthFirst) Walk(g Graph, from graph.Node, until func(graph.Node) bool) graph.Node { + if d.visited == nil { + d.visited = make(set.Int64s) + } + d.stack.Push(from) + if d.Visit != nil && !d.visited.Has(from.ID()) { + d.Visit(from) + } + d.visited.Add(from.ID()) + + for d.stack.Len() > 0 { + t := d.stack.Pop() + if until != nil && until(t) { + return t + } + tid := t.ID() + to := g.From(tid) + for to.Next() { + n := to.Node() + nid := n.ID() + if d.Traverse != nil && !d.Traverse(g.Edge(tid, nid)) { + continue + } + if d.visited.Has(nid) { + continue + } + if d.Visit != nil { + d.Visit(n) + } + d.visited.Add(nid) + d.stack.Push(n) + } + } + + return nil +} + +// WalkAll calls Walk for each unvisited node of the graph g using edges independent +// of their direction. The functions before and after are called prior to commencing +// and after completing each walk if they are non-nil respectively. The function +// during is called on each node as it is traversed. 
+func (d *DepthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) { + d.Reset() + nodes := g.Nodes() + for nodes.Next() { + from := nodes.Node() + if d.Visited(from) { + continue + } + if before != nil { + before() + } + d.Walk(g, from, func(n graph.Node) bool { + if during != nil { + during(n) + } + return false + }) + if after != nil { + after() + } + } +} + +// Visited returned whether the node n was visited during a traverse. +func (d *DepthFirst) Visited(n graph.Node) bool { + return d.visited.Has(n.ID()) +} + +// Reset resets the state of the traverser for reuse. +func (d *DepthFirst) Reset() { + d.stack = d.stack[:0] + d.visited = nil +} diff --git a/vendor/gonum.org/v1/gonum/graph/undirect.go b/vendor/gonum.org/v1/gonum/graph/undirect.go new file mode 100644 index 0000000000..07ce64a060 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/graph/undirect.go @@ -0,0 +1,270 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package graph + +// Undirect converts a directed graph to an undirected graph. +type Undirect struct { + G Directed +} + +var _ Undirected = Undirect{} + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g Undirect) Node(id int64) Node { return g.G.Node(id) } + +// Nodes returns all the nodes in the graph. +func (g Undirect) Nodes() Nodes { return g.G.Nodes() } + +// From returns all nodes in g that can be reached directly from u. +func (g Undirect) From(uid int64) Nodes { + return newNodeFilterIterator(g.G.From(uid), g.G.To(uid)) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g Undirect) HasEdgeBetween(xid, yid int64) bool { return g.G.HasEdgeBetween(xid, yid) } + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. 
+// The node v must be directly reachable from u as defined by the From method. +// If an edge exists, the Edge returned is an EdgePair. The weight of +// the edge is determined by applying the Merge func to the weights of the +// edges between u and v. +func (g Undirect) Edge(uid, vid int64) Edge { return g.EdgeBetween(uid, vid) } + +// EdgeBetween returns the edge between nodes x and y. If an edge exists, the +// Edge returned is an EdgePair. The weight of the edge is determined by +// applying the Merge func to the weights of edges between x and y. +func (g Undirect) EdgeBetween(xid, yid int64) Edge { + fe := g.G.Edge(xid, yid) + re := g.G.Edge(yid, xid) + if fe == nil && re == nil { + return nil + } + + return EdgePair{fe, re} +} + +// UndirectWeighted converts a directed weighted graph to an undirected weighted graph, +// resolving edge weight conflicts. +type UndirectWeighted struct { + G WeightedDirected + + // Absent is the value used to + // represent absent edge weights + // passed to Merge if the reverse + // edge is present. + Absent float64 + + // Merge defines how discordant edge + // weights in G are resolved. A merge + // is performed if at least one edge + // exists between the nodes being + // considered. The edges corresponding + // to the two weights are also passed, + // in the same order. + // The order of weight parameters + // passed to Merge is not defined, so + // the function should be commutative. + // If Merge is nil, the arithmetic + // mean is used to merge weights. + Merge func(x, y float64, xe, ye Edge) float64 +} + +var ( + _ Undirected = UndirectWeighted{} + _ WeightedUndirected = UndirectWeighted{} +) + +// Node returns the node with the given ID if it exists in the graph, +// and nil otherwise. +func (g UndirectWeighted) Node(id int64) Node { return g.G.Node(id) } + +// Nodes returns all the nodes in the graph. 
+func (g UndirectWeighted) Nodes() Nodes { return g.G.Nodes() } + +// From returns all nodes in g that can be reached directly from u. +func (g UndirectWeighted) From(uid int64) Nodes { + return newNodeFilterIterator(g.G.From(uid), g.G.To(uid)) +} + +// HasEdgeBetween returns whether an edge exists between nodes x and y. +func (g UndirectWeighted) HasEdgeBetween(xid, yid int64) bool { return g.G.HasEdgeBetween(xid, yid) } + +// Edge returns the edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +// If an edge exists, the Edge returned is an EdgePair. The weight of +// the edge is determined by applying the Merge func to the weights of the +// edges between u and v. +func (g UndirectWeighted) Edge(uid, vid int64) Edge { return g.WeightedEdgeBetween(uid, vid) } + +// WeightedEdge returns the weighted edge from u to v if such an edge exists and nil otherwise. +// The node v must be directly reachable from u as defined by the From method. +// If an edge exists, the Edge returned is an EdgePair. The weight of +// the edge is determined by applying the Merge func to the weights of the +// edges between u and v. +func (g UndirectWeighted) WeightedEdge(uid, vid int64) WeightedEdge { + return g.WeightedEdgeBetween(uid, vid) +} + +// EdgeBetween returns the edge between nodes x and y. If an edge exists, the +// Edge returned is an EdgePair. The weight of the edge is determined by +// applying the Merge func to the weights of edges between x and y. +func (g UndirectWeighted) EdgeBetween(xid, yid int64) Edge { + return g.WeightedEdgeBetween(xid, yid) +} + +// WeightedEdgeBetween returns the weighted edge between nodes x and y. If an edge exists, the +// Edge returned is an EdgePair. The weight of the edge is determined by +// applying the Merge func to the weights of edges between x and y. 
+func (g UndirectWeighted) WeightedEdgeBetween(xid, yid int64) WeightedEdge { + fe := g.G.Edge(xid, yid) + re := g.G.Edge(yid, xid) + if fe == nil && re == nil { + return nil + } + + f, ok := g.G.Weight(xid, yid) + if !ok { + f = g.Absent + } + r, ok := g.G.Weight(yid, xid) + if !ok { + r = g.Absent + } + + var w float64 + if g.Merge == nil { + w = (f + r) / 2 + } else { + w = g.Merge(f, r, fe, re) + } + return WeightedEdgePair{EdgePair: [2]Edge{fe, re}, W: w} +} + +// Weight returns the weight for the edge between x and y if Edge(x, y) returns a non-nil Edge. +// If x and y are the same node the internal node weight is returned. If there is no joining +// edge between the two nodes the weight value returned is zero. Weight returns true if an edge +// exists between x and y or if x and y have the same ID, false otherwise. +func (g UndirectWeighted) Weight(xid, yid int64) (w float64, ok bool) { + fe := g.G.Edge(xid, yid) + re := g.G.Edge(yid, xid) + + f, fOk := g.G.Weight(xid, yid) + if !fOk { + f = g.Absent + } + r, rOK := g.G.Weight(yid, xid) + if !rOK { + r = g.Absent + } + ok = fOk || rOK + + if g.Merge == nil { + return (f + r) / 2, ok + } + return g.Merge(f, r, fe, re), ok +} + +// EdgePair is an opposed pair of directed edges. +type EdgePair [2]Edge + +// From returns the from node of the first non-nil edge, or nil. +func (e EdgePair) From() Node { + if e[0] != nil { + return e[0].From() + } else if e[1] != nil { + return e[1].From() + } + return nil +} + +// To returns the to node of the first non-nil edge, or nil. +func (e EdgePair) To() Node { + if e[0] != nil { + return e[0].To() + } else if e[1] != nil { + return e[1].To() + } + return nil +} + +// ReversedEdge returns a new Edge with the end point of the +// edges in the pair swapped. +func (e EdgePair) ReversedEdge() Edge { + if e[0] != nil { + e[0] = e[0].ReversedEdge() + } + if e[1] != nil { + e[1] = e[1].ReversedEdge() + } + return e +} + +// WeightedEdgePair is an opposed pair of directed edges. 
+type WeightedEdgePair struct { + EdgePair + W float64 +} + +// ReversedEdge returns a new Edge with the end point of the +// edges in the pair swapped. +func (e WeightedEdgePair) ReversedEdge() Edge { + e.EdgePair = e.EdgePair.ReversedEdge().(EdgePair) + return e +} + +// Weight returns the merged edge weights of the two edges. +func (e WeightedEdgePair) Weight() float64 { return e.W } + +// nodeFilterIterator combines two Nodes to produce a single stream of +// unique nodes. +type nodeFilterIterator struct { + a, b Nodes + + // unique indicates the node in b with the key ID is unique. + unique map[int64]bool +} + +func newNodeFilterIterator(a, b Nodes) *nodeFilterIterator { + n := nodeFilterIterator{a: a, b: b, unique: make(map[int64]bool)} + for n.b.Next() { + n.unique[n.b.Node().ID()] = true + } + n.b.Reset() + for n.a.Next() { + n.unique[n.a.Node().ID()] = false + } + n.a.Reset() + return &n +} + +func (n *nodeFilterIterator) Len() int { + return len(n.unique) +} + +func (n *nodeFilterIterator) Next() bool { + n.Len() + if n.a.Next() { + return true + } + for n.b.Next() { + if n.unique[n.b.Node().ID()] { + return true + } + } + return false +} + +func (n *nodeFilterIterator) Node() Node { + if n.a.Len() != 0 { + return n.a.Node() + } + return n.b.Node() +} + +func (n *nodeFilterIterator) Reset() { + n.a.Reset() + n.b.Reset() +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s new file mode 100644 index 0000000000..0a4c14c292 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyinc_amd64.s @@ -0,0 +1,134 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SI // SI = &x + MOVQ y_base+40(FP), DI // DI = &y + MOVQ n+64(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+88(FP), R8 // R8 = ix // Load the first index + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ iy+96(FP), R9 // R9 = iy + SHLQ $4, R9 // R9 *= sizeof(complex128) + LEAQ (SI)(R8*1), SI // SI = &(x[ix]) + LEAQ (DI)(R9*1), DI // DI = &(y[iy]) + MOVQ DI, DX // DX = DI // Separate Read/Write pointers + MOVQ incX+72(FP), R8 // R8 = incX + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ incY+80(FP), R9 // R9 = iy + SHLQ $4, R9 // R9 *= sizeof(complex128) + MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS (SI)(R8*1), X4 + LEAQ (SI)(R8*2), SI // SI = 
&(SI[incX*2]) + MOVUPS (SI), X6 + MOVUPS (SI)(R8*1), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX), X3 + ADDPD (DX)(R9*1), X5 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDPD (DX), X7 + ADDPD (DX)(R9*1), X9 + MOVUPS X3, (DI) // dst[i] = X_(i+1) + MOVUPS X5, (DI)(R9*1) + LEAQ (DI)(R9*2), DI + MOVUPS X7, (DI) + MOVUPS X9, (DI)(R9*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) + DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE axpyi_end + +axpyi_tail: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DI), X3 + MOVUPS X3, (DI) // y[i] = X_i + ADDQ R8, SI // SI = &(SI[incX]) + ADDQ R9, DI 
// DI = &(DI[incY]) + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s new file mode 100644 index 0000000000..cb57f4bed3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyincto_amd64.s @@ -0,0 +1,141 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+56(FP), SI // SI = &x + MOVQ y_base+80(FP), DX // DX = &y + MOVQ n+104(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+128(FP), R8 // R8 = ix // Load the first index + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ iy+136(FP), R9 // R9 = iy + SHLQ $4, R9 // R9 *= sizeof(complex128) + MOVQ idst+32(FP), R10 // R10 = idst + SHLQ $4, R10 // R10 *= sizeof(complex128) + LEAQ (SI)(R8*1), SI // SI = &(x[ix]) + LEAQ 
(DX)(R9*1), DX // DX = &(y[iy]) + LEAQ (DI)(R10*1), DI // DI = &(dst[idst]) + MOVQ incX+112(FP), R8 // R8 = incX + SHLQ $4, R8 // R8 *= sizeof(complex128) + MOVQ incY+120(FP), R9 // R9 = incY + SHLQ $4, R9 // R9 *= sizeof(complex128) + MOVQ incDst+24(FP), R10 // R10 = incDst + SHLQ $4, R10 // R10 *= sizeof(complex128) + MOVUPS alpha+40(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS (SI)(R8*1), X4 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + + MOVUPS (SI), X6 + MOVUPS (SI)(R8*1), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX), X3 + ADDPD (DX)(R9*1), X5 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDPD (DX), X7 + ADDPD (DX)(R9*1), X9 + MOVUPS X3, (DI) // dst[i] = X_(i+1) + MOVUPS X5, (DI)(R10*1) + LEAQ (DI)(R10*2), DI + MOVUPS X7, (DI) + MOVUPS X9, (DI)(R10*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) + 
DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE axpyi_end + +axpyi_tail: // do { + MOVUPS (SI), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX), X3 + MOVUPS X3, (DI) // y[i] X_(i+1) + ADDQ R8, SI // SI += incX + ADDQ R9, DX // DX += incY + ADDQ R10, DI // DI += incDst + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s new file mode 100644 index 0000000000..f1fddce71d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitary_amd64.s @@ -0,0 +1,122 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyUnitary(alpha complex128, x, y []complex128) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SI // SI = &x + MOVQ y_base+40(FP), DI // DI = &y + MOVQ x_len+24(FP), CX // CX = min( len(x), len(y) ) + CMPQ y_len+48(FP), CX + CMOVQLE y_len+48(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + PXOR X0, X0 // Clear work registers and cache-align loop + PXOR X1, X1 + MOVUPS alpha+0(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + XORQ AX, AX // i = 0 + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_loop: // do { + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS 16(SI)(AX*8), X4 + MOVUPS 32(SI)(AX*8), X6 + MOVUPS 48(SI)(AX*8), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) 
} + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DI)(AX*8), X3 + ADDPD 16(DI)(AX*8), X5 + ADDPD 32(DI)(AX*8), X7 + ADDPD 48(DI)(AX*8), X9 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + MOVUPS X5, 16(DI)(AX*8) + MOVUPS X7, 32(DI)(AX*8) + MOVUPS X9, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + DECQ BX + JNZ caxy_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + +caxy_tail: // do { + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DI)(AX*8), X3 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + ADDQ $2, AX // i += 2 + LOOP caxy_tail // } while --CX > 0 + +caxy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s new file mode 100644 index 0000000000..b80015fda8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/axpyunitaryto_amd64.s @@ -0,0 +1,123 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVDDUP X2, X3 +#define MOVDDUP_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xDA +// MOVDDUP X4, X5 +#define MOVDDUP_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xEC +// MOVDDUP X6, X7 +#define MOVDDUP_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0x12; BYTE $0xFE +// MOVDDUP X8, X9 +#define MOVDDUP_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC8 + +// ADDSUBPD X2, X3 +#define ADDSUBPD_X2_X3 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA +// ADDSUBPD X4, X5 +#define ADDSUBPD_X4_X5 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC +// ADDSUBPD X6, X7 +#define ADDSUBPD_X6_X7 BYTE $0x66; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE +// ADDSUBPD X8, X9 +#define ADDSUBPD_X8_X9 BYTE $0x66; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyUnitaryTo(dst []complex128, alpha complex64, x, y []complex128) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+40(FP), SI // SI = &x + MOVQ y_base+64(FP), DX // DX = &y + MOVQ x_len+48(FP), CX // CX = min( len(x), len(y), len(dst) ) + CMPQ y_len+72(FP), CX + CMOVQLE y_len+72(FP), CX + CMPQ dst_len+8(FP), CX + CMOVQLE dst_len+8(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + MOVUPS alpha+24(FP), X0 // X0 = { imag(a), real(a) } + MOVAPS X0, X1 + SHUFPD $0x1, X1, X1 // X1 = { real(a), imag(a) } + XORQ AX, AX // i = 0 + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_loop: // do { + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS 16(SI)(AX*8), X4 + MOVUPS 32(SI)(AX*8), X6 + MOVUPS 48(SI)(AX*8), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 // Load and duplicate imag elements (xi, xi) + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 
+ MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 // duplicate real elements (xr, xr) + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X1, X2 + MULPD X0, X3 + MULPD X11, X4 + MULPD X10, X5 + MULPD X1, X6 + MULPD X0, X7 + MULPD X11, X8 + MULPD X10, X9 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX)(AX*8), X3 + ADDPD 16(DX)(AX*8), X5 + ADDPD 32(DX)(AX*8), X7 + ADDPD 48(DX)(AX*8), X9 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + MOVUPS X5, 16(DI)(AX*8) + MOVUPS X7, 32(DI)(AX*8) + MOVUPS X9, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + DECQ BX + JNZ caxy_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + +caxy_tail: // Same calculation, but read in values to avoid trampling memory + MOVUPS (SI)(AX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD X1, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X0, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + // X_(i+1) = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + ADDPD (DX)(AX*8), X3 + MOVUPS X3, (DI)(AX*8) // y[i] = X_(i+1) + ADDQ $2, AX // i += 2 + LOOP caxy_tail // } while --CX > 0 + +caxy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go new file mode 100644 
index 0000000000..8802ff138a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package c128 provides complex128 vector primitives. +package c128 // import "gonum.org/v1/gonum/internal/asm/c128" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s new file mode 100644 index 0000000000..301d294fa4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcinc_amd64.s @@ -0,0 +1,153 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3 +#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5 +#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7 +#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9 + +#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2 +#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4 +#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6 +#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 
+#define INC_X R8 +#define INCx3_X R9 +#define INC_Y R10 +#define INCx3_Y R11 +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) +TEXT ·DotcInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ n+48(FP), LEN // LEN = n + PXOR SUM, SUM // SUM = 0 + CMPQ LEN, $0 // if LEN == 0 { return } + JE dot_end + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128) + SHLQ $4, INC_X + MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128) + SHLQ $4, INC_Y + LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix]) + LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy]) + MOVQ incX+56(FP), INC_X // INC_X = incX + SHLQ $4, INC_X // INC_X *= sizeof(complex128) + MOVQ incY+64(FP), INC_Y // INC_Y = incY + SHLQ $4, INC_Y // INC_Y *= sizeof(complex128) + MOVSD $(-1.0), NEG1 + SHUFPD $0, NEG1, NEG1 // { -1, -1 } + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ dot_tail // if n <= 4 { goto dot_tail } + MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128) + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = 3 * incY * sizeof(complex128) + +dot_loop: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_XPTR_INCX__X5 + MOVDDUP_XPTR_INCX_2__X7 + MOVDDUP_XPTR_INCx3X__X9 + + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_8_XPTR_INCX__X4 + MOVDDUP_8_XPTR_INCX_2__X6 + MOVDDUP_8_XPTR_INCx3X__X8 + + // X_i = { -imag(x[i]), -imag(x[i]) } + MULPD NEG1, X2 + MULPD P_NEG1, X4 + MULPD NEG1, X6 + MULPD P_NEG1, X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR), X10 + MOVUPS (Y_PTR)(INC_Y*1), X11 + MOVUPS (Y_PTR)(INC_Y*2), X12 + MOVUPS (Y_PTR)(INCx3_Y*1), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD 
X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 + MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + ADDPD P_SUM, SUM // sum += psum + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) } + MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } + MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // sum += result[i] + ADDQ INC_X, X_PTR // X_PTR += incX + ADDQ INC_Y, Y_PTR // Y_PTR += incY + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVUPS SUM, sum+88(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s new file mode 100644 index 0000000000..1db7e156d7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotcunitary_amd64.s @@ -0,0 +1,143 @@ +// Copyright ©2016 The Gonum 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3 +#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5 +#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7 +#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9 + +#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2 +#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4 +#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6 +#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define IDX AX +#define I_IDX DX +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotcUnitary(x, y []complex128) (sum complex128) +TEXT ·DotcUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + PXOR SUM, SUM // sum = 0 + CMPQ LEN, $0 // if LEN == 0 { return } + JE dot_end + XORPS P_SUM, P_SUM // psum = 0 + MOVSD $(-1.0), NEG1 + SHUFPD $0, NEG1, NEG1 // { -1, -1 } + XORQ IDX, IDX // i := 0 + MOVQ $1, I_IDX // j := 1 + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = floor( TAIL / 4 ) + SHRQ $2, LEN // LEN = 
TAIL % 4 + JZ dot_tail // if LEN == 0 { goto dot_tail } + + MOVAPS NEG1, P_NEG1 // Copy NEG1 to P_NEG1 for pipelining + +dot_loop: // do { + MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_16_XPTR_IDX_8__X5 + MOVDDUP_32_XPTR_IDX_8__X7 + MOVDDUP_48_XPTR_IDX_8__X9 + + MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_16_XPTR_IIDX_8__X4 + MOVDDUP_32_XPTR_IIDX_8__X6 + MOVDDUP_48_XPTR_IIDX_8__X8 + + // X_i = { -imag(x[i]), -imag(x[i]) } + MULPD NEG1, X2 + MULPD P_NEG1, X4 + MULPD NEG1, X6 + MULPD P_NEG1, X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR)(IDX*8), X10 + MOVUPS 16(Y_PTR)(IDX*8), X11 + MOVUPS 32(Y_PTR)(IDX*8), X12 + MOVUPS 48(Y_PTR)(IDX*8), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 + MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + ADDQ $8, IDX // IDX += 8 + ADDQ $8, I_IDX // I_IDX += 8 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + ADDPD P_SUM, SUM // sum += psum + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i]) , real(x[i]) } + MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) } + MULPD NEG1, X2 // X_i = { -imag(x[i]) , -imag(x[i]) } + MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { 
real(y[i]) , imag(y[i]) } + MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // SUM += result[i] + ADDQ $2, IDX // IDX += 2 + ADDQ $2, I_IDX // I_IDX += 2 + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVUPS SUM, sum+48(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s new file mode 100644 index 0000000000..386467fcbd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuinc_amd64.s @@ -0,0 +1,141 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_XPTR__X3 LONG $0x1E120FF2 // MOVDDUP (SI), X3 +#define MOVDDUP_XPTR_INCX__X5 LONG $0x120F42F2; WORD $0x062C // MOVDDUP (SI)(R8*1), X5 +#define MOVDDUP_XPTR_INCX_2__X7 LONG $0x120F42F2; WORD $0x463C // MOVDDUP (SI)(R8*2), X7 +#define MOVDDUP_XPTR_INCx3X__X9 LONG $0x120F46F2; WORD $0x0E0C // MOVDDUP (SI)(R9*1), X9 + +#define MOVDDUP_8_XPTR__X2 LONG $0x56120FF2; BYTE $0x08 // MOVDDUP 8(SI), X2 +#define MOVDDUP_8_XPTR_INCX__X4 LONG $0x120F42F2; WORD $0x0664; BYTE $0x08 // MOVDDUP 8(SI)(R8*1), X4 +#define MOVDDUP_8_XPTR_INCX_2__X6 LONG $0x120F42F2; WORD $0x4674; BYTE $0x08 // MOVDDUP 8(SI)(R8*2), X6 +#define MOVDDUP_8_XPTR_INCx3X__X8 LONG $0x120F46F2; WORD $0x0E44; BYTE $0x08 // MOVDDUP 8(SI)(R9*1), X8 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX 
+#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define INC_X R8 +#define INCx3_X R9 +#define INC_Y R10 +#define INCx3_Y R11 + +// func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) +TEXT ·DotuInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ n+48(FP), LEN // LEN = n + PXOR SUM, SUM // sum = 0 + CMPQ LEN, $0 // if LEN == 0 { return } + JE dot_end + MOVQ ix+72(FP), INC_X // INC_X = ix * sizeof(complex128) + SHLQ $4, INC_X + MOVQ iy+80(FP), INC_Y // INC_Y = iy * sizeof(complex128) + SHLQ $4, INC_Y + LEAQ (X_PTR)(INC_X*1), X_PTR // X_PTR = &(X_PTR[ix]) + LEAQ (Y_PTR)(INC_Y*1), Y_PTR // Y_PTR = &(Y_PTR[iy]) + MOVQ incX+56(FP), INC_X // INC_X = incX + SHLQ $4, INC_X // INC_X *= sizeof(complex128) + MOVQ incY+64(FP), INC_Y // INC_Y = incY + SHLQ $4, INC_Y // INC_Y *= sizeof(complex128) + MOVQ LEN, TAIL + ANDQ $3, TAIL // LEN = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dot_tail // if LEN <= 4 { goto dot_tail } + PXOR P_SUM, P_SUM // psum = 0 + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = 3 * incX * sizeof(complex128) + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = 3 * incY * sizeof(complex128) + +dot_loop: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_XPTR_INCX__X5 + MOVDDUP_XPTR_INCX_2__X7 + MOVDDUP_XPTR_INCx3X__X9 + + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_8_XPTR_INCX__X4 + MOVDDUP_8_XPTR_INCX_2__X6 + MOVDDUP_8_XPTR_INCx3X__X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR), X10 + MOVUPS (Y_PTR)(INC_Y*1), X11 + MOVUPS (Y_PTR)(INC_Y*2), X12 + MOVUPS (Y_PTR)(INCx3_Y*1), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 
+ MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + + DECQ LEN + JNZ dot_loop // } while --BX > 0 + ADDPD P_SUM, SUM // sum += psum + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_8_XPTR__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } + MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // sum += result[i] + ADDQ INC_X, X_PTR // X_PTR += incX + ADDQ INC_Y, Y_PTR // Y_PTR += incY + DECQ TAIL // --TAIL + JNZ dot_tail // } while TAIL > 0 + +dot_end: + MOVUPS SUM, sum+88(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s new file mode 100644 index 0000000000..d0d507cdcd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dotuunitary_amd64.s @@ -0,0 +1,130 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_XPTR_IDX_8__X3 LONG $0x1C120FF2; BYTE $0xC6 // MOVDDUP (SI)(AX*8), X3 +#define MOVDDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF2; WORD $0x10C6 // MOVDDUP 16(SI)(AX*8), X5 +#define MOVDDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF2; WORD $0x20C6 // MOVDDUP 32(SI)(AX*8), X7 +#define MOVDDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F2; WORD $0xC64C; BYTE $0x30 // MOVDDUP 48(SI)(AX*8), X9 + +#define MOVDDUP_XPTR_IIDX_8__X2 LONG $0x14120FF2; BYTE $0xD6 // MOVDDUP (SI)(DX*8), X2 +#define MOVDDUP_16_XPTR_IIDX_8__X4 LONG $0x64120FF2; WORD $0x10D6 // MOVDDUP 16(SI)(DX*8), X4 +#define MOVDDUP_32_XPTR_IIDX_8__X6 LONG $0x74120FF2; WORD $0x20D6 // MOVDDUP 32(SI)(DX*8), X6 +#define MOVDDUP_48_XPTR_IIDX_8__X8 LONG $0x120F44F2; WORD $0xD644; BYTE $0x30 // MOVDDUP 48(SI)(DX*8), X8 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define IDX AX +#define I_IDX DX + +// func DotuUnitary(x, y []complex128) (sum complex128) +TEXT ·DotuUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + PXOR SUM, SUM // SUM = 0 + CMPQ LEN, $0 // if LEN == 0 { return } + JE dot_end + PXOR P_SUM, P_SUM // P_SUM = 0 + XORQ IDX, IDX // IDX = 0 + MOVQ $1, DX // j = 1 + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = floor( LEN / 4 ) + SHRQ $2, LEN // LEN = LEN % 4 + JZ dot_tail // if LEN == 0 { goto dot_tail } + +dot_loop: // do { + MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_16_XPTR_IDX_8__X5 + MOVDDUP_32_XPTR_IDX_8__X7 + MOVDDUP_48_XPTR_IDX_8__X9 + + 
MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]), imag(x[i]) } + MOVDDUP_16_XPTR_IIDX_8__X4 + MOVDDUP_32_XPTR_IIDX_8__X6 + MOVDDUP_48_XPTR_IIDX_8__X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVUPS (Y_PTR)(IDX*8), X10 + MOVUPS 16(Y_PTR)(IDX*8), X11 + MOVUPS 32(Y_PTR)(IDX*8), X12 + MOVUPS 48(Y_PTR)(IDX*8), X13 + + // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPD X10, X3 + MULPD X11, X5 + MULPD X12, X7 + MULPD X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPD $0x1, X10, X10 + SHUFPD $0x1, X11, X11 + SHUFPD $0x1, X12, X12 + SHUFPD $0x1, X13, X13 + + // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPD X10, X2 + MULPD X11, X4 + MULPD X12, X6 + MULPD X13, X8 + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + // psum += result[i] + ADDPD X3, SUM + ADDPD X5, P_SUM + ADDPD X7, SUM + ADDPD X9, P_SUM + + ADDQ $8, IDX // IDX += 8 + ADDQ $8, I_IDX // I_IDX += 8 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + ADDPD P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVDDUP_XPTR_IDX_8__X3 // X_(i+1) = { real(x[i] , real(x[i]) } + MOVDDUP_XPTR_IIDX_8__X2 // X_i = { imag(x[i]) , imag(x[i]) } + MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]) , real(y[i]) } + MULPD X10, X3 // X_(i+1) = { imag(a) * real(x[i]), real(a) * real(x[i]) } + SHUFPD $0x1, X10, X10 // X_j = { real(y[i]) , imag(y[i]) } + MULPD X10, X2 // X_i = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDPD X3, SUM // psum += result[i] + ADDQ $2, IDX // IDX += 2 + ADDQ $2, I_IDX // I_IDX += 2 + DECQ TAIL // --TAIL + JNZ dot_tail // } while TAIL > 0 + +dot_end: + MOVUPS SUM, sum+48(FP) + RET diff 
--git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s new file mode 100644 index 0000000000..40d5851a62 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalinc_amd64.s @@ -0,0 +1,69 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define TAIL BX +#define INC R9 +#define INC3 R10 +#define ALPHA X0 +#define ALPHA_2 X1 + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0 + +// func DscalInc(alpha float64, x []complex128, n, inc uintptr) +TEXT ·DscalInc(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SRC // SRC = &x + MOVQ n+32(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE dscal_end + + MOVDDUP_ALPHA // ALPHA = alpha + MOVQ inc+40(FP), INC // INC = inc + SHLQ $4, INC // INC = INC * sizeof(complex128) + LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC + MOVUPS ALPHA, ALPHA_2 // Copy ALPHA and ALPHA_2 for pipelining + MOVQ LEN, TAIL // TAIL = LEN + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ dscal_tail // if LEN == 0 { goto dscal_tail } + +dscal_loop: // do { + MOVUPS (SRC), X2 // X_i = x[i] + MOVUPS (SRC)(INC*1), X3 + MOVUPS (SRC)(INC*2), X4 + MOVUPS (SRC)(INC3*1), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST) // x[i] = X_i + MOVUPS X3, (DST)(INC*1) + MOVUPS X4, (DST)(INC*2) + MOVUPS X5, (DST)(INC3*1) + + LEAQ (SRC)(INC*4), SRC // SRC += INC*4 + DECQ LEN + JNZ dscal_loop // } while --LEN > 0 + +dscal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JE dscal_end // if TAIL == 0 { return } + +dscal_tail_loop: // do { + MOVUPS (SRC), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST) // x[i] = X_i + ADDQ INC, SRC // SRC += INC + DECQ TAIL + 
JNZ dscal_tail_loop // } while --TAIL > 0 + +dscal_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s new file mode 100644 index 0000000000..cbc0768aa0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/dscalunitary_amd64.s @@ -0,0 +1,66 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define IDX AX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // MOVDDUP 8(SP), X0 + +// func DscalUnitary(alpha float64, x []complex128) +TEXT ·DscalUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SRC // SRC = &x + MOVQ x_len+16(FP), LEN // LEN = len(x) + CMPQ LEN, $0 // if LEN == 0 { return } + JE dscal_end + + MOVDDUP_ALPHA // ALPHA = alpha + XORQ IDX, IDX // IDX = 0 + MOVUPS ALPHA, ALPHA_2 // Copy ALPHA to ALPHA_2 for pipelining + MOVQ LEN, TAIL // TAIL = LEN + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ dscal_tail // if LEN == 0 { goto dscal_tail } + +dscal_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(SRC)(IDX*8), X3 + MOVUPS 32(SRC)(IDX*8), X4 + MOVUPS 48(SRC)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST)(IDX*8) // x[i] = X_i + MOVUPS X3, 16(DST)(IDX*8) + MOVUPS X4, 32(DST)(IDX*8) + MOVUPS X5, 48(DST)(IDX*8) + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dscal_loop // } while --LEN > 0 + +dscal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JZ dscal_end // if TAIL == 0 { return } + +dscal_tail_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST)(IDX*8) // x[i] = X_i + ADDQ $2, IDX // IDX += 2 + DECQ TAIL + JNZ dscal_tail_loop // 
} while --TAIL > 0 + +dscal_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go new file mode 100644 index 0000000000..47a80e50c6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/scal.go @@ -0,0 +1,31 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package c128 + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []complex128, alpha complex128, x []complex128) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []complex128, incDst uintptr, alpha complex128, x []complex128, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s new file mode 100644 index 0000000000..7b807b3a45 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalUnitary_amd64.s @@ -0,0 +1,116 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define IDX AX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_C X1 +#define ALPHA2 X10 +#define ALPHA_C2 X11 + +#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3 +#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5 +#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7 +#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +// func ScalUnitary(alpha complex128, x []complex128) +TEXT ·ScalUnitary(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SRC // SRC = &x + MOVQ x_len+24(FP), LEN // LEN = len(x) + CMPQ LEN, $0 // if LEN == 0 { return } + JE scal_end + + MOVUPS alpha+0(FP), ALPHA // ALPHA = { imag(alpha), real(alpha) } + MOVAPS ALPHA, ALPHA_C + SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) } + + XORQ IDX, IDX // IDX = 0 + MOVAPS ALPHA, ALPHA2 // Copy ALPHA and ALPHA_C for pipelining + MOVAPS ALPHA_C, ALPHA_C2 + MOVQ LEN, TAIL + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ scal_tail // if BX == 0 { goto scal_tail } + +scal_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS 16(SRC)(IDX*8), X4 + MOVUPS 32(SRC)(IDX*8), X6 + MOVUPS 48(SRC)(IDX*8), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + MULPD ALPHA_C, X2 + MULPD ALPHA, X3 + MULPD ALPHA_C2, X4 + MULPD ALPHA2, X5 + MULPD ALPHA_C, X6 + MULPD ALPHA, X7 
+ MULPD ALPHA_C2, X8 + MULPD ALPHA2, X9 + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1) + MOVUPS X5, 16(DST)(IDX*8) + MOVUPS X7, 32(DST)(IDX*8) + MOVUPS X9, 48(DST)(IDX*8) + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ scal_loop // } while --LEN > 0 + +scal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JZ scal_end // if TAIL == 0 { return } + +scal_tail_loop: // do { + MOVUPS (SRC)(IDX*8), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD ALPHA_C, X2 // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + MULPD ALPHA, X3 // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + MOVUPS X3, (DST)(IDX*8) // x[i] = X_(i+1) + ADDQ $2, IDX // IDX += 2 + DECQ TAIL + JNZ scal_tail_loop // } while --LEN > 0 + +scal_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s new file mode 100644 index 0000000000..7857c1554f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/scalinc_amd64.s @@ -0,0 +1,121 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SRC SI +#define DST SI +#define LEN CX +#define TAIL BX +#define INC R9 +#define INC3 R10 +#define ALPHA X0 +#define ALPHA_C X1 +#define ALPHA2 X10 +#define ALPHA_C2 X11 + +#define MOVDDUP_X2_X3 LONG $0xDA120FF2 // MOVDDUP X2, X3 +#define MOVDDUP_X4_X5 LONG $0xEC120FF2 // MOVDDUP X4, X5 +#define MOVDDUP_X6_X7 LONG $0xFE120FF2 // MOVDDUP X6, X7 +#define MOVDDUP_X8_X9 LONG $0x120F45F2; BYTE $0xC8 // MOVDDUP X8, X9 + +#define ADDSUBPD_X2_X3 LONG $0xDAD00F66 // ADDSUBPD X2, X3 +#define ADDSUBPD_X4_X5 LONG $0xECD00F66 // ADDSUBPD X4, X5 +#define ADDSUBPD_X6_X7 LONG $0xFED00F66 // ADDSUBPD X6, X7 +#define ADDSUBPD_X8_X9 LONG $0xD00F4566; BYTE $0xC8 // ADDSUBPD X8, X9 + +// func ScalInc(alpha complex128, x []complex128, n, inc uintptr) +TEXT ·ScalInc(SB), NOSPLIT, $0 + MOVQ x_base+16(FP), SRC // SRC = &x + MOVQ n+40(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE scal_end // if LEN == 0 { return } + + MOVQ inc+48(FP), INC // INC = inc + SHLQ $4, INC // INC = INC * sizeof(complex128) + LEAQ (INC)(INC*2), INC3 // INC3 = 3 * INC + + MOVUPS alpha+0(FP), ALPHA // ALPHA = { imag(alpha), real(alpha) } + MOVAPS ALPHA, ALPHA_C + SHUFPD $0x1, ALPHA_C, ALPHA_C // ALPHA_C = { real(alpha), imag(alpha) } + + MOVAPS ALPHA, ALPHA2 // Copy ALPHA and ALPHA_C for pipelining + MOVAPS ALPHA_C, ALPHA_C2 + MOVQ LEN, TAIL + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ scal_tail // if BX == 0 { goto scal_tail } + +scal_loop: // do { + MOVUPS (SRC), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVUPS (SRC)(INC*1), X4 + MOVUPS (SRC)(INC*2), X6 + MOVUPS (SRC)(INC3*1), X8 + + // X_(i+1) = { real(x[i], real(x[i]) } + MOVDDUP_X2_X3 + MOVDDUP_X4_X5 + MOVDDUP_X6_X7 + MOVDDUP_X8_X9 + + // X_i = { imag(x[i]), imag(x[i]) } + SHUFPD $0x3, X2, X2 + SHUFPD $0x3, X4, X4 + SHUFPD $0x3, X6, X6 + SHUFPD $0x3, X8, X8 + + // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + 
MULPD ALPHA_C, X2 + MULPD ALPHA, X3 + MULPD ALPHA_C2, X4 + MULPD ALPHA2, X5 + MULPD ALPHA_C, X6 + MULPD ALPHA, X7 + MULPD ALPHA_C2, X8 + MULPD ALPHA2, X9 + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + ADDSUBPD_X4_X5 + ADDSUBPD_X6_X7 + ADDSUBPD_X8_X9 + + MOVUPS X3, (DST) // x[i] = X_(i+1) + MOVUPS X5, (DST)(INC*1) + MOVUPS X7, (DST)(INC*2) + MOVUPS X9, (DST)(INC3*1) + + LEAQ (SRC)(INC*4), SRC // SRC = &(SRC[inc*4]) + DECQ LEN + JNZ scal_loop // } while --BX > 0 + +scal_tail: + ANDQ $3, TAIL // TAIL = TAIL % 4 + JE scal_end // if TAIL == 0 { return } + +scal_tail_loop: // do { + MOVUPS (SRC), X2 // X_i = { imag(x[i]), real(x[i]) } + MOVDDUP_X2_X3 // X_(i+1) = { real(x[i], real(x[i]) } + SHUFPD $0x3, X2, X2 // X_i = { imag(x[i]), imag(x[i]) } + MULPD ALPHA_C, X2 // X_i = { real(ALPHA) * imag(x[i]), imag(ALPHA) * imag(x[i]) } + MULPD ALPHA, X3 // X_(i+1) = { imag(ALPHA) * real(x[i]), real(ALPHA) * real(x[i]) } + + // X_(i+1) = { + // imag(result[i]): imag(ALPHA)*real(x[i]) + real(ALPHA)*imag(x[i]), + // real(result[i]): real(ALPHA)*real(x[i]) - imag(ALPHA)*imag(x[i]) + // } + ADDSUBPD_X2_X3 + + MOVUPS X3, (DST) // x[i] = X_i + ADDQ INC, SRC // SRC = &(SRC[incX]) + DECQ TAIL + JNZ scal_tail_loop // } while --TAIL > 0 + +scal_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go new file mode 100644 index 0000000000..ad6b23ca4c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_amd64.go @@ -0,0 +1,96 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +package c128 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex128, x, y []complex128) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) + +// DscalUnitary is +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } +func DscalUnitary(alpha float64, x []complex128) + +// DscalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } +func DscalInc(alpha float64, x []complex128, n, inc uintptr) + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha complex128, x []complex128, n, inc uintptr) + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha complex128, x []complex128) + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * cmplx.Conj(v) +// } +// return sum +func DotcUnitary(x, y []complex128) (sum complex128) + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * cmplx.Conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex128) (sum complex128) + +// DotuInc is +// 
for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go new file mode 100644 index 0000000000..6313e571c0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c128/stubs_noasm.go @@ -0,0 +1,163 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package c128 + +import "math/cmplx" + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex128, x, y []complex128) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex128, alpha complex128, x, y []complex128) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex128, incDst, idst uintptr, alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} + +// DscalUnitary is +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } +func DscalUnitary(alpha float64, x []complex128) { + for i, v := range x { + x[i] = complex(real(v)*alpha, 
imag(v)*alpha) + } +} + +// DscalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } +func DscalInc(alpha float64, x []complex128, n, inc uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) + ix += inc + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha complex128, x []complex128, n, inc uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += inc + } +} + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha complex128, x []complex128) { + for i := range x { + x[i] *= alpha + } +} + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * cmplx.Conj(v) +// } +// return sum +func DotcUnitary(x, y []complex128) (sum complex128) { + for i, v := range x { + sum += y[i] * cmplx.Conj(v) + } + return sum +} + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * cmplx.Conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { + for i := 0; i < int(n); i++ { + sum += y[iy] * cmplx.Conj(x[ix]) + ix += incX + iy += incY + } + return sum +} + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex128) (sum complex128) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotuInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s 
b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s new file mode 100644 index 0000000000..841415dbc9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyinc_amd64.s @@ -0,0 +1,151 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVSHDUP X3, X2 +#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 +// MOVSLDUP X3, X3 +#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB +// ADDSUBPS X2, X3 +#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA + +// MOVSHDUP X5, X4 +#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 +// MOVSLDUP X5, X5 +#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED +// ADDSUBPS X4, X5 +#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC + +// MOVSHDUP X7, X6 +#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 +// MOVSLDUP X7, X7 +#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF +// ADDSUBPS X6, X7 +#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE + +// MOVSHDUP X9, X8 +#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 +// MOVSLDUP X9, X9 +#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 +// ADDSUBPS X8, X9 +#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ n+56(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+80(FP), R8 // R8 = ix + MOVQ iy+88(FP), R9 // R9 = iy + LEAQ (SI)(R8*8), SI // SI = &(x[ix]) + LEAQ (DI)(R9*8), DI // DI = &(y[iy]) + MOVQ DI, DX // 
DX = DI // Read/Write pointers + MOVQ incX+64(FP), R8 // R8 = incX + SHLQ $3, R8 // R8 *= sizeof(complex64) + MOVQ incY+72(FP), R9 // R9 = incY + SHLQ $3, R9 // R9 *= sizeof(complex64) + MOVSD alpha+0(FP), X0 // X0 = { 0, 0, imag(a), real(a) } + MOVAPS X0, X1 + SHUFPS $0x11, X1, X1 // X1 = { 0, 0, real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVSD (SI), X3 // X_i = { imag(x[i+1]), real(x[i+1]) } + MOVSD (SI)(R8*1), X5 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSD (SI), X7 + MOVSD (SI)(R8*1), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPS X1, X2 + MULPS X0, X3 + MULPS X11, X4 + MULPS X10, X5 + MULPS X1, X6 + MULPS X0, X7 + MULPS X11, X8 + MULPS X10, X9 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + MOVSD (DX), X2 + MOVSD (DX)(R9*1), X4 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + MOVSD (DX), X6 + MOVSD (DX)(R9*1), X8 + ADDPS X2, X3 + ADDPS X4, X5 + ADDPS X6, X7 + ADDPS X8, X9 + + MOVSD X3, (DI) // y[i] = X_i + MOVSD X5, (DI)(R9*1) + LEAQ (DI)(R9*2), DI // DI = &(DI[incDst]) + MOVSD X7, (DI) + MOVSD X9, (DI)(R9*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R9*2), DI // DI = &(DI[incDst]) + DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return 
} + JE axpyi_end + +axpyi_tail: // do { + MOVSD (SI), X3 // X_i = { imag(x[i+1]), real(x[i+1]) } + MOVSHDUP_X3_X2 // X_(i-1) = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 // X_i = { imag(x[i]), imag(x[i]) } + + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X1, X2 + MULPS X0, X3 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // } + ADDSUBPS_X2_X3 // (ai*x1r+ar*x1i, ar*x1r-ai*x1i) + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + MOVSD (DI), X4 + ADDPS X4, X3 + MOVSD X3, (DI) // y[i] = X_i + ADDQ R8, SI // SI += incX + ADDQ R9, DI // DI += incY + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s new file mode 100644 index 0000000000..5c5228dc21 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyincto_amd64.s @@ -0,0 +1,156 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVSHDUP X3, X2 +#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 +// MOVSLDUP X3, X3 +#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB +// ADDSUBPS X2, X3 +#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA + +// MOVSHDUP X5, X4 +#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 +// MOVSLDUP X5, X5 +#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED +// ADDSUBPS X4, X5 +#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC + +// MOVSHDUP X7, X6 +#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 +// MOVSLDUP X7, X7 +#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF +// ADDSUBPS X6, X7 +#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE + +// MOVSHDUP X9, X8 +#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 +// MOVSLDUP X9, X9 +#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 +// ADDSUBPS X8, X9 +#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+48(FP), SI // SI = &x + MOVQ y_base+72(FP), DX // DX = &y + MOVQ n+96(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JE axpyi_end + MOVQ ix+120(FP), R8 // Load the first index + MOVQ iy+128(FP), R9 + MOVQ idst+32(FP), R10 + LEAQ (SI)(R8*8), SI // SI = &(x[ix]) + LEAQ (DX)(R9*8), DX // DX = &(y[iy]) + LEAQ (DI)(R10*8), DI // DI = &(dst[idst]) + MOVQ incX+104(FP), R8 // Incrementors*8 for easy iteration (ADDQ) + SHLQ $3, R8 + MOVQ incY+112(FP), R9 + SHLQ $3, R9 + MOVQ incDst+24(FP), R10 + SHLQ $3, R10 + MOVSD alpha+40(FP), X0 // X0 = { 0, 0, imag(a), real(a) } + MOVAPS X0, X1 
+ SHUFPS $0x11, X1, X1 // X1 = { 0, 0, real(a), imag(a) } + MOVAPS X0, X10 // Copy X0 and X1 for pipelining + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $3, CX // CX = n % 4 + SHRQ $2, BX // BX = floor( n / 4 ) + JZ axpyi_tail // if BX == 0 { goto axpyi_tail } + +axpyi_loop: // do { + MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSD (SI)(R8*1), X5 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSD (SI), X7 + MOVSD (SI)(R8*1), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } + MULPS X1, X2 + MULPS X0, X3 + MULPS X11, X4 + MULPS X10, X5 + MULPS X1, X6 + MULPS X0, X7 + MULPS X11, X8 + MULPS X10, X9 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + MOVSD (DX), X2 + MOVSD (DX)(R9*1), X4 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + MOVSD (DX), X6 + MOVSD (DX)(R9*1), X8 + ADDPS X2, X3 + ADDPS X4, X5 + ADDPS X6, X7 + ADDPS X8, X9 + + MOVSD X3, (DI) // y[i] = X_i + MOVSD X5, (DI)(R10*1) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst]) + MOVSD X7, (DI) + MOVSD X9, (DI)(R10*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst]) + DECQ BX + JNZ axpyi_loop // } while --BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE axpyi_end + +axpyi_tail: + MOVSD (SI), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]) } + // 
X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X1, X2 + MULPS X0, X3 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // } + ADDSUBPS_X2_X3 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]) } + MOVSD (DX), X4 + ADDPS X4, X3 + MOVSD X3, (DI) // y[i] = X_i + ADDQ R8, SI // SI += incX + ADDQ R9, DX // DX += incY + ADDQ R10, DI // DI += incDst + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s new file mode 100644 index 0000000000..ae744a4902 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitary_amd64.s @@ -0,0 +1,160 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVSHDUP X3, X2 +#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 +// MOVSLDUP X3, X3 +#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB +// ADDSUBPS X2, X3 +#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA + +// MOVSHDUP X5, X4 +#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 +// MOVSLDUP X5, X5 +#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED +// ADDSUBPS X4, X5 +#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC + +// MOVSHDUP X7, X6 +#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 +// MOVSLDUP X7, X7 +#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF +// ADDSUBPS X6, X7 +#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE + +// MOVSHDUP X9, X8 +#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 +// MOVSLDUP 
X9, X9 +#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 +// ADDSUBPS X8, X9 +#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyUnitary(alpha complex64, x, y []complex64) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ x_len+16(FP), CX // CX = min( len(x), len(y) ) + CMPQ y_len+40(FP), CX + CMOVQLE y_len+40(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + PXOR X0, X0 // Clear work registers and cache-align loop + PXOR X1, X1 + MOVSD alpha+0(FP), X0 // X0 = { 0, 0, imag(a), real(a) } + SHUFPD $0, X0, X0 // X0 = { imag(a), real(a), imag(a), real(a) } + MOVAPS X0, X1 + SHUFPS $0x11, X1, X1 // X1 = { real(a), imag(a), real(a), imag(a) } + XORQ AX, AX // i = 0 + MOVQ DI, BX // Align on 16-byte boundary for ADDPS + ANDQ $15, BX // BX = &y & 15 + JZ caxy_no_trim // if BX == 0 { goto caxy_no_trim } + + // Trim first value in unaligned buffer + XORPS X2, X2 // Clear work registers and cache-align loop + XORPS X3, X3 + XORPS X4, X4 + MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } + MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), real(a)*real(x[i]) - imag(a)*imag(x[i]) } + ADDSUBPS_X2_X3 + MOVSD (DI)(AX*8), X4 // X3 += y[i] + ADDPS X4, X3 + MOVSD X3, (DI)(AX*8) // y[i] = X3 + INCQ AX // i++ + DECQ CX // --CX + JZ caxy_end // if CX == 0 { return } + +caxy_no_trim: + MOVAPS X0, X10 // Copy X0 and X1 for pipelineing + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $7, CX // CX = n % 8 + SHRQ $3, BX // BX = floor( n / 8 ) + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_loop: // do { + // X_i = { imag(x[i]), real(x[i]), imag(x[i+1]), real(x[i+1]) } + MOVUPS (SI)(AX*8), X3 + 
MOVUPS 16(SI)(AX*8), X5 + MOVUPS 32(SI)(AX*8), X7 + MOVUPS 48(SI)(AX*8), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]), + // imag(a) * real(x[i+1]), real(a) * real(x[i+1]) } + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]), + // real(a) * imag(x[i+1]), imag(a) * imag(x[i+1]) } + MULPS X1, X2 + MULPS X0, X3 + MULPS X11, X4 + MULPS X10, X5 + MULPS X1, X6 + MULPS X0, X7 + MULPS X11, X8 + MULPS X10, X9 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // imag(result[i+1]): imag(a)*real(x[i+1]) + real(a)*imag(x[i+1]), + // real(result[i+1]): real(a)*real(x[i+1]) - imag(a)*imag(x[i+1]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]), + // imag(result[i+1]) + imag(y[i+1]), real(result[i+1]) + real(y[i+1]) } + ADDPS (DI)(AX*8), X3 + ADDPS 16(DI)(AX*8), X5 + ADDPS 32(DI)(AX*8), X7 + ADDPS 48(DI)(AX*8), X9 + MOVUPS X3, (DI)(AX*8) // y[i:i+1] = X_i + MOVUPS X5, 16(DI)(AX*8) + MOVUPS X7, 32(DI)(AX*8) + MOVUPS X9, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + DECQ BX // --BX + JNZ caxy_loop // } while BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + +caxy_tail: // do { + MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } + MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(a)*real(x[i]) - imag(a)*imag(x[i]) } + ADDSUBPS_X2_X3 + 
MOVSD (DI)(AX*8), X4 // X3 += y[i] + ADDPS X4, X3 + MOVSD X3, (DI)(AX*8) // y[i] = X3 + INCQ AX // ++i + LOOP caxy_tail // } while --CX > 0 + +caxy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s new file mode 100644 index 0000000000..a5d702092d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s @@ -0,0 +1,157 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// MOVSHDUP X3, X2 +#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3 +// MOVSLDUP X3, X3 +#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB +// ADDSUBPS X2, X3 +#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA + +// MOVSHDUP X5, X4 +#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5 +// MOVSLDUP X5, X5 +#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED +// ADDSUBPS X4, X5 +#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC + +// MOVSHDUP X7, X6 +#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7 +// MOVSLDUP X7, X7 +#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF +// ADDSUBPS X6, X7 +#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE + +// MOVSHDUP X9, X8 +#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1 +// MOVSLDUP X9, X9 +#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9 +// ADDSUBPS X8, X9 +#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8 + +// func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+32(FP), SI 
// SI = &x + MOVQ y_base+56(FP), DX // DX = &y + MOVQ x_len+40(FP), CX + CMPQ y_len+64(FP), CX // CX = min( len(x), len(y), len(dst) ) + CMOVQLE y_len+64(FP), CX + CMPQ dst_len+8(FP), CX + CMOVQLE dst_len+8(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + MOVSD alpha+24(FP), X0 // X0 = { 0, 0, imag(a), real(a) } + SHUFPD $0, X0, X0 // X0 = { imag(a), real(a), imag(a), real(a) } + MOVAPS X0, X1 + SHUFPS $0x11, X1, X1 // X1 = { real(a), imag(a), real(a), imag(a) } + XORQ AX, AX // i = 0 + MOVQ DX, BX // Align on 16-byte boundary for ADDPS + ANDQ $15, BX // BX = &y & 15 + JZ caxy_no_trim // if BX == 0 { goto caxy_no_trim } + + MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } + MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), real(a)*real(x[i]) - imag(a)*imag(x[i]) } + ADDSUBPS_X2_X3 + MOVSD (DX)(AX*8), X4 // X3 += y[i] + ADDPS X4, X3 + MOVSD X3, (DI)(AX*8) // dst[i] = X3 + INCQ AX // i++ + DECQ CX // --CX + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_no_trim: + MOVAPS X0, X10 // Copy X0 and X1 for pipelineing + MOVAPS X1, X11 + MOVQ CX, BX + ANDQ $7, CX // CX = n % 8 + SHRQ $3, BX // BX = floor( n / 8 ) + JZ caxy_tail // if BX == 0 { goto caxy_tail } + +caxy_loop: + // X_i = { imag(x[i]), real(x[i]), imag(x[i+1]), real(x[i+1]) } + MOVUPS (SI)(AX*8), X3 + MOVUPS 16(SI)(AX*8), X5 + MOVUPS 32(SI)(AX*8), X7 + MOVUPS 48(SI)(AX*8), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_i = { imag(a) * real(x[i]), real(a) * real(x[i]), + // imag(a) * real(x[i+1]), real(a) 
* real(x[i+1]) } + // X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]), + // real(a) * imag(x[i+1]), imag(a) * imag(x[i+1]) } + MULPS X1, X2 + MULPS X0, X3 + MULPS X11, X4 + MULPS X10, X5 + MULPS X1, X6 + MULPS X0, X7 + MULPS X11, X8 + MULPS X10, X9 + + // X_i = { + // imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]), + // imag(result[i+1]): imag(a)*real(x[i+1]) + real(a)*imag(x[i+1]), + // real(result[i+1]): real(a)*real(x[i+1]) - imag(a)*imag(x[i+1]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]), + // imag(result[i+1]) + imag(y[i+1]), real(result[i+1]) + real(y[i+1]) } + ADDPS (DX)(AX*8), X3 + ADDPS 16(DX)(AX*8), X5 + ADDPS 32(DX)(AX*8), X7 + ADDPS 48(DX)(AX*8), X9 + MOVUPS X3, (DI)(AX*8) // y[i:i+1] = X_i + MOVUPS X5, 16(DI)(AX*8) + MOVUPS X7, 32(DI)(AX*8) + MOVUPS X9, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + DECQ BX // --BX + JNZ caxy_loop // } while BX > 0 + CMPQ CX, $0 // if CX == 0 { return } + JE caxy_end + +caxy_tail: // do { + MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) } + MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) } + MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) } + + // X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), + // real(a)*real(x[i]) - imag(a)*imag(x[i]) } + ADDSUBPS_X2_X3 + MOVSD (DX)(AX*8), X4 // X3 += y[i] + ADDPS X4, X3 + MOVSD X3, (DI)(AX*8) // y[i] = X3 + INCQ AX // ++i + LOOP caxy_tail // } while --CX > 0 + +caxy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go new file mode 100644 index 0000000000..910e1e5c73 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/conj.go @@ -0,0 +1,7 @@ +// Copyright ©2015 The Gonum 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package c64 + +func conj(c complex64) complex64 { return complex(real(c), -imag(c)) } diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go new file mode 100644 index 0000000000..35f1b2a26b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package c64 provides complex64 vector primitives. +package c64 // import "gonum.org/v1/gonum/internal/asm/c64" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s new file mode 100644 index 0000000000..87de31d32a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcinc_amd64.s @@ -0,0 +1,160 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 +#define MOVSHDUP_X5_X4 LONG $0xE5160FF3 // MOVSHDUP X5, X4 +#define MOVSHDUP_X7_X6 LONG $0xF7160FF3 // MOVSHDUP X7, X6 +#define MOVSHDUP_X9_X8 LONG $0x160F45F3; BYTE $0xC1 // MOVSHDUP X9, X8 + +#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 +#define MOVSLDUP_X5_X5 LONG $0xED120FF3 // MOVSLDUP X5, X5 +#define MOVSLDUP_X7_X7 LONG $0xFF120FF3 // MOVSLDUP X7, X7 +#define MOVSLDUP_X9_X9 LONG $0x120F45F3; BYTE $0xC9 // MOVSLDUP X9, X9 + +#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 +#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 +#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 +#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define INC_X R8 +#define INCx3_X R9 +#define INC_Y R10 +#define INCx3_Y R11 +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) +TEXT ·DotcInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ n+48(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE dotc_end + MOVQ ix+72(FP), INC_X + MOVQ iy+80(FP), INC_Y + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(X_PTR[ix]) + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(Y_PTR[iy]) + MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(complex64) + SHLQ $3, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(complex64) + SHLQ $3, INC_Y + MOVSS $(-1.0), NEG1 + SHUFPS $0, NEG1, NEG1 // { -1, -1, -1, -1 } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dotc_tail // if LEN == 0 { goto dotc_tail } + + MOVUPS NEG1, P_NEG1 // Copy NEG1 for pipelining + LEAQ (INC_X)(INC_X*2), 
INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dotc_loop: // do { + MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSD (X_PTR)(INC_X*1), X5 + MOVSD (X_PTR)(INC_X*2), X7 + MOVSD (X_PTR)(INCx3_X*1), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_(i-1) = { -imag(x[i]), -imag(x[i]) } + MULPS NEG1, X2 + MULPS P_NEG1, X4 + MULPS NEG1, X6 + MULPS P_NEG1, X8 + + // X_j = { imag(y[i]), real(y[i]) } + MOVSD (Y_PTR), X10 + MOVSD (Y_PTR)(INC_Y*1), X11 + MOVSD (Y_PTR)(INC_Y*2), X12 + MOVSD (Y_PTR)(INCx3_Y*1), X13 + + // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + MULPS X10, X3 + MULPS X11, X5 + MULPS X12, X7 + MULPS X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPS $0xB1, X10, X10 + SHUFPS $0xB1, X11, X11 + SHUFPS $0xB1, X12, X12 + SHUFPS $0xB1, X13, X13 + + // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + MULPS X10, X2 + MULPS X11, X4 + MULPS X12, X6 + MULPS X13, X8 + + // X_i = { + // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), + // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]) } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // SUM += X_i + ADDPS X3, SUM + ADDPS X5, P_SUM + ADDPS X7, SUM + ADDPS X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y*4]) + + DECQ LEN + JNZ dotc_loop // } while --LEN > 0 + + ADDPS P_SUM, SUM // SUM = { P_SUM + SUM } + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dotc_end + +dotc_tail: // do { + MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } + MOVUPS 
(Y_PTR), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + ADDPS X3, SUM // SUM += X_i + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dotc_tail // } while --TAIL > 0 + +dotc_end: + MOVSD SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s new file mode 100644 index 0000000000..d53479ca49 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotcunitary_amd64.s @@ -0,0 +1,208 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVSLDUP_XPTR_IDX_8__X3 LONG $0x1C120FF3; BYTE $0xC6 // MOVSLDUP (SI)(AX*8), X3 +#define MOVSLDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF3; WORD $0x10C6 // MOVSLDUP 16(SI)(AX*8), X5 +#define MOVSLDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF3; WORD $0x20C6 // MOVSLDUP 32(SI)(AX*8), X7 +#define MOVSLDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F3; WORD $0xC64C; BYTE $0x30 // MOVSLDUP 48(SI)(AX*8), X9 + +#define MOVSHDUP_XPTR_IDX_8__X2 LONG $0x14160FF3; BYTE $0xC6 // MOVSHDUP (SI)(AX*8), X2 +#define MOVSHDUP_16_XPTR_IDX_8__X4 LONG $0x64160FF3; WORD $0x10C6 // MOVSHDUP 16(SI)(AX*8), X4 +#define MOVSHDUP_32_XPTR_IDX_8__X6 LONG $0x74160FF3; WORD $0x20C6 // MOVSHDUP 32(SI)(AX*8), X6 +#define MOVSHDUP_48_XPTR_IDX_8__X8 LONG $0x160F44F3; WORD $0xC644; BYTE $0x30 // MOVSHDUP 48(SI)(AX*8), X8 + +#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 +#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 + +#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 +#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 +#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 +#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define IDX AX +#define I_IDX DX +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotcUnitary(x, y []complex64) (sum complex64) +TEXT ·DotcUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + CMPQ LEN, $0 // if LEN == 0 { return } + JE dotc_end + XORQ IDX, IDX // i = 0 + MOVSS $(-1.0), NEG1 + SHUFPS $0, NEG1, NEG1 // { -1, -1, -1, -1 } + + MOVQ X_PTR, DX + ANDQ $15, DX // DX = &x & 15 + JZ dotc_aligned // if DX == 0 { goto dotc_aligned } + + 
MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + MOVAPS X3, SUM // SUM = X_i + INCQ IDX // IDX++ + DECQ LEN // LEN-- + JZ dotc_ret // if LEN == 0 { goto dotc_ret } + +dotc_aligned: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ dotc_tail // if LEN == 0 { return } + MOVUPS NEG1, P_NEG1 // Copy NEG1 for pipelining + +dotc_loop: // do { + MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSLDUP_16_XPTR_IDX_8__X5 + MOVSLDUP_32_XPTR_IDX_8__X7 + MOVSLDUP_48_XPTR_IDX_8__X9 + + MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i+1]), imag(x[i+1]) } + MOVSHDUP_16_XPTR_IDX_8__X4 + MOVSHDUP_32_XPTR_IDX_8__X6 + MOVSHDUP_48_XPTR_IDX_8__X8 + + // X_j = { imag(y[i]), real(y[i]), imag(y[i+1]), real(y[i+1]) } + MOVUPS (Y_PTR)(IDX*8), X10 + MOVUPS 16(Y_PTR)(IDX*8), X11 + MOVUPS 32(Y_PTR)(IDX*8), X12 + MOVUPS 48(Y_PTR)(IDX*8), X13 + + // X_(i-1) = { -imag(x[i]), -imag(x[i]), -imag(x[i]+1), -imag(x[i]+1) } + MULPS NEG1, X2 + MULPS P_NEG1, X4 + MULPS NEG1, X6 + MULPS P_NEG1, X8 + + // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]), + // imag(y[i+1]) * real(x[i+1]), real(y[i+1]) * real(x[i+1]) } + MULPS X10, X3 + MULPS X11, X5 + MULPS X12, X7 + MULPS X13, X9 + + // X_j = { real(y[i]), imag(y[i]), real(y[i+1]), imag(y[i+1]) } + SHUFPS $0xB1, 
X10, X10 + SHUFPS $0xB1, X11, X11 + SHUFPS $0xB1, X12, X12 + SHUFPS $0xB1, X13, X13 + + // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]), + // real(y[i+1]) * imag(x[i+1]), imag(y[i+1]) * imag(x[i+1]) } + MULPS X10, X2 + MULPS X11, X4 + MULPS X12, X6 + MULPS X13, X8 + + // X_i = { + // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), + // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]), + // imag(result[i+1]): imag(y[i+1]) * real(x[i+1]) + real(y[i+1]) * imag(x[i+1]), + // real(result[i+1]): real(y[i+1]) * real(x[i+1]) - imag(y[i+1]) * imag(x[i+1]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // SUM += X_i + ADDPS X3, SUM + ADDPS X5, P_SUM + ADDPS X7, SUM + ADDPS X9, P_SUM + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dotc_loop // } while --LEN > 0 + + ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } + XORPS SUM, SUM // SUM = 0 + + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dotc_end + +dotc_tail: + MOVQ TAIL, LEN + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ dotc_tail_one // if LEN == 0 { goto dotc_tail_one } + +dotc_tail_two: // do { + MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0xB1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + ADDPS X3, SUM // SUM += X_i + + ADDQ $2, IDX // IDX += 2 + DECQ LEN + JNZ dotc_tail_two // } while --LEN > 0 + + ADDPS SUM, P_SUM // 
P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } + XORPS SUM, SUM // SUM = 0 + + ANDQ $1, TAIL + JZ dotc_end + +dotc_tail_one: + MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS NEG1, X2 // X_(i-1) = { -imag(x[i]), imag(x[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + ADDPS X3, SUM // SUM += X_i + +dotc_end: + ADDPS P_SUM, SUM // SUM = { P_SUM[0] + SUM[0] } + MOVHLPS P_SUM, P_SUM // P_SUM = { P_SUM[1], P_SUM[1] } + ADDPS P_SUM, SUM // SUM = { P_SUM[1] + SUM[0] } + +dotc_ret: + MOVSD SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s new file mode 100644 index 0000000000..bdee59becd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuinc_amd64.s @@ -0,0 +1,148 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 +#define MOVSHDUP_X5_X4 LONG $0xE5160FF3 // MOVSHDUP X5, X4 +#define MOVSHDUP_X7_X6 LONG $0xF7160FF3 // MOVSHDUP X7, X6 +#define MOVSHDUP_X9_X8 LONG $0x160F45F3; BYTE $0xC1 // MOVSHDUP X9, X8 + +#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 +#define MOVSLDUP_X5_X5 LONG $0xED120FF3 // MOVSLDUP X5, X5 +#define MOVSLDUP_X7_X7 LONG $0xFF120FF3 // MOVSLDUP X7, X7 +#define MOVSLDUP_X9_X9 LONG $0x120F45F3; BYTE $0xC9 // MOVSLDUP X9, X9 + +#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 +#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 +#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 +#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define INC_X R8 +#define INCx3_X R9 +#define INC_Y R10 +#define INCx3_Y R11 + +// func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) +TEXT ·DotuInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ n+48(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE dotu_end + MOVQ ix+72(FP), INC_X + MOVQ iy+80(FP), INC_Y + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(X_PTR[ix]) + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(Y_PTR[iy]) + MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(complex64) + SHLQ $3, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(complex64) + SHLQ $3, INC_Y + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dotu_tail // if TAIL == 0 { goto dotu_tail } + + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dotu_loop: // do { + MOVSD (X_PTR), X3 // X_i = { imag(x[i]), 
real(x[i]) } + MOVSD (X_PTR)(INC_X*1), X5 + MOVSD (X_PTR)(INC_X*2), X7 + MOVSD (X_PTR)(INCx3_X*1), X9 + + // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSHDUP_X3_X2 + MOVSHDUP_X5_X4 + MOVSHDUP_X7_X6 + MOVSHDUP_X9_X8 + + // X_i = { real(x[i]), real(x[i]) } + MOVSLDUP_X3_X3 + MOVSLDUP_X5_X5 + MOVSLDUP_X7_X7 + MOVSLDUP_X9_X9 + + // X_j = { imag(y[i]), real(y[i]) } + MOVSD (Y_PTR), X10 + MOVSD (Y_PTR)(INC_Y*1), X11 + MOVSD (Y_PTR)(INC_Y*2), X12 + MOVSD (Y_PTR)(INCx3_Y*1), X13 + + // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + MULPS X10, X3 + MULPS X11, X5 + MULPS X12, X7 + MULPS X13, X9 + + // X_j = { real(y[i]), imag(y[i]) } + SHUFPS $0xB1, X10, X10 + SHUFPS $0xB1, X11, X11 + SHUFPS $0xB1, X12, X12 + SHUFPS $0xB1, X13, X13 + + // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + MULPS X10, X2 + MULPS X11, X4 + MULPS X12, X6 + MULPS X13, X8 + + // X_i = { + // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), + // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]) } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // SUM += X_i + ADDPS X3, SUM + ADDPS X5, P_SUM + ADDPS X7, SUM + ADDPS X9, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y*4]) + + DECQ LEN + JNZ dotu_loop // } while --LEN > 0 + + ADDPS P_SUM, SUM // SUM = { P_SUM + SUM } + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dotu_end + +dotu_tail: // do { + MOVSD (X_PTR), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVUPS (Y_PTR), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + 
real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + ADDPS X3, SUM // SUM += X_i + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dotu_tail // } while --TAIL > 0 + +dotu_end: + MOVSD SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s new file mode 100644 index 0000000000..dce83a4671 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/dotuunitary_amd64.s @@ -0,0 +1,197 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVSLDUP_XPTR_IDX_8__X3 LONG $0x1C120FF3; BYTE $0xC6 // MOVSLDUP (SI)(AX*8), X3 +#define MOVSLDUP_16_XPTR_IDX_8__X5 LONG $0x6C120FF3; WORD $0x10C6 // MOVSLDUP 16(SI)(AX*8), X5 +#define MOVSLDUP_32_XPTR_IDX_8__X7 LONG $0x7C120FF3; WORD $0x20C6 // MOVSLDUP 32(SI)(AX*8), X7 +#define MOVSLDUP_48_XPTR_IDX_8__X9 LONG $0x120F44F3; WORD $0xC64C; BYTE $0x30 // MOVSLDUP 48(SI)(AX*8), X9 + +#define MOVSHDUP_XPTR_IDX_8__X2 LONG $0x14160FF3; BYTE $0xC6 // MOVSHDUP (SI)(AX*8), X2 +#define MOVSHDUP_16_XPTR_IDX_8__X4 LONG $0x64160FF3; WORD $0x10C6 // MOVSHDUP 16(SI)(AX*8), X4 +#define MOVSHDUP_32_XPTR_IDX_8__X6 LONG $0x74160FF3; WORD $0x20C6 // MOVSHDUP 32(SI)(AX*8), X6 +#define MOVSHDUP_48_XPTR_IDX_8__X8 LONG $0x160F44F3; WORD $0xC644; BYTE $0x30 // MOVSHDUP 48(SI)(AX*8), X8 + +#define MOVSHDUP_X3_X2 LONG $0xD3160FF3 // MOVSHDUP X3, X2 +#define MOVSLDUP_X3_X3 LONG $0xDB120FF3 // MOVSLDUP X3, X3 + +#define ADDSUBPS_X2_X3 LONG $0xDAD00FF2 // ADDSUBPS X2, X3 +#define ADDSUBPS_X4_X5 LONG $0xECD00FF2 // ADDSUBPS X4, X5 +#define ADDSUBPS_X6_X7 LONG $0xFED00FF2 // ADDSUBPS X6, X7 +#define ADDSUBPS_X8_X9 LONG $0xD00F45F2; BYTE $0xC8 // ADDSUBPS X8, X9 
+ +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define SUM X0 +#define P_SUM X1 +#define IDX AX +#define I_IDX DX +#define NEG1 X15 +#define P_NEG1 X14 + +// func DotuUnitary(x, y []complex64) (sum complex64) +TEXT ·DotuUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + PXOR P_SUM, P_SUM // P_SUM = 0 + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + CMPQ LEN, $0 // if LEN == 0 { return } + JE dotu_end + XORQ IDX, IDX // IDX = 0 + + MOVQ X_PTR, DX + ANDQ $15, DX // DX = &x & 15 + JZ dotu_aligned // if DX == 0 { goto dotu_aligned } + + MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + MOVAPS X3, SUM // SUM = X_i + INCQ IDX // IDX++ + DECQ LEN // LEN-- + JZ dotu_end // if LEN == 0 { goto dotu_end } + +dotu_aligned: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ dotu_tail // if LEN == 0 { goto dotu_tail } + PXOR P_SUM, P_SUM + +dotu_loop: // do { + MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSLDUP_16_XPTR_IDX_8__X5 + MOVSLDUP_32_XPTR_IDX_8__X7 + MOVSLDUP_48_XPTR_IDX_8__X9 + + MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVSHDUP_16_XPTR_IDX_8__X4 + MOVSHDUP_32_XPTR_IDX_8__X6 + 
MOVSHDUP_48_XPTR_IDX_8__X8 + + // X_j = { imag(y[i]), real(y[i]), imag(y[i+1]), real(y[i+1]) } + MOVUPS (Y_PTR)(IDX*8), X10 + MOVUPS 16(Y_PTR)(IDX*8), X11 + MOVUPS 32(Y_PTR)(IDX*8), X12 + MOVUPS 48(Y_PTR)(IDX*8), X13 + + // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]), + // imag(y[i+1]) * real(x[i+1]), real(y[i+1]) * real(x[i+1]) } + MULPS X10, X3 + MULPS X11, X5 + MULPS X12, X7 + MULPS X13, X9 + + // X_j = { real(y[i]), imag(y[i]), real(y[i+1]), imag(y[i+1]) } + SHUFPS $0xB1, X10, X10 + SHUFPS $0xB1, X11, X11 + SHUFPS $0xB1, X12, X12 + SHUFPS $0xB1, X13, X13 + + // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]), + // real(y[i+1]) * imag(x[i+1]), imag(y[i+1]) * imag(x[i+1]) } + MULPS X10, X2 + MULPS X11, X4 + MULPS X12, X6 + MULPS X13, X8 + + // X_i = { + // imag(result[i]): imag(y[i]) * real(x[i]) + real(y[i]) * imag(x[i]), + // real(result[i]): real(y[i]) * real(x[i]) - imag(y[i]) * imag(x[i]), + // imag(result[i+1]): imag(y[i+1]) * real(x[i+1]) + real(y[i+1]) * imag(x[i+1]), + // real(result[i+1]): real(y[i+1]) * real(x[i+1]) - imag(y[i+1]) * imag(x[i+1]), + // } + ADDSUBPS_X2_X3 + ADDSUBPS_X4_X5 + ADDSUBPS_X6_X7 + ADDSUBPS_X8_X9 + + // SUM += X_i + ADDPS X3, SUM + ADDPS X5, P_SUM + ADDPS X7, SUM + ADDPS X9, P_SUM + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dotu_loop // } while --LEN > 0 + + ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } + XORPS SUM, SUM // SUM = 0 + + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dotu_end + +dotu_tail: + MOVQ TAIL, LEN + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ dotu_tail_one // if LEN == 0 { goto dotc_tail_one } + +dotu_tail_two: // do { + MOVSLDUP_XPTR_IDX_8__X3 // X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) } + MOVSHDUP_XPTR_IDX_8__X2 // X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) } + MOVUPS (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0xB1, 
X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + ADDPS X3, SUM // SUM += X_i + + ADDQ $2, IDX // IDX += 2 + DECQ LEN + JNZ dotu_tail_two // } while --LEN > 0 + + ADDPS SUM, P_SUM // P_SUM = { P_SUM[1] + SUM[1], P_SUM[0] + SUM[0] } + XORPS SUM, SUM // SUM = 0 + + ANDQ $1, TAIL + JZ dotu_end + +dotu_tail_one: + MOVSD (X_PTR)(IDX*8), X3 // X_i = { imag(x[i]), real(x[i]) } + MOVSHDUP_X3_X2 // X_(i-1) = { imag(x[i]), imag(x[i]) } + MOVSLDUP_X3_X3 // X_i = { real(x[i]), real(x[i]) } + MOVSD (Y_PTR)(IDX*8), X10 // X_j = { imag(y[i]), real(y[i]) } + MULPS X10, X3 // X_i = { imag(y[i]) * real(x[i]), real(y[i]) * real(x[i]) } + SHUFPS $0x1, X10, X10 // X_j = { real(y[i]), imag(y[i]) } + MULPS X10, X2 // X_(i-1) = { real(y[i]) * imag(x[i]), imag(y[i]) * imag(x[i]) } + + // X_i = { + // imag(result[i]): imag(y[i])*real(x[i]) + real(y[i])*imag(x[i]), + // real(result[i]): real(y[i])*real(x[i]) - imag(y[i])*imag(x[i]) } + ADDSUBPS_X2_X3 + + ADDPS X3, SUM // SUM += X_i + +dotu_end: + ADDPS P_SUM, SUM // SUM = { P_SUM[0] + SUM[0] } + MOVHLPS P_SUM, P_SUM // P_SUM = { P_SUM[1], P_SUM[1] } + ADDPS P_SUM, SUM // SUM = { P_SUM[1] + SUM[0] } + +dotu_ret: + MOVSD SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go new file mode 100644 index 0000000000..a84def8761 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/scal.go @@ -0,0 +1,79 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package c64 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha complex64, x []complex64) { + for i := range x { + x[i] *= alpha + } +} + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []complex64, alpha complex64, x []complex64) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha complex64, x []complex64, n, incX uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += incX + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []complex64, incDst uintptr, alpha complex64, x []complex64, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} + +// SscalUnitary is +// for i, v := range x { +// x[i] = complex(real(v)*alpha, imag(v)*alpha) +// } +func SscalUnitary(alpha float32, x []complex64) { + for i, v := range x { + x[i] = complex(real(v)*alpha, imag(v)*alpha) + } +} + +// SscalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) +// ix += inc +// } +func SscalInc(alpha float32, x []complex64, n, inc uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] = complex(real(x[ix])*alpha, imag(x[ix])*alpha) + ix += inc + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go new file mode 100644 index 0000000000..3e12d6bcd9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_amd64.go @@ -0,0 +1,68 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package c64 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex64, x, y []complex64) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * conj(v) +// } +// return sum +func DotcUnitary(x, y []complex64) (sum complex64) + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex64) (sum complex64) + +// DotuInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go new file mode 100644 index 0000000000..411afcb2a0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/c64/stubs_noasm.go @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package c64 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha complex64, x, y []complex64) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []complex64, incDst, idst uintptr, alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} + +// DotcUnitary is +// for i, v := range x { +// sum += y[i] * conj(v) +// } +// return sum +func DotcUnitary(x, y []complex64) (sum complex64) { + for i, v := range x { + sum += y[i] * conj(v) + } + return sum +} + +// DotcInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * conj(x[ix]) +// ix += incX +// iy += incY +// } +// return sum +func DotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { + for i := 0; i < int(n); i++ { + sum += y[iy] * conj(x[ix]) + ix += incX + iy += incY + } + return sum +} + +// DotuUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotuUnitary(x, y []complex64) (sum complex64) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotuInc is +// for i := 0; 
i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s new file mode 100644 index 0000000000..2d167c08f3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyinc_amd64.s @@ -0,0 +1,73 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ n+56(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JLE axpyi_end + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ ix+80(FP), R8 // R8 = ix + MOVQ iy+88(FP), R9 // R9 = iy + LEAQ (SI)(R8*4), SI // SI = &(x[ix]) + LEAQ (DI)(R9*4), DI // DI = &(y[iy]) + MOVQ DI, DX // DX = DI Read Pointer for y + MOVQ incX+64(FP), R8 // R8 = incX + SHLQ $2, R8 // R8 *= sizeof(float32) + MOVQ incY+72(FP), R9 // R9 = incY + SHLQ $2, R9 // R9 *= sizeof(float32) + MOVSS alpha+0(FP), X0 // X0 = alpha + MOVSS X0, X1 // X1 = X0 // for pipelining + MOVQ CX, BX + ANDQ $3, BX // BX = n % 4 + SHRQ $2, CX // CX = floor( n / 4 ) + JZ axpyi_tail_start // if CX == 0 { goto axpyi_tail_start } + +axpyi_loop: // Loop unrolled 4x do { + MOVSS (SI), X2 // X_i = x[i] + MOVSS (SI)(R8*1), X3 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSS (SI), X4 + MOVSS (SI)(R8*1), X5 + MULSS X1, X2 // X_i *= a + MULSS X0, X3 + MULSS X1, X4 + MULSS X0, X5 + ADDSS (DX), X2 // X_i += y[i] + ADDSS (DX)(R9*1), X3 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDSS (DX), X4 + ADDSS (DX)(R9*1), X5 + 
MOVSS X2, (DI) // y[i] = X_i + MOVSS X3, (DI)(R9*1) + LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) + MOVSS X4, (DI) + MOVSS X5, (DI)(R9*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) // Increment addresses + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R9*2), DI // DI = &(DI[incY*2]) + LOOP axpyi_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpyi_end + +axpyi_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +axpyi_tail: // do { + MOVSS (SI), X2 // X2 = x[i] + MULSS X1, X2 // X2 *= a + ADDSS (DI), X2 // X2 += y[i] + MOVSS X2, (DI) // y[i] = X2 + ADDQ R8, SI // SI = &(SI[incX]) + ADDQ R9, DI // DI = &(DI[incY]) + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s new file mode 100644 index 0000000000..b79f9926c9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyincto_amd64.s @@ -0,0 +1,78 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ n+96(FP), CX // CX = n + CMPQ CX, $0 // if n==0 { return } + JLE axpyi_end + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+48(FP), SI // SI = &x + MOVQ y_base+72(FP), DX // DX = &y + MOVQ ix+120(FP), R8 // R8 = ix // Load the first index + MOVQ iy+128(FP), R9 // R9 = iy + MOVQ idst+32(FP), R10 // R10 = idst + LEAQ (SI)(R8*4), SI // SI = &(x[ix]) + LEAQ (DX)(R9*4), DX // DX = &(y[iy]) + LEAQ (DI)(R10*4), DI // DI = &(dst[idst]) + MOVQ incX+104(FP), R8 // R8 = incX + SHLQ $2, R8 // R8 *= sizeof(float32) + MOVQ incY+112(FP), R9 // R9 = incY + SHLQ $2, R9 // R9 *= sizeof(float32) + MOVQ incDst+24(FP), R10 // R10 = incDst + SHLQ $2, R10 // R10 *= sizeof(float32) + MOVSS alpha+40(FP), X0 // X0 = alpha + MOVSS X0, X1 // X1 = X0 // for pipelining + MOVQ CX, BX + ANDQ $3, BX // BX = n % 4 + SHRQ $2, CX // CX = floor( n / 4 ) + JZ axpyi_tail_start // if CX == 0 { goto axpyi_tail_start } + +axpyi_loop: // Loop unrolled 4x do { + MOVSS (SI), X2 // X_i = x[i] + MOVSS (SI)(R8*1), X3 + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) + MOVSS (SI), X4 + MOVSS (SI)(R8*1), X5 + MULSS X1, X2 // X_i *= a + MULSS X0, X3 + MULSS X1, X4 + MULSS X0, X5 + ADDSS (DX), X2 // X_i += y[i] + ADDSS (DX)(R9*1), X3 + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + ADDSS (DX), X4 + ADDSS (DX)(R9*1), X5 + MOVSS X2, (DI) // dst[i] = X_i + MOVSS X3, (DI)(R10*1) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) + MOVSS X4, (DI) + MOVSS X5, (DI)(R10*1) + LEAQ (SI)(R8*2), SI // SI = &(SI[incX*2]) // Increment addresses + LEAQ (DX)(R9*2), DX // DX = &(DX[incY*2]) + LEAQ (DI)(R10*2), DI // DI = &(DI[incDst*2]) + LOOP axpyi_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpyi_end + +axpyi_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +axpyi_tail: 
// do { + MOVSS (SI), X2 // X2 = x[i] + MULSS X1, X2 // X2 *= a + ADDSS (DX), X2 // X2 += y[i] + MOVSS X2, (DI) // dst[i] = X2 + ADDQ R8, SI // SI = &(SI[incX]) + ADDQ R9, DX // DX = &(DX[incY]) + ADDQ R10, DI // DI = &(DI[incY]) + LOOP axpyi_tail // } while --CX > 0 + +axpyi_end: + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s new file mode 100644 index 0000000000..97df90a07f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitary_amd64.s @@ -0,0 +1,97 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyUnitary(alpha float32, x, y []float32) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ y_base+32(FP), DI // DI = &y + MOVQ x_len+16(FP), BX // BX = min( len(x), len(y) ) + CMPQ y_len+40(FP), BX + CMOVQLE y_len+40(FP), BX + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + MOVSS alpha+0(FP), X0 + SHUFPS $0, X0, X0 // X0 = { a, a, a, a } + XORQ AX, AX // i = 0 + PXOR X2, X2 // 2 NOP instructions (PXOR) to align + PXOR X3, X3 // loop to cache line + MOVQ DI, CX + ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS + JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim } + + XORQ $0xF, CX // CX = 4 - floor( BX % 16 / 4 ) + INCQ CX + SHRQ $2, CX + +axpy_align: // Trim first value(s) in unaligned buffer do { + MOVSS (SI)(AX*4), X2 // X2 = x[i] + MULSS X0, X2 // X2 *= a + ADDSS (DI)(AX*4), X2 // X2 += y[i] + MOVSS X2, (DI)(AX*4) // y[i] = X2 + INCQ AX // i++ + DECQ BX + JZ axpy_end // if --BX == 0 { return } + LOOP axpy_align // } while --CX > 0 + +axpy_no_trim: + MOVUPS X0, X1 // Copy X0 to X1 for pipelining + MOVQ BX, CX + ANDQ $0xF, BX // BX = len % 16 + SHRQ $4, CX // CX = int( len / 16 ) + JZ axpy_tail4_start // if CX == 0 { 
return } + +axpy_loop: // Loop unrolled 16x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4] + MOVUPS 16(SI)(AX*4), X3 + MOVUPS 32(SI)(AX*4), X4 + MOVUPS 48(SI)(AX*4), X5 + MULPS X0, X2 // X2 *= a + MULPS X1, X3 + MULPS X0, X4 + MULPS X1, X5 + ADDPS (DI)(AX*4), X2 // X2 += y[i:i+4] + ADDPS 16(DI)(AX*4), X3 + ADDPS 32(DI)(AX*4), X4 + ADDPS 48(DI)(AX*4), X5 + MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2 + MOVUPS X3, 16(DI)(AX*4) + MOVUPS X4, 32(DI)(AX*4) + MOVUPS X5, 48(DI)(AX*4) + ADDQ $16, AX // i += 16 + LOOP axpy_loop // while (--CX) > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + +axpy_tail4_start: // Reset loop counter for 4-wide tail loop + MOVQ BX, CX // CX = floor( BX / 4 ) + SHRQ $2, CX + JZ axpy_tail_start // if CX == 0 { goto axpy_tail_start } + +axpy_tail4: // Loop unrolled 4x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i] + MULPS X0, X2 // X2 *= a + ADDPS (DI)(AX*4), X2 // X2 += y[i] + MOVUPS X2, (DI)(AX*4) // y[i] = X2 + ADDQ $4, AX // i += 4 + LOOP axpy_tail4 // } while --CX > 0 + +axpy_tail_start: // Reset loop counter for 1-wide tail loop + MOVQ BX, CX // CX = BX % 4 + ANDQ $3, CX + JZ axpy_end // if CX == 0 { return } + +axpy_tail: + MOVSS (SI)(AX*4), X1 // X1 = x[i] + MULSS X0, X1 // X1 *= a + ADDSS (DI)(AX*4), X1 // X1 += y[i] + MOVSS X1, (DI)(AX*4) // y[i] = X1 + INCQ AX // i++ + LOOP axpy_tail // } while --CX > 0 + +axpy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s new file mode 100644 index 0000000000..a826ca3125 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/axpyunitaryto_amd64.s @@ -0,0 +1,98 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ x_base+32(FP), SI // SI = &x + MOVQ y_base+56(FP), DX // DX = &y + MOVQ x_len+40(FP), BX // BX = min( len(x), len(y), len(dst) ) + CMPQ y_len+64(FP), BX + CMOVQLE y_len+64(FP), BX + CMPQ dst_len+8(FP), BX + CMOVQLE dst_len+8(FP), BX + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + MOVSS alpha+24(FP), X0 + SHUFPS $0, X0, X0 // X0 = { a, a, a, a, } + XORQ AX, AX // i = 0 + MOVQ DX, CX + ANDQ $0xF, CX // Align on 16-byte boundary for ADDPS + JZ axpy_no_trim // if CX == 0 { goto axpy_no_trim } + + XORQ $0xF, CX // CX = 4 - floor ( B % 16 / 4 ) + INCQ CX + SHRQ $2, CX + +axpy_align: // Trim first value(s) in unaligned buffer do { + MOVSS (SI)(AX*4), X2 // X2 = x[i] + MULSS X0, X2 // X2 *= a + ADDSS (DX)(AX*4), X2 // X2 += y[i] + MOVSS X2, (DI)(AX*4) // y[i] = X2 + INCQ AX // i++ + DECQ BX + JZ axpy_end // if --BX == 0 { return } + LOOP axpy_align // } while --CX > 0 + +axpy_no_trim: + MOVUPS X0, X1 // Copy X0 to X1 for pipelining + MOVQ BX, CX + ANDQ $0xF, BX // BX = len % 16 + SHRQ $4, CX // CX = floor( len / 16 ) + JZ axpy_tail4_start // if CX == 0 { return } + +axpy_loop: // Loop unrolled 16x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i:i+4] + MOVUPS 16(SI)(AX*4), X3 + MOVUPS 32(SI)(AX*4), X4 + MOVUPS 48(SI)(AX*4), X5 + MULPS X0, X2 // X2 *= a + MULPS X1, X3 + MULPS X0, X4 + MULPS X1, X5 + ADDPS (DX)(AX*4), X2 // X2 += y[i:i+4] + ADDPS 16(DX)(AX*4), X3 + ADDPS 32(DX)(AX*4), X4 + ADDPS 48(DX)(AX*4), X5 + MOVUPS X2, (DI)(AX*4) // dst[i:i+4] = X2 + MOVUPS X3, 16(DI)(AX*4) + MOVUPS X4, 32(DI)(AX*4) + MOVUPS X5, 48(DI)(AX*4) + ADDQ $16, AX // i += 16 + LOOP axpy_loop // while (--CX) > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE axpy_end + +axpy_tail4_start: // Reset loop counter for 4-wide tail loop + MOVQ BX, CX // CX = floor( BX / 4 ) + SHRQ $2, CX + JZ 
axpy_tail_start // if CX == 0 { goto axpy_tail_start } + +axpy_tail4: // Loop unrolled 4x do { + MOVUPS (SI)(AX*4), X2 // X2 = x[i] + MULPS X0, X2 // X2 *= a + ADDPS (DX)(AX*4), X2 // X2 += y[i] + MOVUPS X2, (DI)(AX*4) // y[i] = X2 + ADDQ $4, AX // i += 4 + LOOP axpy_tail4 // } while --CX > 0 + +axpy_tail_start: // Reset loop counter for 1-wide tail loop + MOVQ BX, CX // CX = BX % 4 + ANDQ $3, CX + JZ axpy_end // if CX == 0 { return } + +axpy_tail: + MOVSS (SI)(AX*4), X1 // X1 = x[i] + MULSS X0, X1 // X1 *= a + ADDSS (DX)(AX*4), X1 // X1 += y[i] + MOVSS X1, (DI)(AX*4) // y[i] = X1 + INCQ AX // i++ + LOOP axpy_tail // } while --CX > 0 + +axpy_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s new file mode 100644 index 0000000000..4518e04952 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotinc_amd64.s @@ -0,0 +1,91 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R10 +#define INC_Y R9 +#define INCx3_Y R11 +#define SUM X0 +#define P_SUM X1 + +// func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) +TEXT ·DdotInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ n+48(FP), LEN // LEN = n + PXOR SUM, SUM // SUM = 0 + CMPQ LEN, $0 + JE dot_end + + MOVQ ix+72(FP), INC_X // INC_X = ix + MOVQ iy+80(FP), INC_Y // INC_Y = iy + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy]) + + MOVQ incX+56(FP), INC_X // INC_X = incX * sizeof(float32) + SHLQ $2, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y = incY * sizeof(float32) + SHLQ $2, INC_Y + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dot_tail // if LEN == 0 { goto dot_tail } + + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dot_loop: // Loop unrolled 4x do { + CVTSS2SD (X_PTR), X2 // X_i = x[i:i+1] + CVTSS2SD (X_PTR)(INC_X*1), X3 + CVTSS2SD (X_PTR)(INC_X*2), X4 + CVTSS2SD (X_PTR)(INCx3_X*1), X5 + + CVTSS2SD (Y_PTR), X6 // X_j = y[i:i+1] + CVTSS2SD (Y_PTR)(INC_Y*1), X7 + CVTSS2SD (Y_PTR)(INC_Y*2), X8 + CVTSS2SD (Y_PTR)(INCx3_Y*1), X9 + + MULSD X6, X2 // X_i *= X_j + MULSD X7, X3 + MULSD X8, X4 + MULSD X9, X5 + + ADDSD X2, SUM // SUM += X_i + ADDSD X3, P_SUM + ADDSD X4, SUM + ADDSD X5, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4]) + + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDSD P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + CVTSS2SD (X_PTR), X2 // X2 = x[i] + CVTSS2SD (Y_PTR), X3 // X2 *= 
y[i] + MULSD X3, X2 + ADDSD X2, SUM // SUM += X2 + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVSD SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s new file mode 100644 index 0000000000..231cbd3bfb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ddotunitary_amd64.s @@ -0,0 +1,110 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define HADDPD_SUM_SUM LONG $0xC07C0F66 // @ HADDPD X0, X0 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define IDX AX +#define SUM X0 +#define P_SUM X1 + +// func DdotUnitary(x, y []float32) (sum float32) +TEXT ·DdotUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + PXOR SUM, SUM // psum = 0 + CMPQ LEN, $0 + JE dot_end + + XORQ IDX, IDX + MOVQ Y_PTR, DX + ANDQ $0xF, DX // Align on 16-byte boundary for ADDPS + JZ dot_no_trim // if DX == 0 { goto dot_no_trim } + + SUBQ $16, DX + +dot_align: // Trim first value(s) in unaligned buffer do { + CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i]) + CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i]) + MULSD X3, X2 + ADDSD X2, SUM // SUM += X2 + INCQ IDX // IDX++ + DECQ LEN + JZ dot_end // if --TAIL == 0 { return } + ADDQ $4, DX + JNZ dot_align // } while --LEN > 0 + +dot_no_trim: + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + MOVQ LEN, TAIL + ANDQ $0x7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ dot_tail_start // if LEN == 0 { goto dot_tail_start } + +dot_loop: // Loop 
unrolled 8x do { + CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + CVTPS2PD 8(X_PTR)(IDX*4), X3 + CVTPS2PD 16(X_PTR)(IDX*4), X4 + CVTPS2PD 24(X_PTR)(IDX*4), X5 + + CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1] + CVTPS2PD 8(Y_PTR)(IDX*4), X7 + CVTPS2PD 16(Y_PTR)(IDX*4), X8 + CVTPS2PD 24(Y_PTR)(IDX*4), X9 + + MULPD X6, X2 // X_i *= X_j + MULPD X7, X3 + MULPD X8, X4 + MULPD X9, X5 + + ADDPD X2, SUM // SUM += X_i + ADDPD X3, P_SUM + ADDPD X4, SUM + ADDPD X5, P_SUM + + ADDQ $8, IDX // IDX += 8 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDPD P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail_start: + MOVQ TAIL, LEN + SHRQ $1, LEN + JZ dot_tail_one + +dot_tail_two: + CVTPS2PD (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + CVTPS2PD (Y_PTR)(IDX*4), X6 // X_j = y[i:i+1] + MULPD X6, X2 // X_i *= X_j + ADDPD X2, SUM // SUM += X_i + ADDQ $2, IDX // IDX += 2 + DECQ LEN + JNZ dot_tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ dot_end + +dot_tail_one: + CVTSS2SD (X_PTR)(IDX*4), X2 // X2 = float64(x[i]) + CVTSS2SD (Y_PTR)(IDX*4), X3 // X3 = float64(y[i]) + MULSD X3, X2 // X2 *= X3 + ADDSD X2, SUM // SUM += X2 + +dot_end: + HADDPD_SUM_SUM // SUM = \sum{ SUM[i] } + MOVSD SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go new file mode 100644 index 0000000000..408847a698 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package f32 provides float32 vector primitives. 
+package f32 // import "gonum.org/v1/gonum/internal/asm/f32" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s new file mode 100644 index 0000000000..4d36b289c6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotinc_amd64.s @@ -0,0 +1,85 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R10 +#define INC_Y R9 +#define INCx3_Y R11 +#define SUM X0 +#define P_SUM X1 + +// func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) +TEXT ·DotInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + MOVQ n+48(FP), LEN // LEN = n + CMPQ LEN, $0 + JE dot_end + + MOVQ ix+72(FP), INC_X // INC_X = ix + MOVQ iy+80(FP), INC_Y // INC_Y = iy + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(y[iy]) + + MOVQ incX+56(FP), INC_X // INC_X := incX * sizeof(float32) + SHLQ $2, INC_X + MOVQ incY+64(FP), INC_Y // INC_Y := incY * sizeof(float32) + SHLQ $2, INC_Y + + MOVQ LEN, TAIL + ANDQ $0x3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ dot_tail // if LEN == 0 { goto dot_tail } + + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +dot_loop: // Loop unrolled 4x do { + MOVSS (X_PTR), X2 // X_i = x[i:i+1] + MOVSS (X_PTR)(INC_X*1), X3 + MOVSS (X_PTR)(INC_X*2), X4 + MOVSS (X_PTR)(INCx3_X*1), X5 + + MULSS (Y_PTR), X2 // X_i *= y[i:i+1] + MULSS (Y_PTR)(INC_Y*1), X3 + MULSS (Y_PTR)(INC_Y*2), X4 + MULSS (Y_PTR)(INCx3_Y*1), X5 + + ADDSS X2, SUM // SUM += X_i + 
ADDSS X3, P_SUM + ADDSS X4, SUM + ADDSS X5, P_SUM + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[INC_X * 4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[INC_Y * 4]) + + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDSS P_SUM, SUM // P_SUM += SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail: // do { + MOVSS (X_PTR), X2 // X2 = x[i] + MULSS (Y_PTR), X2 // X2 *= y[i] + ADDSS X2, SUM // SUM += X2 + ADDQ INC_X, X_PTR // X_PTR += INC_X + ADDQ INC_Y, Y_PTR // Y_PTR += INC_Y + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + MOVSS SUM, sum+88(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s new file mode 100644 index 0000000000..c32ede5a93 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/dotunitary_amd64.s @@ -0,0 +1,106 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define HADDPS_SUM_SUM LONG $0xC07C0FF2 // @ HADDPS X0, X0 + +#define X_PTR SI +#define Y_PTR DI +#define LEN CX +#define TAIL BX +#define IDX AX +#define SUM X0 +#define P_SUM X1 + +// func DotUnitary(x, y []float32) (sum float32) +TEXT ·DotUnitary(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ y_base+24(FP), Y_PTR // Y_PTR = &y + PXOR SUM, SUM // SUM = 0 + MOVQ x_len+8(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+32(FP), LEN + CMOVQLE y_len+32(FP), LEN + CMPQ LEN, $0 + JE dot_end + + XORQ IDX, IDX + MOVQ Y_PTR, DX + ANDQ $0xF, DX // Align on 16-byte boundary for MULPS + JZ dot_no_trim // if DX == 0 { goto dot_no_trim } + SUBQ $16, DX + +dot_align: // Trim first value(s) in unaligned buffer do { + MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i] + MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i] + ADDSS X2, SUM // SUM += X2 + INCQ IDX // IDX++ + DECQ LEN + JZ dot_end // if --TAIL == 0 { return } + ADDQ $4, DX + JNZ dot_align // } while --DX > 0 + +dot_no_trim: + PXOR P_SUM, P_SUM // P_SUM = 0 for pipelining + MOVQ LEN, TAIL + ANDQ $0xF, TAIL // TAIL = LEN % 16 + SHRQ $4, LEN // LEN = floor( LEN / 16 ) + JZ dot_tail4_start // if LEN == 0 { goto dot_tail4_start } + +dot_loop: // Loop unrolled 16x do { + MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + MOVUPS 16(X_PTR)(IDX*4), X3 + MOVUPS 32(X_PTR)(IDX*4), X4 + MOVUPS 48(X_PTR)(IDX*4), X5 + + MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1] + MULPS 16(Y_PTR)(IDX*4), X3 + MULPS 32(Y_PTR)(IDX*4), X4 + MULPS 48(Y_PTR)(IDX*4), X5 + + ADDPS X2, SUM // SUM += X_i + ADDPS X3, P_SUM + ADDPS X4, SUM + ADDPS X5, P_SUM + + ADDQ $16, IDX // IDX += 16 + DECQ LEN + JNZ dot_loop // } while --LEN > 0 + + ADDPS P_SUM, SUM // SUM += P_SUM + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE dot_end + +dot_tail4_start: // Reset loop counter for 4-wide tail loop + MOVQ TAIL, LEN // LEN = floor( TAIL / 4 ) + SHRQ $2, LEN + JZ dot_tail_start // if LEN == 0 { goto dot_tail_start } + 
+dot_tail4_loop: // Loop unrolled 4x do { + MOVUPS (X_PTR)(IDX*4), X2 // X_i = x[i:i+1] + MULPS (Y_PTR)(IDX*4), X2 // X_i *= y[i:i+1] + ADDPS X2, SUM // SUM += X_i + ADDQ $4, IDX // i += 4 + DECQ LEN + JNZ dot_tail4_loop // } while --LEN > 0 + +dot_tail_start: // Reset loop counter for 1-wide tail loop + ANDQ $3, TAIL // TAIL = TAIL % 4 + JZ dot_end // if TAIL == 0 { return } + +dot_tail: // do { + MOVSS (X_PTR)(IDX*4), X2 // X2 = x[i] + MULSS (Y_PTR)(IDX*4), X2 // X2 *= y[i] + ADDSS X2, SUM // psum += X2 + INCQ IDX // IDX++ + DECQ TAIL + JNZ dot_tail // } while --TAIL > 0 + +dot_end: + HADDPS_SUM_SUM // SUM = \sum{ SUM[i] } + HADDPS_SUM_SUM + MOVSS SUM, sum+48(FP) // return SUM + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go new file mode 100644 index 0000000000..2b336a2af9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.go @@ -0,0 +1,15 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package f32 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(m, n uintptr, alpha float32, + x []float32, incX uintptr, + y []float32, incY uintptr, + a []float32, lda uintptr) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s new file mode 100644 index 0000000000..e5e80c52c6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_amd64.s @@ -0,0 +1,757 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 4 +#define BITSIZE 2 +#define KERNELSIZE 3 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define Y y_base+56(FP) +#define Y_PTR DX +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X0 +#define ALPHA_SPILL al-16(SP) + +#define LOAD_ALPHA \ + MOVSS alpha+16(FP), ALPHA \ + SHUFPS $0, ALPHA, ALPHA + +#define LOAD_SCALED4 \ + PREFETCHNTA 16*SIZE(X_PTR) \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP 2*SIZE(X_PTR), X3 \ + MOVSHDUP X1, X2 \ + MOVSHDUP X3, X4 \ + MOVSLDUP X1, X1 \ + MOVSLDUP X3, X3 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 \ + MULPS ALPHA, X3 \ + MULPS ALPHA, X4 + +#define LOAD_SCALED2 \ + MOVDDUP (X_PTR), X1 \ + MOVSHDUP X1, X2 \ + MOVSLDUP X1, X1 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 + +#define LOAD_SCALED1 \ + MOVSS (X_PTR), X1 \ + SHUFPS $0, X1, X1 \ + MULPS ALPHA, X1 + +#define LOAD_SCALED4_INC \ + PREFETCHNTA (X_PTR)(INC_X*8) \ + MOVSS (X_PTR), X1 \ + MOVSS (X_PTR)(INC_X*1), X2 \ + MOVSS (X_PTR)(INC_X*2), X3 \ + MOVSS (X_PTR)(INC3_X*1), X4 \ + SHUFPS $0, X1, X1 \ + SHUFPS $0, X2, X2 \ + SHUFPS $0, X3, X3 \ + SHUFPS $0, X4, X4 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 \ + MULPS ALPHA, X3 \ + MULPS ALPHA, X4 + +#define LOAD_SCALED2_INC \ + MOVSS (X_PTR), X1 \ + MOVSS (X_PTR)(INC_X*1), X2 \ + SHUFPS $0, X1, X1 \ + SHUFPS $0, X2, X2 \ + MULPS ALPHA, X1 \ + MULPS ALPHA, X2 + +#define KERNEL_LOAD8 \ + MOVUPS (Y_PTR), X5 \ + MOVUPS 4*SIZE(Y_PTR), X6 + +#define KERNEL_LOAD8_INC \ + MOVSS (Y_PTR), X5 \ + MOVSS (Y_PTR)(INC_Y*1), X6 \ + MOVSS (Y_PTR)(INC_Y*2), X7 \ + MOVSS (Y_PTR)(INC3_Y*1), X8 \ + UNPCKLPS X6, X5 \ + UNPCKLPS X8, X7 \ + MOVLHPS X7, X5 \ + LEAQ (Y_PTR)(INC_Y*4), Y_PTR \ + MOVSS (Y_PTR), X6 \ + MOVSS (Y_PTR)(INC_Y*1), X7 \ + MOVSS (Y_PTR)(INC_Y*2), X8 \ + MOVSS 
(Y_PTR)(INC3_Y*1), X9 \ + UNPCKLPS X7, X6 \ + UNPCKLPS X9, X8 \ + MOVLHPS X8, X6 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X5 + +#define KERNEL_LOAD4_INC \ + MOVSS (Y_PTR), X5 \ + MOVSS (Y_PTR)(INC_Y*1), X6 \ + MOVSS (Y_PTR)(INC_Y*2), X7 \ + MOVSS (Y_PTR)(INC3_Y*1), X8 \ + UNPCKLPS X6, X5 \ + UNPCKLPS X8, X7 \ + MOVLHPS X7, X5 + +#define KERNEL_LOAD2 \ + MOVSD (Y_PTR), X5 + +#define KERNEL_LOAD2_INC \ + MOVSS (Y_PTR), X5 \ + MOVSS (Y_PTR)(INC_Y*1), X6 \ + UNPCKLPS X6, X5 + +#define KERNEL_4x8 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MOVUPS X5, X9 \ + MOVUPS X6, X10 \ + MOVUPS X5, X11 \ + MOVUPS X6, X12 \ + MULPS X1, X5 \ + MULPS X1, X6 \ + MULPS X2, X7 \ + MULPS X2, X8 \ + MULPS X3, X9 \ + MULPS X3, X10 \ + MULPS X4, X11 \ + MULPS X4, X12 + +#define STORE_4x8 \ + MOVUPS ALPHA, ALPHA_SPILL \ + MOVUPS (A_PTR), X13 \ + ADDPS X13, X5 \ + MOVUPS 4*SIZE(A_PTR), X14 \ + ADDPS X14, X6 \ + MOVUPS (A_PTR)(LDA*1), X15 \ + ADDPS X15, X7 \ + MOVUPS 4*SIZE(A_PTR)(LDA*1), X0 \ + ADDPS X0, X8 \ + MOVUPS (A_PTR)(LDA*2), X13 \ + ADDPS X13, X9 \ + MOVUPS 4*SIZE(A_PTR)(LDA*2), X14 \ + ADDPS X14, X10 \ + MOVUPS (A_PTR)(LDA3*1), X15 \ + ADDPS X15, X11 \ + MOVUPS 4*SIZE(A_PTR)(LDA3*1), X0 \ + ADDPS X0, X12 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 4*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \ + MOVUPS X9, (A_PTR)(LDA*2) \ + MOVUPS X10, 4*SIZE(A_PTR)(LDA*2) \ + MOVUPS X11, (A_PTR)(LDA3*1) \ + MOVUPS X12, 4*SIZE(A_PTR)(LDA3*1) \ + MOVUPS ALPHA_SPILL, ALPHA \ + ADDQ $8*SIZE, A_PTR + +#define KERNEL_4x4 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPS X1, X5 \ + MULPS X2, X6 \ + MULPS X3, X7 \ + MULPS X4, X8 + +#define STORE_4x4 \ + MOVUPS (A_PTR), X13 \ + ADDPS X13, X5 \ + MOVUPS (A_PTR)(LDA*1), X14 \ + ADDPS X14, X6 \ + MOVUPS (A_PTR)(LDA*2), X15 \ + ADDPS X15, X7 \ + MOVUPS (A_PTR)(LDA3*1), X13 \ + ADDPS X13, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + MOVUPS X7, (A_PTR)(LDA*2) \ + MOVUPS X8, (A_PTR)(LDA3*1) \ + ADDQ $4*SIZE, 
A_PTR + +#define KERNEL_4x2 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPS X1, X5 \ + MULPS X2, X6 \ + MULPS X3, X7 \ + MULPS X4, X8 + +#define STORE_4x2 \ + MOVSD (A_PTR), X9 \ + ADDPS X9, X5 \ + MOVSD (A_PTR)(LDA*1), X10 \ + ADDPS X10, X6 \ + MOVSD (A_PTR)(LDA*2), X11 \ + ADDPS X11, X7 \ + MOVSD (A_PTR)(LDA3*1), X12 \ + ADDPS X12, X8 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + MOVSD X7, (A_PTR)(LDA*2) \ + MOVSD X8, (A_PTR)(LDA3*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVSS (Y_PTR), X5 \ + MOVSS X5, X6 \ + MOVSS X5, X7 \ + MOVSS X5, X8 \ + MULSS X1, X5 \ + MULSS X2, X6 \ + MULSS X3, X7 \ + MULSS X4, X8 + +#define STORE_4x1 \ + ADDSS (A_PTR), X5 \ + ADDSS (A_PTR)(LDA*1), X6 \ + ADDSS (A_PTR)(LDA*2), X7 \ + ADDSS (A_PTR)(LDA3*1), X8 \ + MOVSS X5, (A_PTR) \ + MOVSS X6, (A_PTR)(LDA*1) \ + MOVSS X7, (A_PTR)(LDA*2) \ + MOVSS X8, (A_PTR)(LDA3*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_2x8 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MULPS X1, X5 \ + MULPS X1, X6 \ + MULPS X2, X7 \ + MULPS X2, X8 + +#define STORE_2x8 \ + MOVUPS (A_PTR), X9 \ + ADDPS X9, X5 \ + MOVUPS 4*SIZE(A_PTR), X10 \ + ADDPS X10, X6 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPS X11, X7 \ + MOVUPS 4*SIZE(A_PTR)(LDA*1), X12 \ + ADDPS X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 4*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 4*SIZE(A_PTR)(LDA*1) \ + ADDQ $8*SIZE, A_PTR + +#define KERNEL_2x4 \ + MOVUPS X5, X6 \ + MULPS X1, X5 \ + MULPS X2, X6 + +#define STORE_2x4 \ + MOVUPS (A_PTR), X9 \ + ADDPS X9, X5 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPS X11, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVSD X5, X6 \ + MULPS X1, X5 \ + MULPS X2, X6 + +#define STORE_2x2 \ + MOVSD (A_PTR), X7 \ + ADDPS X7, X5 \ + MOVSD (A_PTR)(LDA*1), X8 \ + ADDPS X8, X6 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVSS (Y_PTR), X5 \ + MOVSS X5, X6 \ + MULSS X1, X5 \ + MULSS 
X2, X6 + +#define STORE_2x1 \ + ADDSS (A_PTR), X5 \ + ADDSS (A_PTR)(LDA*1), X6 \ + MOVSS X5, (A_PTR) \ + MOVSS X6, (A_PTR)(LDA*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x8 \ + MULPS X1, X5 \ + MULPS X1, X6 + +#define STORE_1x8 \ + MOVUPS (A_PTR), X7 \ + ADDPS X7, X5 \ + MOVUPS 4*SIZE(A_PTR), X8 \ + ADDPS X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 4*SIZE(A_PTR) \ + ADDQ $8*SIZE, A_PTR + +#define KERNEL_1x4 \ + MULPS X1, X5 \ + MULPS X1, X6 + +#define STORE_1x4 \ + MOVUPS (A_PTR), X7 \ + ADDPS X7, X5 \ + MOVUPS X5, (A_PTR) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MULPS X1, X5 + +#define STORE_1x2 \ + MOVSD (A_PTR), X6 \ + ADDPS X6, X5 \ + MOVSD X5, (A_PTR) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSS (Y_PTR), X5 \ + MULSS X1, X5 + +#define STORE_1x1 \ + ADDSS (A_PTR), X5 \ + MOVSS X5, (A_PTR) \ + ADDQ $SIZE, A_PTR + +// func Ger(m, n uintptr, alpha float32, +// x []float32, incX uintptr, +// y []float32, incY uintptr, +// a []float32, lda uintptr) +TEXT ·Ger(SB), 0, $16-120 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + LOAD_ALPHA + + MOVQ x_base+24(FP), X_PTR + MOVQ y_base+56(FP), Y_PTR + MOVQ a_base+88(FP), A_ROW + MOVQ A_ROW, A_PTR + MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float32) + SHLQ $BITSIZE, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + + CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path) + JNE inc + CMPQ incX+48(FP), $1 // Check for dense vector X (fast-path) + JNE inc + + SHRQ $2, M + JZ r2 + +r4: + + // LOAD 4 + LOAD_SCALED4 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ r4c4 + +r4c8: + // 4x8 KERNEL + KERNEL_LOAD8 + KERNEL_4x8 + STORE_4x8 + + ADDQ $8*SIZE, Y_PTR + + DECQ N + JNZ r4c8 + +r4c4: + TESTQ $4, N_DIM + JZ r4c2 + + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE_4x4 + + ADDQ $4*SIZE, Y_PTR + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + STORE_4x2 + + ADDQ $2*SIZE, Y_PTR + +r4c1: + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + 
KERNEL_4x1 + STORE_4x1 + + ADDQ $SIZE, Y_PTR + +r4end: + ADDQ $4*SIZE, X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + LOAD_SCALED2 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ r2c4 + +r2c8: + // 2x8 KERNEL + KERNEL_LOAD8 + KERNEL_2x8 + STORE_2x8 + + ADDQ $8*SIZE, Y_PTR + + DECQ N + JNZ r2c8 + +r2c4: + TESTQ $4, N_DIM + JZ r2c2 + + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + STORE_2x4 + + ADDQ $4*SIZE, Y_PTR + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE_2x2 + + ADDQ $2*SIZE, Y_PTR + +r2c1: + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ $SIZE, Y_PTR + +r2end: + ADDQ $2*SIZE, X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD_SCALED1 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ r1c4 + +r1c8: + // 1x8 KERNEL + KERNEL_LOAD8 + KERNEL_1x8 + STORE_1x8 + + ADDQ $8*SIZE, Y_PTR + + DECQ N + JNZ r1c8 + +r1c4: + TESTQ $4, N_DIM + JZ r1c2 + + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + STORE_1x4 + + ADDQ $4*SIZE, Y_PTR + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + STORE_1x2 + + ADDQ $2*SIZE, Y_PTR + +r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + +end: + RET + +inc: // Algorithm for incY != 0 ( split loads in kernel ) + + MOVQ incX+48(FP), INC_X // INC_X = incX * sizeof(float32) + SHLQ $BITSIZE, INC_X + MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float32) + SHLQ $BITSIZE, INC_Y + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_X, TMP1 + NEGQ TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ 
(Y_PTR)(TMP2*SIZE), Y_PTR + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + LOAD_SCALED4_INC + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ inc_r4c4 + +inc_r4c8: + // 4x4 KERNEL + KERNEL_LOAD8_INC + KERNEL_4x8 + STORE_4x8 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r4c8 + +inc_r4c4: + TESTQ $4, N_DIM + JZ inc_r4c2 + + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE_4x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + STORE_4x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r4c1: + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ INC_Y, Y_PTR + +inc_r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + LOAD_SCALED2_INC + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ inc_r2c4 + +inc_r2c8: + // 2x8 KERNEL + KERNEL_LOAD8_INC + KERNEL_2x8 + STORE_2x8 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r2c8 + +inc_r2c4: + TESTQ $4, N_DIM + JZ inc_r2c2 + + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + STORE_2x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE_2x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2c1: + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ INC_Y, Y_PTR + +inc_r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD_SCALED1 + + MOVQ N_DIM, N + SHRQ $KERNELSIZE, N + JZ inc_r1c4 + +inc_r1c8: + // 1x8 KERNEL + KERNEL_LOAD8_INC + KERNEL_1x8 + STORE_1x8 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r1c8 + +inc_r1c4: + TESTQ $4, N_DIM + JZ inc_r1c2 + + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + STORE_1x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 
+ + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + STORE_1x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ inc_end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go new file mode 100644 index 0000000000..d92f9968d0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/ge_noasm.go @@ -0,0 +1,36 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f32 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. +func Ger(m, n uintptr, alpha float32, x []float32, incX uintptr, y []float32, incY uintptr, a []float32, lda uintptr) { + + if incX == 1 && incY == 1 { + x = x[:m] + y = y[:n] + for i, xv := range x { + AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n]) + } + return + } + + var ky, kx uintptr + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + + ix := kx + for i := 0; i < int(m); i++ { + AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], uintptr(n), uintptr(incY), 1, uintptr(ky), 0) + ix += incX + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go new file mode 100644 index 0000000000..d0867a4609 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/scal.go @@ -0,0 +1,55 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package f32 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float32, x []float32) { + for i := range x { + x[i] *= alpha + } +} + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float32, alpha float32, x []float32) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float32, x []float32, n, incX uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += incX + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float32, incDst uintptr, alpha float32, x []float32, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go new file mode 100644 index 0000000000..fcbce09e7c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_amd64.go @@ -0,0 +1,68 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +package f32 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float32, x, y []float32) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) + +// DdotUnitary is +// for i, v := range x { +// sum += float64(y[i]) * float64(v) +// } +// return +func DdotUnitary(x, y []float32) (sum float64) + +// DdotInc is +// for i := 0; i < int(n); i++ { +// sum += float64(y[iy]) * float64(x[ix]) +// ix += incX +// iy += incY +// } +// return +func DdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float32) (sum float32) + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go new file mode 100644 index 0000000000..3b5b09702c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f32/stubs_noasm.go @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 noasm appengine safe + +package f32 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float32, x, y []float32) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float32, alpha float32, x, y []float32) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float32, incDst, idst uintptr, alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float32) (sum float32) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} + +// DdotUnitary is +// for i, v := range x { +// sum += float64(y[i]) * float64(v) +// } +// return +func DdotUnitary(x, y []float32) (sum float64) { + for i, v := range x { + sum += float64(y[i]) * float64(v) + } + return +} + +// DdotInc is +// for i := 0; i < int(n); i++ { +// sum += float64(y[iy]) * float64(x[ix]) +// ix += incX +// iy += incY +// } +// return +func DdotInc(x, y []float32, 
n, incX, incY, ix, iy uintptr) (sum float64) { + for i := 0; i < int(n); i++ { + sum += float64(y[iy]) * float64(x[ix]) + ix += incX + iy += incY + } + return +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s new file mode 100644 index 0000000000..d9d61bb7b9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssum_amd64.s @@ -0,0 +1,82 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func L1Norm(x []float64) float64 +TEXT ·L1Norm(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), SI // SI = &x + MOVQ x_len+8(FP), CX // CX = len(x) + XORQ AX, AX // i = 0 + PXOR X0, X0 // p_sum_i = 0 + PXOR X1, X1 + PXOR X2, X2 + PXOR X3, X3 + PXOR X4, X4 + PXOR X5, X5 + PXOR X6, X6 + PXOR X7, X7 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE absum_end + MOVQ CX, BX + ANDQ $7, BX // BX = len(x) % 8 + SHRQ $3, CX // CX = floor( len(x) / 8 ) + JZ absum_tail_start // if CX == 0 { goto absum_tail_start } + +absum_loop: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVUPS (SI)(AX*8), X8 // X_i = x[i:i+1] + MOVUPS 16(SI)(AX*8), X9 + MOVUPS 32(SI)(AX*8), X10 + MOVUPS 48(SI)(AX*8), X11 + ADDPD X8, X0 // p_sum_i += X_i ( positive values ) + ADDPD X9, X2 + ADDPD X10, X4 + ADDPD X11, X6 + SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) + SUBPD X9, X3 + SUBPD X10, X5 + SUBPD X11, X7 + MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) + MAXPD X3, X2 + MAXPD X5, X4 + MAXPD X7, X6 + MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i + MOVAPS X2, X3 + MOVAPS X4, X5 + MOVAPS X6, X7 + ADDQ $8, AX // i += 8 + LOOP absum_loop // } while --CX > 0 + + // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) + ADDPD X3, X0 + ADDPD X5, X7 + ADDPD X7, X0 + + // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] + MOVAPS X0, X1 + SHUFPD $0x3, X0, X0 // lower( 
p_sum_0 ) = upper( p_sum_0 ) + ADDSD X1, X0 + CMPQ BX, $0 + JE absum_end // if BX == 0 { goto absum_end } + +absum_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + XORPS X8, X8 // X_8 = 0 + +absum_tail: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI)(AX*8), X8 // X_8 = x[i] + MOVSD X0, X1 // p_sum_1 = p_sum_0 + ADDSD X8, X0 // p_sum_0 += X_8 + SUBSD X8, X1 // p_sum_1 -= X_8 + MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) + INCQ AX // i++ + LOOP absum_tail // } while --CX > 0 + +absum_end: // return p_sum_0 + MOVSD X0, sum+24(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s new file mode 100644 index 0000000000..cac19aa64c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/abssuminc_amd64.s @@ -0,0 +1,90 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func L1NormInc(x []float64, n, incX int) (sum float64) +TEXT ·L1NormInc(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), SI // SI = &x + MOVQ n+24(FP), CX // CX = n + MOVQ incX+32(FP), AX // AX = increment * sizeof( float64 ) + SHLQ $3, AX + MOVQ AX, DX // DX = AX * 3 + IMULQ $3, DX + PXOR X0, X0 // p_sum_i = 0 + PXOR X1, X1 + PXOR X2, X2 + PXOR X3, X3 + PXOR X4, X4 + PXOR X5, X5 + PXOR X6, X6 + PXOR X7, X7 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE absum_end + MOVQ CX, BX + ANDQ $7, BX // BX = n % 8 + SHRQ $3, CX // CX = floor( n / 8 ) + JZ absum_tail_start // if CX == 0 { goto absum_tail_start } + +absum_loop: // do { + // p_sum = max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI), X8 // X_i[0] = x[i] + MOVSD (SI)(AX*1), X9 + MOVSD (SI)(AX*2), X10 + MOVSD (SI)(DX*1), X11 + LEAQ (SI)(AX*4), SI // SI = SI + 4 + MOVHPD (SI), X8 // X_i[1] = x[i+4] + MOVHPD (SI)(AX*1), X9 + MOVHPD (SI)(AX*2), X10 + MOVHPD (SI)(DX*1), X11 + ADDPD X8, X0 // p_sum_i += X_i ( positive values ) + ADDPD X9, X2 + ADDPD X10, X4 + ADDPD X11, X6 + SUBPD X8, X1 // p_sum_(i+1) -= X_i ( negative values ) + SUBPD X9, X3 + SUBPD X10, X5 + SUBPD X11, X7 + MAXPD X1, X0 // p_sum_i = max( p_sum_i, p_sum_(i+1) ) + MAXPD X3, X2 + MAXPD X5, X4 + MAXPD X7, X6 + MOVAPS X0, X1 // p_sum_(i+1) = p_sum_i + MOVAPS X2, X3 + MOVAPS X4, X5 + MOVAPS X6, X7 + LEAQ (SI)(AX*4), SI // SI = SI + 4 + LOOP absum_loop // } while --CX > 0 + + // p_sum_0 = \sum_{i=1}^{3}( p_sum_(i*2) ) + ADDPD X3, X0 + ADDPD X5, X7 + ADDPD X7, X0 + + // p_sum_0[0] = p_sum_0[0] + p_sum_0[1] + MOVAPS X0, X1 + SHUFPD $0x3, X0, X0 // lower( p_sum_0 ) = upper( p_sum_0 ) + ADDSD X1, X0 + CMPQ BX, $0 + JE absum_end // if BX == 0 { goto absum_end } + +absum_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + XORPS X8, X8 // X_8 = 0 + +absum_tail: // do { + // p_sum += max( p_sum + x[i], p_sum - x[i] ) + MOVSD (SI), X8 // X_8 = x[i] + MOVSD X0, X1 // p_sum_1 = p_sum_0 + 
ADDSD X8, X0 // p_sum_0 += X_8 + SUBSD X8, X1 // p_sum_1 -= X_8 + MAXSD X1, X0 // p_sum_0 = max( p_sum_0, p_sum_1 ) + ADDQ AX, SI // i++ + LOOP absum_tail // } while --CX > 0 + +absum_end: // return p_sum_0 + MOVSD X0, sum+40(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s new file mode 100644 index 0000000000..bc0ea6a407 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/add_amd64.s @@ -0,0 +1,66 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func Add(dst, s []float64) +TEXT ·Add(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE add_end + XORQ AX, AX + MOVQ DI, BX + ANDQ $0x0F, BX // BX = &dst & 15 + JZ add_no_trim // if BX == 0 { goto add_no_trim } + + // Align on 16-bit boundary + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD (DI)(AX*8), X0 // X0 += dst[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // i++ + DECQ CX // --CX + JE add_end // if CX == 0 { return } + +add_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ add_tail_start // if CX == 0 { goto add_tail_start } + +add_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + ADDPD (DI)(AX*8), X0 // X_i += dst[i:i+1] + ADDPD 16(DI)(AX*8), X1 + ADDPD 32(DI)(AX*8), X2 + ADDPD 48(DI)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X_i + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP add_loop // } while --CX > 0 + 
CMPQ BX, $0 // if BX == 0 { return } + JE add_end + +add_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +add_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD (DI)(AX*8), X0 // X0 += dst[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + LOOP add_tail // } while --CX > 0 + +add_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s new file mode 100644 index 0000000000..7cc68c78c9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/addconst_amd64.s @@ -0,0 +1,53 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func Addconst(alpha float64, x []float64) +TEXT ·AddConst(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), SI // SI = &x + MOVQ x_len+16(FP), CX // CX = len(x) + CMPQ CX, $0 // if len(x) == 0 { return } + JE ac_end + MOVSD alpha+0(FP), X4 // X4 = { a, a } + SHUFPD $0, X4, X4 + MOVUPS X4, X5 // X5 = X4 + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $7, BX // BX = len(x) % 8 + SHRQ $3, CX // CX = floor( len(x) / 8 ) + JZ ac_tail_start // if CX == 0 { goto ac_tail_start } + +ac_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X_i = s[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + ADDPD X4, X0 // X_i += a + ADDPD X5, X1 + ADDPD X4, X2 + ADDPD X5, X3 + MOVUPS X0, (SI)(AX*8) // s[i:i+1] = X_i + MOVUPS X1, 16(SI)(AX*8) + MOVUPS X2, 32(SI)(AX*8) + MOVUPS X3, 48(SI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP ac_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE ac_end + +ac_tail_start: // Reset loop counters + MOVQ BX, CX // Loop counter: CX = BX + +ac_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = s[i] + ADDSD X4, X0 // X0 += a + MOVSD X0, (SI)(AX*8) // s[i] = X0 + INCQ AX // ++i + LOOP 
ac_tail // } while --CX > 0 + +ac_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go new file mode 100644 index 0000000000..b832213981 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpy.go @@ -0,0 +1,57 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f64 + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float64, x, y []float64) { + for i, v := range x { + y[i] += alpha * v + } +} + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) { + for i, v := range x { + dst[i] = alpha*v + y[i] + } +} + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + y[iy] += alpha * x[ix] + ix += incX + iy += incY + } +} + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { + for i := 0; i < int(n); i++ { + dst[idst] = alpha*x[ix] + y[iy] + ix += incX + iy += incY + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s new file mode 100644 index 0000000000..95fe9f9044 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyinc_amd64.s @@ -0,0 +1,142 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R11 +#define INC_Y R9 +#define INCx3_Y R12 +#define INC_DST R9 +#define INCx3_DST R12 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyInc(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ y_base+32(FP), Y_PTR // Y_PTR = &y + MOVQ n+56(FP), LEN // LEN = n + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + + MOVQ ix+80(FP), INC_X + MOVQ iy+88(FP), INC_Y + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy]) + MOVQ Y_PTR, DST_PTR // DST_PTR = Y_PTR // Write pointer + + MOVQ incX+64(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ incY+72(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + + MOVSD alpha+0(FP), ALPHA // ALPHA = alpha + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVAPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + +loop: // do { // y[i] += alpha * x[i] unrolled 4x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + ADDSD (Y_PTR)(INC_Y*2), X4 + ADDSD (Y_PTR)(INCx3_Y*1), X5 + + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset Loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + // y[i] += alpha * x[i] for the last n % 4 iterations. + MOVSD (X_PTR), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[i] + MOVSD X2, (DST_PTR) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s new file mode 100644 index 0000000000..dcb79d878e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyincto_amd64.s @@ -0,0 +1,148 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DX +#define IDX AX +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R11 +#define INC_Y R9 +#define INCx3_Y R12 +#define INC_DST R10 +#define INCx3_DST R13 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) +TEXT ·AxpyIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst + MOVQ x_base+48(FP), X_PTR // X_PTR := &x + MOVQ y_base+72(FP), Y_PTR // Y_PTR := &y + MOVQ n+96(FP), LEN // LEN := n + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + + MOVQ ix+120(FP), INC_X + LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix]) + MOVQ iy+128(FP), INC_Y + LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(dst[idst]) + MOVQ idst+32(FP), INC_DST + LEAQ (DST_PTR)(INC_DST*8), DST_PTR // DST_PTR = &(y[iy]) + + MOVQ incX+104(FP), INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ incY+112(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + MOVQ incDst+24(FP), INC_DST // INC_DST = incDst * sizeof(float64) + SHLQ $3, INC_DST + MOVSD alpha+40(FP), ALPHA + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = n % 4 + SHRQ $2, LEN // LEN = floor( n / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVSD ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3 + LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 + +loop: // do { // y[i] += alpha * x[i] unrolled 2x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + ADDSD (Y_PTR)(INC_Y*2), X4 + ADDSD (Y_PTR)(INCx3_Y*1), X5 + + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4]) + LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4] + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset Loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + ADDSD (Y_PTR), X2 // X_i += y[i] + ADDSD (Y_PTR)(INC_Y*1), X3 + MOVSD X2, (DST_PTR) // y[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2]) + LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incY*2] + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[i] + MOVSD X2, (DST_PTR) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s new file mode 100644 index 0000000000..bc290a1528 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitary_amd64.s @@ -0,0 +1,134 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyUnitary(alpha float64, x, y []float64) +TEXT ·AxpyUnitary(SB), NOSPLIT, $0 + MOVQ x_base+8(FP), X_PTR // X_PTR := &x + MOVQ y_base+32(FP), Y_PTR // Y_PTR := &y + MOVQ x_len+16(FP), LEN // LEN = min( len(x), len(y) ) + CMPQ y_len+40(FP), LEN + CMOVQLE y_len+40(FP), LEN + CMPQ LEN, $0 // if LEN == 0 { return } + JE end + XORQ IDX, IDX + MOVSD alpha+0(FP), ALPHA // ALPHA := { alpha, alpha } + SHUFPD $0, ALPHA, ALPHA + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining + MOVQ Y_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + MOVSD (X_PTR), X2 // X2 := x[0] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[0] + MOVSD X2, (DST_PTR) // y[0] = X2 + INCQ IDX // i++ + DECQ LEN // LEN-- + JZ end // if LEN == 0 { return } + +no_trim: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL := n % 8 + SHRQ $3, LEN // LEN = floor( n / 8 ) + JZ tail_start // if LEN == 0 { goto tail2_start } + +loop: // do { + // y[i] += alpha * x[i] unrolled 8x. 
+ MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= a + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] + ADDPD 16(Y_PTR)(IDX*8), X3 + ADDPD 32(Y_PTR)(IDX*8), X4 + ADDPD 48(Y_PTR)(IDX*8), X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if TAIL == 0 { goto tail } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] + MULPD ALPHA, X2 // X2 *= a + ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s new file mode 100644 index 0000000000..16798ebaab --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/axpyunitaryto_amd64.s @@ -0,0 +1,140 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define Y_PTR DX +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) +TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst + MOVQ x_base+32(FP), X_PTR // X_PTR := &x + MOVQ y_base+56(FP), Y_PTR // Y_PTR := &y + MOVQ x_len+40(FP), LEN // LEN = min( len(x), len(y), len(dst) ) + CMPQ y_len+64(FP), LEN + CMOVQLE y_len+64(FP), LEN + CMPQ dst_len+8(FP), LEN + CMOVQLE dst_len+8(FP), LEN + + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + XORQ IDX, IDX // IDX = 0 + MOVSD alpha+24(FP), ALPHA + SHUFPD $0, ALPHA, ALPHA // ALPHA := { alpha, alpha } + MOVQ Y_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + MOVSD (X_PTR), X2 // X2 := x[0] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR), X2 // X2 += y[0] + MOVSD X2, (DST_PTR) // y[0] = X2 + INCQ IDX // i++ + DECQ LEN // LEN-- + JZ end // if LEN == 0 { return } + +no_trim: + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL := n % 8 + SHRQ $3, LEN // LEN = floor( n / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining + +loop: // do { + // y[i] += alpha * x[i] unrolled 8x. 
+ MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= alpha + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i] + ADDPD 16(Y_PTR)(IDX*8), X3 + ADDPD 32(Y_PTR)(IDX*8), X4 + ADDPD 48(Y_PTR)(IDX*8), X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 // if TAIL == 0 { return } + JE end + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if LEN == 0 { goto tail } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i] + MULPD ALPHA, X2 // X2 *= alpha + ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2 + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // } while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { goto end } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i] + MULSD ALPHA, X2 // X2 *= a + ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i] + MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2 + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s new file mode 100644 index 0000000000..32bd1572b7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumprod_amd64.s @@ -0,0 +1,71 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +TEXT ·CumProd(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + MOVQ CX, ret_len+56(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE cp_end + XORQ AX, AX // i = 0 + + MOVSD (SI), X5 // p_prod = { s[0], s[0] } + SHUFPD $0, X5, X5 + MOVSD X5, (DI) // dst[0] = s[0] + INCQ AX // ++i + DECQ CX // -- CX + JZ cp_end // if CX == 0 { return } + + MOVQ CX, BX + ANDQ $3, BX // BX = CX % 4 + SHRQ $2, CX // CX = floor( CX / 4 ) + JZ cp_tail_start // if CX == 0 { goto cp_tail_start } + +cp_loop: // Loop unrolled 4x do { + MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] + MOVUPS 16(SI)(AX*8), X2 + MOVAPS X0, X1 // X1 = X0 + MOVAPS X2, X3 + SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } + SHUFPD $1, X3, X3 + MULPD X0, X1 // X1 *= X0 + MULPD X2, X3 + SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } + SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } + SHUFPD $2, X3, X2 + SHUFPD $3, X3, X3 + MULPD X5, X0 // X0 *= p_prod + MULPD X1, X5 // p_prod *= X1 + MULPD X5, X2 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X2, 16(DI)(AX*8) + MULPD X3, X5 + ADDQ $4, AX // i += 4 + LOOP cp_loop // } while --CX > 0 + + // if BX == 0 { return } + CMPQ BX, $0 + JE cp_end + +cp_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +cp_tail: // do { + MULSD (SI)(AX*8), X5 // p_prod *= s[i] + MOVSD X5, (DI)(AX*8) // dst[i] = p_prod + INCQ AX // ++i + LOOP cp_tail // } while --CX > 0 + +cp_end: + MOVQ DI, ret_base+48(FP) // &ret = &dst + MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) + MOVQ SI, ret_cap+64(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s new file mode 100644 index 0000000000..10d7fdab91 --- /dev/null +++ 
b/vendor/gonum.org/v1/gonum/internal/asm/f64/cumsum_amd64.s @@ -0,0 +1,64 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +TEXT ·CumSum(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + MOVQ CX, ret_len+56(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE cs_end + XORQ AX, AX // i = 0 + PXOR X5, X5 // p_sum = 0 + MOVQ CX, BX + ANDQ $3, BX // BX = CX % 4 + SHRQ $2, CX // CX = floor( CX / 4 ) + JZ cs_tail_start // if CX == 0 { goto cs_tail_start } + +cs_loop: // Loop unrolled 4x do { + MOVUPS (SI)(AX*8), X0 // X0 = s[i:i+1] + MOVUPS 16(SI)(AX*8), X2 + MOVAPS X0, X1 // X1 = X0 + MOVAPS X2, X3 + SHUFPD $1, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[0] } + SHUFPD $1, X3, X3 + ADDPD X0, X1 // X1 += X0 + ADDPD X2, X3 + SHUFPD $2, X1, X0 // { X0[0], X0[1] } = { X0[0], X1[1] } + SHUFPD $3, X1, X1 // { X1[0], X1[1] } = { X1[1], X1[1] } + SHUFPD $2, X3, X2 + SHUFPD $3, X3, X3 + ADDPD X5, X0 // X0 += p_sum + ADDPD X1, X5 // p_sum += X1 + ADDPD X5, X2 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X2, 16(DI)(AX*8) + ADDPD X3, X5 + ADDQ $4, AX // i += 4 + LOOP cs_loop // } while --CX > 0 + + // if BX == 0 { return } + CMPQ BX, $0 + JE cs_end + +cs_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +cs_tail: // do { + ADDSD (SI)(AX*8), X5 // p_sum *= s[i] + MOVSD X5, (DI)(AX*8) // dst[i] = p_sum + INCQ AX // ++i + LOOP cs_tail // } while --CX > 0 + +cs_end: + MOVQ DI, ret_base+48(FP) // &ret = &dst + MOVQ dst_cap+16(FP), SI // cap(ret) = cap(dst) + MOVQ SI, ret_cap+64(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s 
b/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s new file mode 100644 index 0000000000..1a4e9eec9a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/div_amd64.s @@ -0,0 +1,67 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func Div(dst, s []float64) +TEXT ·Div(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ s_base+24(FP), SI // SI = &s + CMPQ s_len+32(FP), CX // CX = max( CX, len(s) ) + CMOVQLE s_len+32(FP), CX + CMPQ CX, $0 // if CX == 0 { return } + JE div_end + XORQ AX, AX // i = 0 + MOVQ SI, BX + ANDQ $15, BX // BX = &s & 15 + JZ div_no_trim // if BX == 0 { goto div_no_trim } + + // Align on 16-bit boundary + MOVSD (DI)(AX*8), X0 // X0 = dst[i] + DIVSD (SI)(AX*8), X0 // X0 /= s[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + DECQ CX // --CX + JZ div_end // if CX == 0 { return } + +div_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ div_tail_start // if CX == 0 { goto div_tail_start } + +div_loop: // Loop unrolled 8x do { + MOVUPS (DI)(AX*8), X0 // X0 = dst[i:i+1] + MOVUPS 16(DI)(AX*8), X1 + MOVUPS 32(DI)(AX*8), X2 + MOVUPS 48(DI)(AX*8), X3 + DIVPD (SI)(AX*8), X0 // X0 /= s[i:i+1] + DIVPD 16(SI)(AX*8), X1 + DIVPD 32(SI)(AX*8), X2 + DIVPD 48(SI)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i] = X0 + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP div_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE div_end + +div_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +div_tail: // do { + MOVSD (DI)(AX*8), X0 // X0 = dst[i] + DIVSD (SI)(AX*8), X0 // X0 /= s[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + LOOP div_tail // } 
while --CX > 0 + +div_end: + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s new file mode 100644 index 0000000000..16ab9b7ec6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/divto_amd64.s @@ -0,0 +1,73 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func DivTo(dst, x, y []float64) +TEXT ·DivTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DI // DI = &dst + MOVQ dst_len+8(FP), CX // CX = len(dst) + MOVQ x_base+24(FP), SI // SI = &x + MOVQ y_base+48(FP), DX // DX = &y + CMPQ x_len+32(FP), CX // CX = max( len(dst), len(x), len(y) ) + CMOVQLE x_len+32(FP), CX + CMPQ y_len+56(FP), CX + CMOVQLE y_len+56(FP), CX + MOVQ CX, ret_len+80(FP) // len(ret) = CX + CMPQ CX, $0 // if CX == 0 { return } + JE div_end + XORQ AX, AX // i = 0 + MOVQ DX, BX + ANDQ $15, BX // BX = &y & OxF + JZ div_no_trim // if BX == 0 { goto div_no_trim } + + // Align on 16-bit boundary + MOVSD (SI)(AX*8), X0 // X0 = s[i] + DIVSD (DX)(AX*8), X0 // X0 /= t[i] + MOVSD X0, (DI)(AX*8) // dst[i] = X0 + INCQ AX // ++i + DECQ CX // --CX + JZ div_end // if CX == 0 { return } + +div_no_trim: + MOVQ CX, BX + ANDQ $7, BX // BX = len(dst) % 8 + SHRQ $3, CX // CX = floor( len(dst) / 8 ) + JZ div_tail_start // if CX == 0 { goto div_tail_start } + +div_loop: // Loop unrolled 8x do { + MOVUPS (SI)(AX*8), X0 // X0 = x[i:i+1] + MOVUPS 16(SI)(AX*8), X1 + MOVUPS 32(SI)(AX*8), X2 + MOVUPS 48(SI)(AX*8), X3 + DIVPD (DX)(AX*8), X0 // X0 /= y[i:i+1] + DIVPD 16(DX)(AX*8), X1 + DIVPD 32(DX)(AX*8), X2 + DIVPD 48(DX)(AX*8), X3 + MOVUPS X0, (DI)(AX*8) // dst[i:i+1] = X0 + MOVUPS X1, 16(DI)(AX*8) + MOVUPS X2, 32(DI)(AX*8) + MOVUPS X3, 48(DI)(AX*8) + ADDQ $8, AX // i += 8 + LOOP div_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE 
div_end + +div_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + +div_tail: // do { + MOVSD (SI)(AX*8), X0 // X0 = x[i] + DIVSD (DX)(AX*8), X0 // X0 /= y[i] + MOVSD X0, (DI)(AX*8) + INCQ AX // ++i + LOOP div_tail // } while --CX > 0 + +div_end: + MOVQ DI, ret_base+72(FP) // &ret = &dst + MOVQ dst_cap+16(FP), DI // cap(ret) = cap(dst) + MOVQ DI, ret_cap+88(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go new file mode 100644 index 0000000000..33c76c1e03 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package f64 provides float64 vector primitives. +package f64 // import "gonum.org/v1/gonum/internal/asm/f64" diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go new file mode 100644 index 0000000000..b77138d1a8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot.go @@ -0,0 +1,35 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 noasm appengine safe + +package f64 + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float64) (sum float64) { + for i, v := range x { + sum += y[i] * v + } + return sum +} + +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) { + for i := 0; i < int(n); i++ { + sum += y[iy] * x[ix] + ix += incX + iy += incY + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s new file mode 100644 index 0000000000..eff25059f1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/dot_amd64.s @@ -0,0 +1,145 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +// func DdotUnitary(x, y []float64) (sum float64) +// This function assumes len(y) >= len(x). +TEXT ·DotUnitary(SB), NOSPLIT, $0 + MOVQ x+0(FP), R8 + MOVQ x_len+8(FP), DI // n = len(x) + MOVQ y+24(FP), R9 + + MOVSD $(0.0), X7 // sum = 0 + MOVSD $(0.0), X8 // sum = 0 + + MOVQ $0, SI // i = 0 + SUBQ $4, DI // n -= 4 + JL tail_uni // if n < 0 goto tail_uni + +loop_uni: + // sum += x[i] * y[i] unrolled 4x. + MOVUPD 0(R8)(SI*8), X0 + MOVUPD 0(R9)(SI*8), X1 + MOVUPD 16(R8)(SI*8), X2 + MOVUPD 16(R9)(SI*8), X3 + MULPD X1, X0 + MULPD X3, X2 + ADDPD X0, X7 + ADDPD X2, X8 + + ADDQ $4, SI // i += 4 + SUBQ $4, DI // n -= 4 + JGE loop_uni // if n >= 0 goto loop_uni + +tail_uni: + ADDQ $4, DI // n += 4 + JLE end_uni // if n <= 0 goto end_uni + +onemore_uni: + // sum += x[i] * y[i] for the remaining 1-3 elements. + MOVSD 0(R8)(SI*8), X0 + MOVSD 0(R9)(SI*8), X1 + MULSD X1, X0 + ADDSD X0, X7 + + ADDQ $1, SI // i++ + SUBQ $1, DI // n-- + JNZ onemore_uni // if n != 0 goto onemore_uni + +end_uni: + // Add the four sums together. + ADDPD X8, X7 + MOVSD X7, X0 + UNPCKHPD X7, X7 + ADDSD X0, X7 + MOVSD X7, sum+48(FP) // Return final sum. 
+ RET + +// func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) +TEXT ·DotInc(SB), NOSPLIT, $0 + MOVQ x+0(FP), R8 + MOVQ y+24(FP), R9 + MOVQ n+48(FP), CX + MOVQ incX+56(FP), R11 + MOVQ incY+64(FP), R12 + MOVQ ix+72(FP), R13 + MOVQ iy+80(FP), R14 + + MOVSD $(0.0), X7 // sum = 0 + LEAQ (R8)(R13*8), SI // p = &x[ix] + LEAQ (R9)(R14*8), DI // q = &y[ix] + SHLQ $3, R11 // incX *= sizeof(float64) + SHLQ $3, R12 // indY *= sizeof(float64) + + SUBQ $2, CX // n -= 2 + JL tail_inc // if n < 0 goto tail_inc + +loop_inc: + // sum += *p * *q unrolled 2x. + MOVHPD (SI), X0 + MOVHPD (DI), X1 + ADDQ R11, SI // p += incX + ADDQ R12, DI // q += incY + MOVLPD (SI), X0 + MOVLPD (DI), X1 + ADDQ R11, SI // p += incX + ADDQ R12, DI // q += incY + + MULPD X1, X0 + ADDPD X0, X7 + + SUBQ $2, CX // n -= 2 + JGE loop_inc // if n >= 0 goto loop_inc + +tail_inc: + ADDQ $2, CX // n += 2 + JLE end_inc // if n <= 0 goto end_inc + + // sum += *p * *q for the last iteration if n is odd. + MOVSD (SI), X0 + MULSD (DI), X0 + ADDSD X0, X7 + +end_inc: + // Add the two sums together. + MOVSD X7, X0 + UNPCKHPD X7, X7 + ADDSD X0, X7 + MOVSD X7, sum+88(FP) // Return final sum. + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go new file mode 100644 index 0000000000..00c99e9323 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_amd64.go @@ -0,0 +1,22 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +package f64 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
+func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) + +// GemvN computes +// y = alpha * A * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) + +// GemvT computes +// y = alpha * A^T * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go new file mode 100644 index 0000000000..2a1cfd5cdd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/ge_noasm.go @@ -0,0 +1,118 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f64 + +// Ger performs the rank-one operation +// A += alpha * x * y^T +// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
+func Ger(m, n uintptr, alpha float64, x []float64, incX uintptr, y []float64, incY uintptr, a []float64, lda uintptr) { + if incX == 1 && incY == 1 { + x = x[:m] + y = y[:n] + for i, xv := range x { + AxpyUnitary(alpha*xv, y, a[uintptr(i)*lda:uintptr(i)*lda+n]) + } + return + } + + var ky, kx uintptr + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + + ix := kx + for i := 0; i < int(m); i++ { + AxpyInc(alpha*x[ix], y, a[uintptr(i)*lda:uintptr(i)*lda+n], n, incY, 1, ky, 0) + ix += incX + } +} + +// GemvN computes +// y = alpha * A * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. +func GemvN(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { + var kx, ky, i uintptr + if int(incX) < 0 { + kx = uintptr(-int(n-1) * int(incX)) + } + if int(incY) < 0 { + ky = uintptr(-int(m-1) * int(incY)) + } + + if incX == 1 && incY == 1 { + if beta == 0 { + for i = 0; i < m; i++ { + y[i] = alpha * DotUnitary(a[lda*i:lda*i+n], x) + } + return + } + for i = 0; i < m; i++ { + y[i] = y[i]*beta + alpha*DotUnitary(a[lda*i:lda*i+n], x) + } + return + } + iy := ky + if beta == 0 { + for i = 0; i < m; i++ { + y[iy] = alpha * DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) + iy += incY + } + return + } + for i = 0; i < m; i++ { + y[iy] = y[iy]*beta + alpha*DotInc(x, a[lda*i:lda*i+n], n, incX, 1, kx, 0) + iy += incY + } +} + +// GemvT computes +// y = alpha * A^T * x + beta * y +// where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 
+func GemvT(m, n uintptr, alpha float64, a []float64, lda uintptr, x []float64, incX uintptr, beta float64, y []float64, incY uintptr) { + var kx, ky, i uintptr + if int(incX) < 0 { + kx = uintptr(-int(m-1) * int(incX)) + } + if int(incY) < 0 { + ky = uintptr(-int(n-1) * int(incY)) + } + switch { + case beta == 0: // beta == 0 is special-cased to memclear + if incY == 1 { + for i := range y { + y[i] = 0 + } + } else { + iy := ky + for i := 0; i < int(n); i++ { + y[iy] = 0 + iy += incY + } + } + case int(incY) < 0: + ScalInc(beta, y, n, uintptr(int(-incY))) + case incY == 1: + ScalUnitary(beta, y[:n]) + default: + ScalInc(beta, y, n, incY) + } + + if incX == 1 && incY == 1 { + for i = 0; i < m; i++ { + AxpyUnitaryTo(y, alpha*x[i], a[lda*i:lda*i+n], y) + } + return + } + ix := kx + for i = 0; i < m; i++ { + AxpyInc(alpha*x[ix], a[lda*i:lda*i+n], y, n, 1, incY, 0, ky) + ix += incX + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s new file mode 100644 index 0000000000..8d2a6a71a5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvN_amd64.s @@ -0,0 +1,685 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define X x_base+56(FP) +#define INC_X R8 +#define INC3_X R9 + +#define Y_PTR DX +#define Y y_base+96(FP) +#define INC_Y R10 +#define INC3_Y R11 + +#define A_ROW AX +#define A_PTR DI +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X15 +#define BETA X14 + +#define INIT4 \ + XORPS X0, X0 \ + XORPS X1, X1 \ + XORPS X2, X2 \ + XORPS X3, X3 + +#define INIT2 \ + XORPS X0, X0 \ + XORPS X1, X1 + +#define INIT1 \ + XORPS X0, X0 + +#define KERNEL_LOAD4 \ + MOVUPS (X_PTR), X12 \ + MOVUPS 2*SIZE(X_PTR), X13 + +#define KERNEL_LOAD2 \ + MOVUPS (X_PTR), X12 + +#define KERNEL_LOAD4_INC \ + MOVSD (X_PTR), X12 \ + MOVHPD (X_PTR)(INC_X*1), X12 \ + MOVSD (X_PTR)(INC_X*2), X13 \ + MOVHPD (X_PTR)(INC3_X*1), X13 + +#define KERNEL_LOAD2_INC \ + MOVSD (X_PTR), X12 \ + MOVHPD (X_PTR)(INC_X*1), X12 + +#define KERNEL_4x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MOVUPS (A_PTR)(LDA*2), X8 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X9 \ + MOVUPS (A_PTR)(LDA3*1), X10 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X11 \ + MULPD X12, X4 \ + MULPD X13, X5 \ + MULPD X12, X6 \ + MULPD X13, X7 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + MULPD X12, X10 \ + MULPD X13, X11 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDPD X6, X1 \ + ADDPD X7, X1 \ + ADDPD X8, X2 \ + ADDPD X9, X2 \ + ADDPD X10, X3 \ + ADDPD X11, X3 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MOVUPS (A_PTR)(LDA*2), X6 \ + MOVUPS (A_PTR)(LDA3*1), X7 \ + MULPD X12, X4 \ + MULPD X12, X5 \ + MULPD X12, X6 \ + MULPD X12, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X2 \ + ADDPD X7, X3 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVDDUP (X_PTR), X12 \ + MOVSD (A_PTR), X4 \ + MOVHPD (A_PTR)(LDA*1), X4 \ + MOVSD 
(A_PTR)(LDA*2), X5 \ + MOVHPD (A_PTR)(LDA3*1), X5 \ + MULPD X12, X4 \ + MULPD X12, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X2 \ + ADDQ $SIZE, A_PTR + +#define STORE4 \ + MOVUPS (Y_PTR), X4 \ + MOVUPS 2*SIZE(Y_PTR), X5 \ + MULPD ALPHA, X0 \ + MULPD ALPHA, X2 \ + MULPD BETA, X4 \ + MULPD BETA, X5 \ + ADDPD X0, X4 \ + ADDPD X2, X5 \ + MOVUPS X4, (Y_PTR) \ + MOVUPS X5, 2*SIZE(Y_PTR) + +#define STORE4_INC \ + MOVSD (Y_PTR), X4 \ + MOVHPD (Y_PTR)(INC_Y*1), X4 \ + MOVSD (Y_PTR)(INC_Y*2), X5 \ + MOVHPD (Y_PTR)(INC3_Y*1), X5 \ + MULPD ALPHA, X0 \ + MULPD ALPHA, X2 \ + MULPD BETA, X4 \ + MULPD BETA, X5 \ + ADDPD X0, X4 \ + ADDPD X2, X5 \ + MOVLPD X4, (Y_PTR) \ + MOVHPD X4, (Y_PTR)(INC_Y*1) \ + MOVLPD X5, (Y_PTR)(INC_Y*2) \ + MOVHPD X5, (Y_PTR)(INC3_Y*1) + +#define KERNEL_2x4 \ + MOVUPS (A_PTR), X8 \ + MOVUPS 2*SIZE(A_PTR), X9 \ + MOVUPS (A_PTR)(LDA*1), X10 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X11 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + MULPD X12, X10 \ + MULPD X13, X11 \ + ADDPD X8, X0 \ + ADDPD X10, X1 \ + ADDPD X9, X0 \ + ADDPD X11, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS (A_PTR), X8 \ + MOVUPS (A_PTR)(LDA*1), X9 \ + MULPD X12, X8 \ + MULPD X12, X9 \ + ADDPD X8, X0 \ + ADDPD X9, X1 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVDDUP (X_PTR), X12 \ + MOVSD (A_PTR), X8 \ + MOVHPD (A_PTR)(LDA*1), X8 \ + MULPD X12, X8 \ + ADDPD X8, X0 \ + ADDQ $SIZE, A_PTR + +#define STORE2 \ + MOVUPS (Y_PTR), X4 \ + MULPD ALPHA, X0 \ + MULPD BETA, X4 \ + ADDPD X0, X4 \ + MOVUPS X4, (Y_PTR) + +#define STORE2_INC \ + MOVSD (Y_PTR), X4 \ + MOVHPD (Y_PTR)(INC_Y*1), X4 \ + MULPD ALPHA, X0 \ + MULPD BETA, X4 \ + ADDPD X0, X4 \ + MOVSD X4, (Y_PTR) \ + MOVHPD X4, (Y_PTR)(INC_Y*1) + +#define KERNEL_1x4 \ + MOVUPS (A_PTR), X8 \ + MOVUPS 2*SIZE(A_PTR), X9 \ + MULPD X12, X8 \ + MULPD X13, X9 \ + ADDPD X8, X0 \ + ADDPD X9, X0 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MOVUPS (A_PTR), X8 \ + MULPD X12, X8 \ + ADDPD X8, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD 
(X_PTR), X12 \ + MOVSD (A_PTR), X8 \ + MULSD X12, X8 \ + ADDSD X8, X0 \ + ADDQ $SIZE, A_PTR + +#define STORE1 \ + HADDPD X0, X0 \ + MOVSD (Y_PTR), X4 \ + MULSD ALPHA, X0 \ + MULSD BETA, X4 \ + ADDSD X0, X4 \ + MOVSD X4, (Y_PTR) + +// func GemvN(m, n int, +// alpha float64, +// a []float64, lda int, +// x []float64, incX int, +// beta float64, +// y []float64, incY int) +TEXT ·GemvN(SB), NOSPLIT, $32-128 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + MOVDDUP beta+88(FP), BETA + + MOVQ x_base+56(FP), X_PTR + MOVQ y_base+96(FP), Y_PTR + MOVQ a_base+24(FP), A_ROW + MOVQ incY+120(FP), INC_Y + MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + MOVQ A_ROW, A_PTR + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + MOVQ Y_PTR, Y + + SHLQ $3, INC_Y // INC_Y = incY * sizeof(float64) + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + MOVSD $0.0, X0 + COMISD BETA, X0 + JNE gemv_start // if beta != 0 { goto gemv_start } + +gemv_clear: // beta == 0 is special cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + CMPQ incY+120(FP), $1 // Check for dense vector X (fast-path) + JNE inc_clear + + SHRQ $3, M + JZ clear4 + +clear8: + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + MOVUPS X2, 32(Y_PTR) + MOVUPS X3, 48(Y_PTR) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ clear8 + +clear4: + TESTQ $4, M_DIM + JZ clear2 + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + ADDQ $4*SIZE, Y_PTR + +clear2: + TESTQ $2, M_DIM + JZ clear1 + MOVUPS X0, (Y_PTR) + ADDQ $2*SIZE, Y_PTR + +clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + + JMP prep_end + +inc_clear: + SHRQ $2, M + JZ inc_clear2 + +inc_clear4: + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + MOVSD X2, (Y_PTR)(INC_Y*2) + MOVSD X3, (Y_PTR)(INC3_Y*1) + LEAQ 
(Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_clear4 + +inc_clear2: + TESTQ $2, M_DIM + JZ inc_clear1 + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + +prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +gemv_start: + CMPQ incX+80(FP), $1 // Check for dense vector X (fast-path) + JNE inc + + SHRQ $2, M + JZ r2 + +r4: + // LOAD 4 + INIT4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r4c2 + +r4c4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r4c4 + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + + ADDQ $2*SIZE, X_PTR + +r4c1: + HADDPD X1, X0 + HADDPD X3, X2 + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + + ADDQ $SIZE, X_PTR + +r4end: + CMPQ INC_Y, $SIZE + JNZ r4st_inc + + STORE4 + ADDQ $4*SIZE, Y_PTR + JMP r4inc + +r4st_inc: + STORE4_INC + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +r4inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + INIT2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r2c2 + +r2c4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r2c4 + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + + ADDQ $2*SIZE, X_PTR + +r2c1: + HADDPD X1, X0 + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + + ADDQ $SIZE, X_PTR + +r2end: + CMPQ INC_Y, $SIZE + JNE r2st_inc + + STORE2 + ADDQ $2*SIZE, Y_PTR + JMP r2inc + +r2st_inc: + STORE2_INC + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +r2inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + INIT1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r1c2 + +r1c4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + + ADDQ $4*SIZE, X_PTR + + DECQ N + JNZ r1c4 + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + + ADDQ $2*SIZE, X_PTR + +r1c1: + + TESTQ $1, 
N_DIM + JZ r1end + + // 1x1 KERNEL + KERNEL_1x1 + +r1end: + STORE1 + +end: + RET + +inc: // Algorithm for incX != 1 ( split loads in kernel ) + MOVQ incX+80(FP), INC_X // INC_X = incX + + XORQ TMP2, TMP2 // TMP2 = 0 + MOVQ N, TMP1 // TMP1 = N + SUBQ $1, TMP1 // TMP1 -= 1 + NEGQ TMP1 // TMP1 = -TMP1 + IMULQ INC_X, TMP1 // TMP1 *= INC_X + CMPQ INC_X, $0 // if INC_X < 0 { TMP2 = TMP1 } + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR // X_PTR = X_PTR[TMP2] + MOVQ X_PTR, X // X = X_PTR + + SHLQ $3, INC_X + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + INIT4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r4c2 + +inc_r4c4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + + DECQ N + JNZ inc_r4c4 + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r4c1: + HADDPD X1, X0 + HADDPD X3, X2 + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + + ADDQ INC_X, X_PTR + +inc_r4end: + CMPQ INC_Y, $SIZE + JNE inc_r4st_inc + + STORE4 + ADDQ $4*SIZE, Y_PTR + JMP inc_r4inc + +inc_r4st_inc: + STORE4_INC + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + +inc_r4inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + INIT2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r2c2 + +inc_r2c4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + DECQ N + JNZ inc_r2c4 + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r2c1: + HADDPD X1, X0 + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + + ADDQ INC_X, X_PTR + +inc_r2end: + CMPQ INC_Y, $SIZE + JNE inc_r2st_inc + + STORE2 + ADDQ $2*SIZE, Y_PTR + JMP inc_r2inc + +inc_r2st_inc: + STORE2_INC + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2inc: + MOVQ X, X_PTR + LEAQ (A_ROW)(LDA*2), 
A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ inc_end + + // LOAD 1 + INIT1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r1c2 + +inc_r1c4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + + LEAQ (X_PTR)(INC_X*4), X_PTR + DECQ N + JNZ inc_r1c4 + +inc_r1c2: + TESTQ $2, N_DIM + JZ inc_r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + + LEAQ (X_PTR)(INC_X*2), X_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ inc_r1end + + // 1x1 KERNEL + KERNEL_1x1 + +inc_r1end: + STORE1 + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s new file mode 100644 index 0000000000..ff7d60fb68 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/gemvT_amd64.s @@ -0,0 +1,745 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM n+8(FP) +#define M CX +#define N_DIM m+0(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define X x_base+56(FP) +#define Y_PTR DX +#define Y y_base+96(FP) +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X15 +#define BETA X14 + +#define INIT4 \ + MOVDDUP (X_PTR), X8 \ + MOVDDUP (X_PTR)(INC_X*1), X9 \ + MOVDDUP (X_PTR)(INC_X*2), X10 \ + MOVDDUP (X_PTR)(INC3_X*1), X11 \ + MULPD ALPHA, X8 \ + MULPD ALPHA, X9 \ + MULPD ALPHA, X10 \ + MULPD ALPHA, X11 + +#define INIT2 \ + MOVDDUP (X_PTR), X8 \ + MOVDDUP (X_PTR)(INC_X*1), X9 \ + MULPD ALPHA, X8 \ + MULPD ALPHA, X9 + +#define INIT1 \ + MOVDDUP (X_PTR), X8 \ + MULPD ALPHA, X8 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X0 \ + MOVUPS 2*SIZE(Y_PTR), X1 + +#define KERNEL_LOAD2 \ + MOVUPS (Y_PTR), X0 + +#define KERNEL_LOAD4_INC \ + MOVSD (Y_PTR), X0 \ + MOVHPD 
(Y_PTR)(INC_Y*1), X0 \ + MOVSD (Y_PTR)(INC_Y*2), X1 \ + MOVHPD (Y_PTR)(INC3_Y*1), X1 + +#define KERNEL_LOAD2_INC \ + MOVSD (Y_PTR), X0 \ + MOVHPD (Y_PTR)(INC_Y*1), X0 + +#define KERNEL_4x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + MULPD X9, X6 \ + MULPD X9, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + MOVUPS (A_PTR)(LDA*2), X4 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X5 \ + MOVUPS (A_PTR)(LDA3*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X7 \ + MULPD X10, X4 \ + MULPD X10, X5 \ + MULPD X11, X6 \ + MULPD X11, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MOVUPS (A_PTR)(LDA*1), X6 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X7 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + MULPD X9, X6 \ + MULPD X9, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDPD X6, X0 \ + ADDPD X7, X1 \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVUPS (A_PTR), X4 \ + MOVUPS 2*SIZE(A_PTR), X5 \ + MULPD X8, X4 \ + MULPD X8, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X1 \ + ADDQ $4*SIZE, A_PTR + +#define STORE4 \ + MOVUPS X0, (Y_PTR) \ + MOVUPS X1, 2*SIZE(Y_PTR) + +#define STORE4_INC \ + MOVLPD X0, (Y_PTR) \ + MOVHPD X0, (Y_PTR)(INC_Y*1) \ + MOVLPD X1, (Y_PTR)(INC_Y*2) \ + MOVHPD X1, (Y_PTR)(INC3_Y*1) + +#define KERNEL_2x4 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MOVUPS (A_PTR)(LDA*2), X6 \ + MOVUPS (A_PTR)(LDA3*1), X7 \ + MULPD X8, X4 \ + MULPD X9, X5 \ + MULPD X10, X6 \ + MULPD X11, X7 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDPD X6, X0 \ + ADDPD X7, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS (A_PTR), X4 \ + MOVUPS (A_PTR)(LDA*1), X5 \ + MULPD X8, X4 \ + MULPD X9, X5 \ + ADDPD X4, X0 \ + ADDPD X5, X0 \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVUPS (A_PTR), X4 \ + MULPD X8, X4 \ + ADDPD X4, X0 \ + ADDQ $2*SIZE, A_PTR + +#define 
STORE2 \ + MOVUPS X0, (Y_PTR) + +#define STORE2_INC \ + MOVLPD X0, (Y_PTR) \ + MOVHPD X0, (Y_PTR)(INC_Y*1) + +#define KERNEL_1x4 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MOVSD (A_PTR)(LDA*1), X5 \ + MOVSD (A_PTR)(LDA*2), X6 \ + MOVSD (A_PTR)(LDA3*1), X7 \ + MULSD X8, X4 \ + MULSD X9, X5 \ + MULSD X10, X6 \ + MULSD X11, X7 \ + ADDSD X4, X0 \ + ADDSD X5, X0 \ + ADDSD X6, X0 \ + ADDSD X7, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x2 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MOVSD (A_PTR)(LDA*1), X5 \ + MULSD X8, X4 \ + MULSD X9, X5 \ + ADDSD X4, X0 \ + ADDSD X5, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (Y_PTR), X0 \ + MOVSD (A_PTR), X4 \ + MULSD X8, X4 \ + ADDSD X4, X0 \ + MOVSD X0, (Y_PTR) \ + ADDQ $SIZE, A_PTR + +#define SCALE_8(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MOVUPS 16(PTR), X1 \ + MOVUPS 32(PTR), X2 \ + MOVUPS 48(PTR), X3 \ + MULPD SCAL, X0 \ + MULPD SCAL, X1 \ + MULPD SCAL, X2 \ + MULPD SCAL, X3 \ + MOVUPS X0, (PTR) \ + MOVUPS X1, 16(PTR) \ + MOVUPS X2, 32(PTR) \ + MOVUPS X3, 48(PTR) + +#define SCALE_4(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MOVUPS 16(PTR), X1 \ + MULPD SCAL, X0 \ + MULPD SCAL, X1 \ + MOVUPS X0, (PTR) \ + MOVUPS X1, 16(PTR) \ + +#define SCALE_2(PTR, SCAL) \ + MOVUPS (PTR), X0 \ + MULPD SCAL, X0 \ + MOVUPS X0, (PTR) \ + +#define SCALE_1(PTR, SCAL) \ + MOVSD (PTR), X0 \ + MULSD SCAL, X0 \ + MOVSD X0, (PTR) \ + +#define SCALEINC_4(PTR, INC, INC3, SCAL) \ + MOVSD (PTR), X0 \ + MOVSD (PTR)(INC*1), X1 \ + MOVSD (PTR)(INC*2), X2 \ + MOVSD (PTR)(INC3*1), X3 \ + MULSD SCAL, X0 \ + MULSD SCAL, X1 \ + MULSD SCAL, X2 \ + MULSD SCAL, X3 \ + MOVSD X0, (PTR) \ + MOVSD X1, (PTR)(INC*1) \ + MOVSD X2, (PTR)(INC*2) \ + MOVSD X3, (PTR)(INC3*1) + +#define SCALEINC_2(PTR, INC, SCAL) \ + MOVSD (PTR), X0 \ + MOVSD (PTR)(INC*1), X1 \ + MULSD SCAL, X0 \ + MULSD SCAL, X1 \ + MOVSD X0, (PTR) \ + MOVSD X1, (PTR)(INC*1) + +// func GemvT(m, n int, +// alpha float64, +// a []float64, lda int, +// x 
[]float64, incX int, +// beta float64, +// y []float64, incY int) +TEXT ·GemvT(SB), NOSPLIT, $32-128 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + + MOVQ x_base+56(FP), X_PTR + MOVQ y_base+96(FP), Y_PTR + MOVQ a_base+24(FP), A_ROW + MOVQ incY+120(FP), INC_Y // INC_Y = incY * sizeof(float64) + MOVQ lda+48(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + MOVQ A_ROW, A_PTR + + MOVQ incX+80(FP), INC_X // INC_X = incX * sizeof(float64) + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + NEGQ TMP1 + IMULQ INC_X, TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + MOVQ X_PTR, X + + SHLQ $3, INC_X + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + + CMPQ incY+120(FP), $1 // Check for dense vector Y (fast-path) + JNE inc + + MOVSD $1.0, X0 + COMISD beta+88(FP), X0 + JE gemv_start + + MOVSD $0.0, X0 + COMISD beta+88(FP), X0 + JE gemv_clear + + MOVDDUP beta+88(FP), BETA + SHRQ $3, M + JZ scal4 + +scal8: + SCALE_8(Y_PTR, BETA) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ scal8 + +scal4: + TESTQ $4, M_DIM + JZ scal2 + SCALE_4(Y_PTR, BETA) + ADDQ $4*SIZE, Y_PTR + +scal2: + TESTQ $2, M_DIM + JZ scal1 + SCALE_2(Y_PTR, BETA) + ADDQ $2*SIZE, Y_PTR + +scal1: + TESTQ $1, M_DIM + JZ prep_end + SCALE_1(Y_PTR, BETA) + + JMP prep_end + +gemv_clear: // beta == 0 is special cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + SHRQ $3, M + JZ clear4 + +clear8: + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + MOVUPS X2, 32(Y_PTR) + MOVUPS X3, 48(Y_PTR) + ADDQ $8*SIZE, Y_PTR + DECQ M + JNZ clear8 + +clear4: + TESTQ $4, M_DIM + JZ clear2 + MOVUPS X0, (Y_PTR) + MOVUPS X1, 16(Y_PTR) + ADDQ $4*SIZE, Y_PTR + +clear2: + TESTQ $2, M_DIM + JZ clear1 + MOVUPS X0, (Y_PTR) + ADDQ $2*SIZE, Y_PTR + +clear1: + TESTQ $1, M_DIM + JZ prep_end + MOVSD X0, (Y_PTR) + +prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +gemv_start: + 
SHRQ $2, N + JZ c2 + +c4: + // LOAD 4 + INIT4 + + MOVQ M_DIM, M + SHRQ $2, M + JZ c4r2 + +c4r4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE4 + + ADDQ $4*SIZE, Y_PTR + + DECQ M + JNZ c4r4 + +c4r2: + TESTQ $2, M_DIM + JZ c4r1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x4 + STORE2 + + ADDQ $2*SIZE, Y_PTR + +c4r1: + TESTQ $1, M_DIM + JZ c4end + + // 4x1 KERNEL + KERNEL_1x4 + + ADDQ $SIZE, Y_PTR + +c4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ N + JNZ c4 + +c2: + TESTQ $2, N_DIM + JZ c1 + + // LOAD 2 + INIT2 + + MOVQ M_DIM, M + SHRQ $2, M + JZ c2r2 + +c2r4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x2 + STORE4 + + ADDQ $4*SIZE, Y_PTR + + DECQ M + JNZ c2r4 + +c2r2: + TESTQ $2, M_DIM + JZ c2r1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE2 + + ADDQ $2*SIZE, Y_PTR + +c2r1: + TESTQ $1, M_DIM + JZ c2end + + // 2x1 KERNEL + KERNEL_1x2 + + ADDQ $SIZE, Y_PTR + +c2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +c1: + TESTQ $1, N_DIM + JZ end + + // LOAD 1 + INIT1 + + MOVQ M_DIM, M + SHRQ $2, M + JZ c1r2 + +c1r4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x1 + STORE4 + + ADDQ $4*SIZE, Y_PTR + + DECQ M + JNZ c1r4 + +c1r2: + TESTQ $2, M_DIM + JZ c1r1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x1 + STORE2 + + ADDQ $2*SIZE, Y_PTR + +c1r1: + TESTQ $1, M_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + +end: + RET + +inc: // Algorithm for incX != 0 ( split loads in kernel ) + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + MOVQ Y_PTR, Y + + SHLQ $3, INC_Y + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + MOVSD $1.0, X0 + COMISD beta+88(FP), X0 + JE inc_gemv_start + + MOVSD $0.0, X0 + COMISD beta+88(FP), X0 + JE inc_gemv_clear + + MOVDDUP beta+88(FP), BETA + SHRQ $2, M + JZ inc_scal2 + +inc_scal4: + SCALEINC_4(Y_PTR, INC_Y, INC3_Y, BETA) + LEAQ 
(Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_scal4 + +inc_scal2: + TESTQ $2, M_DIM + JZ inc_scal1 + + SCALEINC_2(Y_PTR, INC_Y, BETA) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_scal1: + TESTQ $1, M_DIM + JZ inc_prep_end + SCALE_1(Y_PTR, BETA) + + JMP inc_prep_end + +inc_gemv_clear: // beta == 0 is special-cased to clear memory (no nan handling) + XORPS X0, X0 + XORPS X1, X1 + XORPS X2, X2 + XORPS X3, X3 + + SHRQ $2, M + JZ inc_clear2 + +inc_clear4: + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + MOVSD X2, (Y_PTR)(INC_Y*2) + MOVSD X3, (Y_PTR)(INC3_Y*1) + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_clear4 + +inc_clear2: + TESTQ $2, M_DIM + JZ inc_clear1 + MOVSD X0, (Y_PTR) + MOVSD X1, (Y_PTR)(INC_Y*1) + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_clear1: + TESTQ $1, M_DIM + JZ inc_prep_end + MOVSD X0, (Y_PTR) + +inc_prep_end: + MOVQ Y, Y_PTR + MOVQ M_DIM, M + +inc_gemv_start: + SHRQ $2, N + JZ inc_c2 + +inc_c4: + // LOAD 4 + INIT4 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c4r2 + +inc_c4r4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + + DECQ M + JNZ inc_c4r4 + +inc_c4r2: + TESTQ $2, M_DIM + JZ inc_c4r1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x4 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c4r1: + TESTQ $1, M_DIM + JZ inc_c4end + + // 4x1 KERNEL + KERNEL_1x4 + + ADDQ INC_Y, Y_PTR + +inc_c4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ N + JNZ inc_c4 + +inc_c2: + TESTQ $2, N_DIM + JZ inc_c1 + + // LOAD 2 + INIT2 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c2r2 + +inc_c2r4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x2 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_c2r4 + +inc_c2r2: + TESTQ $2, M_DIM + JZ inc_c2r1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c2r1: + TESTQ $1, M_DIM + JZ inc_c2end + + // 2x1 KERNEL + KERNEL_1x2 + + ADDQ INC_Y, Y_PTR + +inc_c2end: + LEAQ 
(X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_c1: + TESTQ $1, N_DIM + JZ inc_end + + // LOAD 1 + INIT1 + + MOVQ M_DIM, M + SHRQ $2, M + JZ inc_c1r2 + +inc_c1r4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x1 + STORE4_INC + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ M + JNZ inc_c1r4 + +inc_c1r2: + TESTQ $2, M_DIM + JZ inc_c1r1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x1 + STORE2_INC + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_c1r1: + TESTQ $1, M_DIM + JZ inc_end + + // 1x1 KERNEL + KERNEL_1x1 + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s new file mode 100644 index 0000000000..8c1b36a65e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/ger_amd64.s @@ -0,0 +1,591 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define SIZE 8 + +#define M_DIM m+0(FP) +#define M CX +#define N_DIM n+8(FP) +#define N BX + +#define TMP1 R14 +#define TMP2 R15 + +#define X_PTR SI +#define Y y_base+56(FP) +#define Y_PTR DX +#define A_ROW AX +#define A_PTR DI + +#define INC_X R8 +#define INC3_X R9 + +#define INC_Y R10 +#define INC3_Y R11 + +#define LDA R12 +#define LDA3 R13 + +#define ALPHA X0 + +#define LOAD4 \ + PREFETCHNTA (X_PTR )(INC_X*8) \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP (X_PTR)(INC_X*1), X2 \ + MOVDDUP (X_PTR)(INC_X*2), X3 \ + MOVDDUP (X_PTR)(INC3_X*1), X4 \ + MULPD ALPHA, X1 \ + MULPD ALPHA, X2 \ + MULPD ALPHA, X3 \ + MULPD ALPHA, X4 + +#define LOAD2 \ + MOVDDUP (X_PTR), X1 \ + MOVDDUP (X_PTR)(INC_X*1), X2 \ + MULPD ALPHA, X1 \ + MULPD ALPHA, X2 + +#define LOAD1 \ + MOVDDUP (X_PTR), X1 \ + MULPD ALPHA, X1 + +#define KERNEL_LOAD4 \ + MOVUPS (Y_PTR), X5 \ + MOVUPS 2*SIZE(Y_PTR), X6 + +#define KERNEL_LOAD4_INC \ + MOVLPD (Y_PTR), X5 \ + MOVHPD 
(Y_PTR)(INC_Y*1), X5 \ + MOVLPD (Y_PTR)(INC_Y*2), X6 \ + MOVHPD (Y_PTR)(INC3_Y*1), X6 + +#define KERNEL_LOAD2 \ + MOVUPS (Y_PTR), X5 + +#define KERNEL_LOAD2_INC \ + MOVLPD (Y_PTR), X5 \ + MOVHPD (Y_PTR)(INC_Y*1), X5 + +#define KERNEL_4x4 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MOVUPS X5, X9 \ + MOVUPS X6, X10 \ + MOVUPS X5, X11 \ + MOVUPS X6, X12 \ + MULPD X1, X5 \ + MULPD X1, X6 \ + MULPD X2, X7 \ + MULPD X2, X8 \ + MULPD X3, X9 \ + MULPD X3, X10 \ + MULPD X4, X11 \ + MULPD X4, X12 + +#define STORE_4x4 \ + MOVUPS (A_PTR), X13 \ + ADDPD X13, X5 \ + MOVUPS 2*SIZE(A_PTR), X14 \ + ADDPD X14, X6 \ + MOVUPS (A_PTR)(LDA*1), X15 \ + ADDPD X15, X7 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X0 \ + ADDPD X0, X8 \ + MOVUPS (A_PTR)(LDA*2), X13 \ + ADDPD X13, X9 \ + MOVUPS 2*SIZE(A_PTR)(LDA*2), X14 \ + ADDPD X14, X10 \ + MOVUPS (A_PTR)(LDA3*1), X15 \ + ADDPD X15, X11 \ + MOVUPS 2*SIZE(A_PTR)(LDA3*1), X0 \ + ADDPD X0, X12 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \ + MOVUPS X9, (A_PTR)(LDA*2) \ + MOVUPS X10, 2*SIZE(A_PTR)(LDA*2) \ + MOVUPS X11, (A_PTR)(LDA3*1) \ + MOVUPS X12, 2*SIZE(A_PTR)(LDA3*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_4x2 \ + MOVUPS X5, X6 \ + MOVUPS X5, X7 \ + MOVUPS X5, X8 \ + MULPD X1, X5 \ + MULPD X2, X6 \ + MULPD X3, X7 \ + MULPD X4, X8 + +#define STORE_4x2 \ + MOVUPS (A_PTR), X9 \ + ADDPD X9, X5 \ + MOVUPS (A_PTR)(LDA*1), X10 \ + ADDPD X10, X6 \ + MOVUPS (A_PTR)(LDA*2), X11 \ + ADDPD X11, X7 \ + MOVUPS (A_PTR)(LDA3*1), X12 \ + ADDPD X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + MOVUPS X7, (A_PTR)(LDA*2) \ + MOVUPS X8, (A_PTR)(LDA3*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_4x1 \ + MOVSD (Y_PTR), X5 \ + MOVSD X5, X6 \ + MOVSD X5, X7 \ + MOVSD X5, X8 \ + MULSD X1, X5 \ + MULSD X2, X6 \ + MULSD X3, X7 \ + MULSD X4, X8 + +#define STORE_4x1 \ + ADDSD (A_PTR), X5 \ + ADDSD (A_PTR)(LDA*1), X6 \ + ADDSD (A_PTR)(LDA*2), X7 \ + ADDSD (A_PTR)(LDA3*1), X8 \ + MOVSD X5, (A_PTR) \ 
+ MOVSD X6, (A_PTR)(LDA*1) \ + MOVSD X7, (A_PTR)(LDA*2) \ + MOVSD X8, (A_PTR)(LDA3*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_2x4 \ + MOVUPS X5, X7 \ + MOVUPS X6, X8 \ + MULPD X1, X5 \ + MULPD X1, X6 \ + MULPD X2, X7 \ + MULPD X2, X8 + +#define STORE_2x4 \ + MOVUPS (A_PTR), X9 \ + ADDPD X9, X5 \ + MOVUPS 2*SIZE(A_PTR), X10 \ + ADDPD X10, X6 \ + MOVUPS (A_PTR)(LDA*1), X11 \ + ADDPD X11, X7 \ + MOVUPS 2*SIZE(A_PTR)(LDA*1), X12 \ + ADDPD X12, X8 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + MOVUPS X7, (A_PTR)(LDA*1) \ + MOVUPS X8, 2*SIZE(A_PTR)(LDA*1) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_2x2 \ + MOVUPS X5, X6 \ + MULPD X1, X5 \ + MULPD X2, X6 + +#define STORE_2x2 \ + MOVUPS (A_PTR), X7 \ + ADDPD X7, X5 \ + MOVUPS (A_PTR)(LDA*1), X8 \ + ADDPD X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, (A_PTR)(LDA*1) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_2x1 \ + MOVSD (Y_PTR), X5 \ + MOVSD X5, X6 \ + MULSD X1, X5 \ + MULSD X2, X6 + +#define STORE_2x1 \ + ADDSD (A_PTR), X5 \ + ADDSD (A_PTR)(LDA*1), X6 \ + MOVSD X5, (A_PTR) \ + MOVSD X6, (A_PTR)(LDA*1) \ + ADDQ $SIZE, A_PTR + +#define KERNEL_1x4 \ + MULPD X1, X5 \ + MULPD X1, X6 + +#define STORE_1x4 \ + MOVUPS (A_PTR), X7 \ + ADDPD X7, X5 \ + MOVUPS 2*SIZE(A_PTR), X8 \ + ADDPD X8, X6 \ + MOVUPS X5, (A_PTR) \ + MOVUPS X6, 2*SIZE(A_PTR) \ + ADDQ $4*SIZE, A_PTR + +#define KERNEL_1x2 \ + MULPD X1, X5 + +#define STORE_1x2 \ + MOVUPS (A_PTR), X6 \ + ADDPD X6, X5 \ + MOVUPS X5, (A_PTR) \ + ADDQ $2*SIZE, A_PTR + +#define KERNEL_1x1 \ + MOVSD (Y_PTR), X5 \ + MULSD X1, X5 + +#define STORE_1x1 \ + ADDSD (A_PTR), X5 \ + MOVSD X5, (A_PTR) \ + ADDQ $SIZE, A_PTR + +// func Ger(m, n uintptr, alpha float64, +// x []float64, incX uintptr, +// y []float64, incY uintptr, +// a []float64, lda uintptr) +TEXT ·Ger(SB), NOSPLIT, $0 + MOVQ M_DIM, M + MOVQ N_DIM, N + CMPQ M, $0 + JE end + CMPQ N, $0 + JE end + + MOVDDUP alpha+16(FP), ALPHA + + MOVQ x_base+24(FP), X_PTR + MOVQ y_base+56(FP), Y_PTR + MOVQ a_base+88(FP), A_ROW + MOVQ incX+48(FP), 
INC_X // INC_X = incX * sizeof(float64) + SHLQ $3, INC_X + MOVQ lda+112(FP), LDA // LDA = LDA * sizeof(float64) + SHLQ $3, LDA + LEAQ (LDA)(LDA*2), LDA3 // LDA3 = LDA * 3 + LEAQ (INC_X)(INC_X*2), INC3_X // INC3_X = INC_X * 3 + MOVQ A_ROW, A_PTR + + XORQ TMP2, TMP2 + MOVQ M, TMP1 + SUBQ $1, TMP1 + IMULQ INC_X, TMP1 + NEGQ TMP1 + CMPQ INC_X, $0 + CMOVQLT TMP1, TMP2 + LEAQ (X_PTR)(TMP2*SIZE), X_PTR + + CMPQ incY+80(FP), $1 // Check for dense vector Y (fast-path) + JG inc + JL end + + SHRQ $2, M + JZ r2 + +r4: + // LOAD 4 + LOAD4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r4c2 + +r4c4: + // 4x4 KERNEL + KERNEL_LOAD4 + KERNEL_4x4 + STORE_4x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r4c4 + + // Reload ALPHA after it's clobbered by STORE_4x4 + MOVDDUP alpha+16(FP), ALPHA + +r4c2: + TESTQ $2, N_DIM + JZ r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2 + KERNEL_4x2 + STORE_4x2 + + ADDQ $2*SIZE, Y_PTR + +r4c1: + TESTQ $1, N_DIM + JZ r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ $SIZE, Y_PTR + +r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ r4 + +r2: + TESTQ $2, M_DIM + JZ r1 + + // LOAD 2 + LOAD2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r2c2 + +r2c4: + // 2x4 KERNEL + KERNEL_LOAD4 + KERNEL_2x4 + STORE_2x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r2c4 + +r2c2: + TESTQ $2, N_DIM + JZ r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2 + KERNEL_2x2 + STORE_2x2 + + ADDQ $2*SIZE, Y_PTR + +r2c1: + TESTQ $1, N_DIM + JZ r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ $SIZE, Y_PTR + +r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ r1c2 + +r1c4: + // 1x4 KERNEL + KERNEL_LOAD4 + KERNEL_1x4 + STORE_1x4 + + ADDQ $4*SIZE, Y_PTR + + DECQ N + JNZ r1c4 + +r1c2: + TESTQ $2, N_DIM + JZ r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2 + KERNEL_1x2 + STORE_1x2 + + ADDQ $2*SIZE, Y_PTR + +r1c1: + TESTQ $1, 
N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + + ADDQ $SIZE, Y_PTR + +end: + RET + +inc: // Algorithm for incY != 1 ( split loads in kernel ) + + MOVQ incY+80(FP), INC_Y // INC_Y = incY * sizeof(float64) + SHLQ $3, INC_Y + LEAQ (INC_Y)(INC_Y*2), INC3_Y // INC3_Y = INC_Y * 3 + + XORQ TMP2, TMP2 + MOVQ N, TMP1 + SUBQ $1, TMP1 + IMULQ INC_Y, TMP1 + NEGQ TMP1 + CMPQ INC_Y, $0 + CMOVQLT TMP1, TMP2 + LEAQ (Y_PTR)(TMP2*SIZE), Y_PTR + + SHRQ $2, M + JZ inc_r2 + +inc_r4: + // LOAD 4 + LOAD4 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r4c2 + +inc_r4c4: + // 4x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_4x4 + STORE_4x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r4c4 + + // Reload ALPHA after it's clobbered by STORE_4x4 + MOVDDUP alpha+16(FP), ALPHA + +inc_r4c2: + TESTQ $2, N_DIM + JZ inc_r4c1 + + // 4x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_4x2 + STORE_4x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r4c1: + TESTQ $1, N_DIM + JZ inc_r4end + + // 4x1 KERNEL + KERNEL_4x1 + STORE_4x1 + + ADDQ INC_Y, Y_PTR + +inc_r4end: + LEAQ (X_PTR)(INC_X*4), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*4), A_ROW + MOVQ A_ROW, A_PTR + + DECQ M + JNZ inc_r4 + +inc_r2: + TESTQ $2, M_DIM + JZ inc_r1 + + // LOAD 2 + LOAD2 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r2c2 + +inc_r2c4: + // 2x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_2x4 + STORE_2x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r2c4 + +inc_r2c2: + TESTQ $2, N_DIM + JZ inc_r2c1 + + // 2x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_2x2 + STORE_2x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r2c1: + TESTQ $1, N_DIM + JZ inc_r2end + + // 2x1 KERNEL + KERNEL_2x1 + STORE_2x1 + + ADDQ INC_Y, Y_PTR + +inc_r2end: + LEAQ (X_PTR)(INC_X*2), X_PTR + MOVQ Y, Y_PTR + LEAQ (A_ROW)(LDA*2), A_ROW + MOVQ A_ROW, A_PTR + +inc_r1: + TESTQ $1, M_DIM + JZ end + + // LOAD 1 + LOAD1 + + MOVQ N_DIM, N + SHRQ $2, N + JZ inc_r1c2 + +inc_r1c4: + // 1x4 KERNEL + KERNEL_LOAD4_INC + KERNEL_1x4 + STORE_1x4 + + LEAQ (Y_PTR)(INC_Y*4), Y_PTR + DECQ N + JNZ inc_r1c4 + +inc_r1c2: + TESTQ 
$2, N_DIM + JZ inc_r1c1 + + // 1x2 KERNEL + KERNEL_LOAD2_INC + KERNEL_1x2 + STORE_1x2 + + LEAQ (Y_PTR)(INC_Y*2), Y_PTR + +inc_r1c1: + TESTQ $1, N_DIM + JZ end + + // 1x1 KERNEL + KERNEL_1x1 + STORE_1x1 + + ADDQ INC_Y, Y_PTR + +inc_end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s new file mode 100644 index 0000000000..f87f856cad --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/l1norm_amd64.s @@ -0,0 +1,58 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func L1Dist(s, t []float64) float64 +TEXT ·L1Dist(SB), NOSPLIT, $0 + MOVQ s_base+0(FP), DI // DI = &s + MOVQ t_base+24(FP), SI // SI = &t + MOVQ s_len+8(FP), CX // CX = len(s) + CMPQ t_len+32(FP), CX // CX = max( CX, len(t) ) + CMOVQLE t_len+32(FP), CX + PXOR X3, X3 // norm = 0 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE l1_end + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $1, BX // BX = CX % 2 + SHRQ $1, CX // CX = floor( CX / 2 ) + JZ l1_tail_start // if CX == 0 { return 0 } + +l1_loop: // Loop unrolled 2x do { + MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1] + MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1] + MOVAPS X0, X2 + SUBPD X1, X0 + SUBPD X2, X1 + MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + ADDPD X0, X3 // norm += X0 + ADDQ $2, AX // i += 2 + LOOP l1_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE l1_end + +l1_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + PXOR X0, X0 // reset X0, X1 to break dependencies + PXOR X1, X1 + +l1_tail: + MOVSD (SI)(AX*8), X0 // X0 = t[i] + MOVSD (DI)(AX*8), X1 // x1 = s[i] + MOVAPD X0, X2 + SUBSD X1, X0 + SUBSD X2, X1 + MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + ADDSD X0, X3 // norm += X0 + +l1_end: + MOVAPS X3, X2 + SHUFPD $1, X2, X2 + ADDSD X3, X2 
// X2 = X3[1] + X3[0] + MOVSD X2, ret+48(FP) // return X2 + RET + diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s new file mode 100644 index 0000000000..b062592800 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/linfnorm_amd64.s @@ -0,0 +1,57 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +// func LinfDist(s, t []float64) float64 +TEXT ·LinfDist(SB), NOSPLIT, $0 + MOVQ s_base+0(FP), DI // DI = &s + MOVQ t_base+24(FP), SI // SI = &t + MOVQ s_len+8(FP), CX // CX = len(s) + CMPQ t_len+32(FP), CX // CX = max( CX, len(t) ) + CMOVQLE t_len+32(FP), CX + PXOR X3, X3 // norm = 0 + CMPQ CX, $0 // if CX == 0 { return 0 } + JE l1_end + XORQ AX, AX // i = 0 + MOVQ CX, BX + ANDQ $1, BX // BX = CX % 2 + SHRQ $1, CX // CX = floor( CX / 2 ) + JZ l1_tail_start // if CX == 0 { return 0 } + +l1_loop: // Loop unrolled 2x do { + MOVUPS (SI)(AX*8), X0 // X0 = t[i:i+1] + MOVUPS (DI)(AX*8), X1 // X1 = s[i:i+1] + MOVAPS X0, X2 + SUBPD X1, X0 + SUBPD X2, X1 + MAXPD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + MAXPD X0, X3 // norm = max( norm, X0 ) + ADDQ $2, AX // i += 2 + LOOP l1_loop // } while --CX > 0 + CMPQ BX, $0 // if BX == 0 { return } + JE l1_end + +l1_tail_start: // Reset loop registers + MOVQ BX, CX // Loop counter: CX = BX + PXOR X0, X0 // reset X0, X1 to break dependencies + PXOR X1, X1 + +l1_tail: + MOVSD (SI)(AX*8), X0 // X0 = t[i] + MOVSD (DI)(AX*8), X1 // X1 = s[i] + MOVAPD X0, X2 + SUBSD X1, X0 + SUBSD X2, X1 + MAXSD X1, X0 // X0 = max( X0 - X1, X1 - X0 ) + MAXSD X0, X3 // norm = max( norm, X0 ) + +l1_end: + MOVAPS X3, X2 + SHUFPD $1, X2, X2 + MAXSD X3, X2 // X2 = max( X3[1], X3[0] ) + MOVSD X2, ret+48(FP) // return X2 + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go 
b/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go new file mode 100644 index 0000000000..3cc7aca69a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scal.go @@ -0,0 +1,57 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package f64 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float64, x []float64) { + for i := range x { + x[i] *= alpha + } +} + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float64, alpha float64, x []float64) { + for i, v := range x { + dst[i] = alpha * v + } +} + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float64, x []float64, n, incX uintptr) { + var ix uintptr + for i := 0; i < int(n); i++ { + x[ix] *= alpha + ix += incX + } +} + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) { + var idst, ix uintptr + for i := 0; i < int(n); i++ { + dst[idst] = alpha * x[ix] + ix += incX + idst += incDst + } +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s new file mode 100644 index 0000000000..fb8b545eba --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalinc_amd64.s @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R9 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalInc(alpha float64, x []float64, n, incX uintptr) +TEXT ·ScalInc(SB), NOSPLIT, $0 + MOVSD alpha+0(FP), ALPHA // ALPHA = alpha + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ incX+40(FP), INC_X // INC_X = incX + SHLQ $3, INC_X // INC_X *= sizeof(float64) + MOVQ n+32(FP), LEN // LEN = n + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + +loop: // do { // x[i] *= alpha unrolled 4x. + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + MOVSD X2, (X_PTR) // x[i] = X_i + MOVSD X3, (X_PTR)(INC_X*1) + MOVSD X4, (X_PTR)(INC_X*2) + MOVSD X5, (X_PTR)(INCx3_X*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: // do { + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + MOVSD X2, (X_PTR) // x[i] = X_i + MOVSD X3, (X_PTR)(INC_X*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + + ANDQ $1, TAIL + JZ end + +tail_one: + MOVSD (X_PTR), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (X_PTR) // x[i] = X_i + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s 
b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s new file mode 100644 index 0000000000..186fd1c05f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalincto_amd64.s @@ -0,0 +1,122 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define DST_PTR DI +#define LEN CX +#define TAIL BX +#define INC_X R8 +#define INCx3_X R9 +#define INC_DST R10 +#define INCx3_DST R11 +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) +TEXT ·ScalIncTo(SB), NOSPLIT, $0 + MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst + MOVQ incDst+24(FP), INC_DST // INC_DST = incDst + SHLQ $3, INC_DST // INC_DST *= sizeof(float64) + MOVSD alpha+32(FP), ALPHA // ALPHA = alpha + MOVQ x_base+40(FP), X_PTR // X_PTR = &x + MOVQ n+64(FP), LEN // LEN = n + MOVQ incX+72(FP), INC_X // INC_X = incX + SHLQ $3, INC_X // INC_X *= sizeof(float64) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + MOVQ LEN, TAIL + ANDQ $3, TAIL // TAIL = LEN % 4 + SHRQ $2, LEN // LEN = floor( LEN / 4 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3 + LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3 + +loop: // do { // x[i] *= alpha unrolled 4x. 
+ MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MOVSD (X_PTR)(INC_X*2), X4 + MOVSD (X_PTR)(INCx3_X*1), X5 + + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA_2, X3 + MULSD ALPHA, X4 + MULSD ALPHA_2, X5 + + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + MOVSD X4, (DST_PTR)(INC_DST*2) + MOVSD X5, (DST_PTR)(INCx3_DST*1) + + LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4]) + LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4]) + DECQ LEN + JNZ loop // } while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( LEN / 2 ) + JZ tail_one + +tail_two: + MOVSD (X_PTR), X2 // X_i = x[i] + MOVSD (X_PTR)(INC_X*1), X3 + MULSD ALPHA, X2 // X_i *= a + MULSD ALPHA, X3 + MOVSD X2, (DST_PTR) // dst[i] = X_i + MOVSD X3, (DST_PTR)(INC_DST*1) + + LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2]) + LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2]) + + ANDQ $1, TAIL + JZ end + +tail_one: + MOVSD (X_PTR), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (DST_PTR) // x[i] = X_i + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s new file mode 100644 index 0000000000..f852c7f7c8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitary_amd64.s @@ -0,0 +1,112 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x0824 // @ MOVDDUP XMM0, 8[RSP] + +#define X_PTR SI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalUnitary(alpha float64, x []float64) +TEXT ·ScalUnitary(SB), NOSPLIT, $0 + MOVDDUP_ALPHA // ALPHA = { alpha, alpha } + MOVQ x_base+8(FP), X_PTR // X_PTR = &x + MOVQ x_len+16(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + XORQ IDX, IDX // IDX = 0 + + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 + +loop: // do { // x[i] *= alpha unrolled 8x. + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i + MOVUPS X3, 16(X_PTR)(IDX*8) + MOVUPS X4, 32(X_PTR)(IDX*8) + MOVUPS X5, 48(X_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop registers + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if n == 0 goto end + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (X_PTR)(IDX*8) // x[i] = X_i + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { return } + +tail_one: + // x[i] *= alpha for the remaining element. 
+ MOVSD (X_PTR)(IDX*8), X2 + MULSD ALPHA, X2 + MOVSD X2, (X_PTR)(IDX*8) + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s new file mode 100644 index 0000000000..d2b607f525 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/scalunitaryto_amd64.s @@ -0,0 +1,113 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Some of the loop unrolling code is copied from: +// http://golang.org/src/math/big/arith_amd64.s +// which is distributed under these terms: +// +// Copyright (c) 2012 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//+build !noasm,!appengine,!safe + +#include "textflag.h" + +#define MOVDDUP_ALPHA LONG $0x44120FF2; WORD $0x2024 // @ MOVDDUP 32(SP), X0 /*XMM0, 32[RSP]*/ + +#define X_PTR SI +#define DST_PTR DI +#define IDX AX +#define LEN CX +#define TAIL BX +#define ALPHA X0 +#define ALPHA_2 X1 + +// func ScalUnitaryTo(dst []float64, alpha float64, x []float64) +// This function assumes len(dst) >= len(x). +TEXT ·ScalUnitaryTo(SB), NOSPLIT, $0 + MOVQ x_base+32(FP), X_PTR // X_PTR = &x + MOVQ dst_base+0(FP), DST_PTR // DST_PTR = &dst + MOVDDUP_ALPHA // ALPHA = { alpha, alpha } + MOVQ x_len+40(FP), LEN // LEN = len(x) + CMPQ LEN, $0 + JE end // if LEN == 0 { return } + + XORQ IDX, IDX // IDX = 0 + MOVQ LEN, TAIL + ANDQ $7, TAIL // TAIL = LEN % 8 + SHRQ $3, LEN // LEN = floor( LEN / 8 ) + JZ tail_start // if LEN == 0 { goto tail_start } + + MOVUPS ALPHA, ALPHA_2 // ALPHA_2 = ALPHA for pipelining + +loop: // do { // dst[i] = alpha * x[i] unrolled 8x. 
+ MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MOVUPS 16(X_PTR)(IDX*8), X3 + MOVUPS 32(X_PTR)(IDX*8), X4 + MOVUPS 48(X_PTR)(IDX*8), X5 + + MULPD ALPHA, X2 // X_i *= ALPHA + MULPD ALPHA_2, X3 + MULPD ALPHA, X4 + MULPD ALPHA_2, X5 + + MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i + MOVUPS X3, 16(DST_PTR)(IDX*8) + MOVUPS X4, 32(DST_PTR)(IDX*8) + MOVUPS X5, 48(DST_PTR)(IDX*8) + + ADDQ $8, IDX // i += 8 + DECQ LEN + JNZ loop // while --LEN > 0 + CMPQ TAIL, $0 + JE end // if TAIL == 0 { return } + +tail_start: // Reset loop counters + MOVQ TAIL, LEN // Loop counter: LEN = TAIL + SHRQ $1, LEN // LEN = floor( TAIL / 2 ) + JZ tail_one // if LEN == 0 { goto tail_one } + +tail_two: // do { + MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i] + MULPD ALPHA, X2 // X_i *= ALPHA + MOVUPS X2, (DST_PTR)(IDX*8) // dst[i] = X_i + ADDQ $2, IDX // i += 2 + DECQ LEN + JNZ tail_two // while --LEN > 0 + + ANDQ $1, TAIL + JZ end // if TAIL == 0 { return } + +tail_one: + MOVSD (X_PTR)(IDX*8), X2 // X_i = x[i] + MULSD ALPHA, X2 // X_i *= ALPHA + MOVSD X2, (DST_PTR)(IDX*8) // dst[i] = X_i + +end: + RET diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go new file mode 100644 index 0000000000..a51b94514a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_amd64.go @@ -0,0 +1,172 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +package f64 + +// L1Norm is +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum +func L1Norm(x []float64) (sum float64) + +// L1NormInc is +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum +func L1NormInc(x []float64, n, incX int) (sum float64) + +// AddConst is +// for i := range x { +// x[i] += alpha +// } +func AddConst(alpha float64, x []float64) + +// Add is +// for i, v := range s { +// dst[i] += v +// } +func Add(dst, s []float64) + +// AxpyUnitary is +// for i, v := range x { +// y[i] += alpha * v +// } +func AxpyUnitary(alpha float64, x, y []float64) + +// AxpyUnitaryTo is +// for i, v := range x { +// dst[i] = alpha*v + y[i] +// } +func AxpyUnitaryTo(dst []float64, alpha float64, x, y []float64) + +// AxpyInc is +// for i := 0; i < int(n); i++ { +// y[iy] += alpha * x[ix] +// ix += incX +// iy += incY +// } +func AxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) + +// AxpyIncTo is +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha*x[ix] + y[iy] +// ix += incX +// iy += incY +// idst += incDst +// } +func AxpyIncTo(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) + +// CumSum is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst +func CumSum(dst, s []float64) []float64 + +// CumProd is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst +func CumProd(dst, s []float64) []float64 + +// Div is +// for i, v := range s { +// dst[i] /= v +// } +func Div(dst, s []float64) + +// DivTo is +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst +func DivTo(dst, x, y []float64) []float64 + +// DotUnitary is +// for i, v := range x { +// sum += y[i] * v +// } +// return sum +func DotUnitary(x, y []float64) (sum float64) 
+ +// DotInc is +// for i := 0; i < int(n); i++ { +// sum += y[iy] * x[ix] +// ix += incX +// iy += incY +// } +// return sum +func DotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) + +// L1Dist is +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm +func L1Dist(s, t []float64) float64 + +// LinfDist is +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm +func LinfDist(s, t []float64) float64 + +// ScalUnitary is +// for i := range x { +// x[i] *= alpha +// } +func ScalUnitary(alpha float64, x []float64) + +// ScalUnitaryTo is +// for i, v := range x { +// dst[i] = alpha * v +// } +func ScalUnitaryTo(dst []float64, alpha float64, x []float64) + +// ScalInc is +// var ix uintptr +// for i := 0; i < int(n); i++ { +// x[ix] *= alpha +// ix += incX +// } +func ScalInc(alpha float64, x []float64, n, incX uintptr) + +// ScalIncTo is +// var idst, ix uintptr +// for i := 0; i < int(n); i++ { +// dst[idst] = alpha * x[ix] +// ix += incX +// idst += incDst +// } +func ScalIncTo(dst []float64, incDst uintptr, alpha float64, x []float64, n, incX uintptr) + +// Sum is +// var sum float64 +// for i := range x { +// sum += x[i] +// } +func Sum(x []float64) float64 diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go new file mode 100644 index 0000000000..670978aa47 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/stubs_noasm.go @@ -0,0 +1,170 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64 noasm appengine safe + +package f64 + +import "math" + +// L1Norm is +// for _, v := range x { +// sum += math.Abs(v) +// } +// return sum +func L1Norm(x []float64) (sum float64) { + for _, v := range x { + sum += math.Abs(v) + } + return sum +} + +// L1NormInc is +// for i := 0; i < n*incX; i += incX { +// sum += math.Abs(x[i]) +// } +// return sum +func L1NormInc(x []float64, n, incX int) (sum float64) { + for i := 0; i < n*incX; i += incX { + sum += math.Abs(x[i]) + } + return sum +} + +// Add is +// for i, v := range s { +// dst[i] += v +// } +func Add(dst, s []float64) { + for i, v := range s { + dst[i] += v + } +} + +// AddConst is +// for i := range x { +// x[i] += alpha +// } +func AddConst(alpha float64, x []float64) { + for i := range x { + x[i] += alpha + } +} + +// CumSum is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] + v +// } +// return dst +func CumSum(dst, s []float64) []float64 { + if len(s) == 0 { + return dst + } + dst[0] = s[0] + for i, v := range s[1:] { + dst[i+1] = dst[i] + v + } + return dst +} + +// CumProd is +// if len(s) == 0 { +// return dst +// } +// dst[0] = s[0] +// for i, v := range s[1:] { +// dst[i+1] = dst[i] * v +// } +// return dst +func CumProd(dst, s []float64) []float64 { + if len(s) == 0 { + return dst + } + dst[0] = s[0] + for i, v := range s[1:] { + dst[i+1] = dst[i] * v + } + return dst +} + +// Div is +// for i, v := range s { +// dst[i] /= v +// } +func Div(dst, s []float64) { + for i, v := range s { + dst[i] /= v + } +} + +// DivTo is +// for i, v := range s { +// dst[i] = v / t[i] +// } +// return dst +func DivTo(dst, s, t []float64) []float64 { + for i, v := range s { + dst[i] = v / t[i] + } + return dst +} + +// L1Dist is +// var norm float64 +// for i, v := range s { +// norm += math.Abs(t[i] - v) +// } +// return norm +func L1Dist(s, t []float64) float64 { + var norm float64 + for i, v := range s { + norm += math.Abs(t[i] - v) 
+ } + return norm +} + +// LinfDist is +// var norm float64 +// if len(s) == 0 { +// return 0 +// } +// norm = math.Abs(t[0] - s[0]) +// for i, v := range s[1:] { +// absDiff := math.Abs(t[i+1] - v) +// if absDiff > norm || math.IsNaN(norm) { +// norm = absDiff +// } +// } +// return norm +func LinfDist(s, t []float64) float64 { + var norm float64 + if len(s) == 0 { + return 0 + } + norm = math.Abs(t[0] - s[0]) + for i, v := range s[1:] { + absDiff := math.Abs(t[i+1] - v) + if absDiff > norm || math.IsNaN(norm) { + norm = absDiff + } + } + return norm +} + +// Sum is +// var sum float64 +// for i := range x { +// sum += x[i] +// } +func Sum(x []float64) float64 { + var sum float64 + for _, v := range x { + sum += v + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s b/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s new file mode 100644 index 0000000000..22eede6e11 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/asm/f64/sum_amd64.s @@ -0,0 +1,100 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +#include "textflag.h" + +#define X_PTR SI +#define IDX AX +#define LEN CX +#define TAIL BX +#define SUM X0 +#define SUM_1 X1 +#define SUM_2 X2 +#define SUM_3 X3 + +// func Sum(x []float64) float64 +TEXT ·Sum(SB), NOSPLIT, $0 + MOVQ x_base+0(FP), X_PTR // X_PTR = &x + MOVQ x_len+8(FP), LEN // LEN = len(x) + XORQ IDX, IDX // i = 0 + PXOR SUM, SUM // p_sum_i = 0 + CMPQ LEN, $0 // if LEN == 0 { return 0 } + JE sum_end + + PXOR SUM_1, SUM_1 + PXOR SUM_2, SUM_2 + PXOR SUM_3, SUM_3 + + MOVQ X_PTR, TAIL // Check memory alignment + ANDQ $15, TAIL // TAIL = &y % 16 + JZ no_trim // if TAIL == 0 { goto no_trim } + + // Align on 16-byte boundary + ADDSD (X_PTR), X0 // X0 += x[0] + INCQ IDX // i++ + DECQ LEN // LEN-- + DECQ TAIL // TAIL-- + JZ sum_end // if TAIL == 0 { return } + +no_trim: + MOVQ LEN, TAIL + SHRQ $4, LEN // LEN = floor( n / 16 ) + JZ sum_tail8 // if LEN == 0 { goto sum_tail8 } + +sum_loop: // sum 16x wide do { + ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] + ADDPD 16(SI)(AX*8), SUM_1 + ADDPD 32(SI)(AX*8), SUM_2 + ADDPD 48(SI)(AX*8), SUM_3 + ADDPD 64(SI)(AX*8), SUM + ADDPD 80(SI)(AX*8), SUM_1 + ADDPD 96(SI)(AX*8), SUM_2 + ADDPD 112(SI)(AX*8), SUM_3 + ADDQ $16, IDX // i += 16 + DECQ LEN + JNZ sum_loop // } while --CX > 0 + +sum_tail8: + TESTQ $8, TAIL + JZ sum_tail4 + + ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] + ADDPD 16(SI)(AX*8), SUM_1 + ADDPD 32(SI)(AX*8), SUM_2 + ADDPD 48(SI)(AX*8), SUM_3 + ADDQ $8, IDX + +sum_tail4: + ADDPD SUM_3, SUM + ADDPD SUM_2, SUM_1 + + TESTQ $4, TAIL + JZ sum_tail2 + + ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] + ADDPD 16(SI)(AX*8), SUM_1 + ADDQ $4, IDX + +sum_tail2: + ADDPD SUM_1, SUM + + TESTQ $2, TAIL + JZ sum_tail1 + + ADDPD (SI)(AX*8), SUM // sum_i += x[i:i+2] + ADDQ $2, IDX + +sum_tail1: + HADDPD SUM, SUM // sum_i[0] += sum_i[1] + + TESTQ $1, TAIL + JZ sum_end + + ADDSD (SI)(IDX*8), SUM + +sum_end: // return sum + MOVSD SUM, sum+24(FP) + RET diff --git 
a/vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go new file mode 100644 index 0000000000..ac6eb81c0e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/abs.go @@ -0,0 +1,14 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +import math "gonum.org/v1/gonum/internal/math32" + +// Abs returns the absolute value (also called the modulus) of x. +func Abs(x complex64) float32 { return math.Hypot(real(x), imag(x)) } diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go new file mode 100644 index 0000000000..705262f2f9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/conj.go @@ -0,0 +1,12 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +// Conj returns the complex conjugate of x. +func Conj(x complex64) complex64 { return complex(real(x), -imag(x)) } diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go new file mode 100644 index 0000000000..5424ea099c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/doc.go @@ -0,0 +1,7 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package cmplx64 provides complex64 versions of standard library math/cmplx +// package routines used by gonum/blas. +package cmplx64 // import "gonum.org/v1/gonum/internal/cmplx64" diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go new file mode 100644 index 0000000000..21d3d180e1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/isinf.go @@ -0,0 +1,25 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +import math "gonum.org/v1/gonum/internal/math32" + +// IsInf returns true if either real(x) or imag(x) is an infinity. +func IsInf(x complex64) bool { + if math.IsInf(real(x), 0) || math.IsInf(imag(x), 0) { + return true + } + return false +} + +// Inf returns a complex infinity, complex(+Inf, +Inf). +func Inf() complex64 { + inf := math.Inf(1) + return complex(inf, inf) +} diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go new file mode 100644 index 0000000000..7e0bf788f1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/isnan.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +import math "gonum.org/v1/gonum/internal/math32" + +// IsNaN returns true if either real(x) or imag(x) is NaN +// and neither is an infinity. 
+func IsNaN(x complex64) bool { + switch { + case math.IsInf(real(x), 0) || math.IsInf(imag(x), 0): + return false + case math.IsNaN(real(x)) || math.IsNaN(imag(x)): + return true + } + return false +} + +// NaN returns a complex ``not-a-number'' value. +func NaN() complex64 { + nan := math.NaN() + return complex(nan, nan) +} diff --git a/vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go b/vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go new file mode 100644 index 0000000000..439987b4ba --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/cmplx64/sqrt.go @@ -0,0 +1,108 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmplx64 + +import math "gonum.org/v1/gonum/internal/math32" + +// The original C code, the long comment, and the constants +// below are from http://netlib.sandia.gov/cephes/c9x-complex/clog.c. +// The go code is a simplified version of the original C. +// +// Cephes Math Library Release 2.8: June, 2000 +// Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier +// +// The readme file at http://netlib.sandia.gov/cephes/ says: +// Some software in this archive may be from the book _Methods and +// Programs for Mathematical Functions_ (Prentice-Hall or Simon & Schuster +// International, 1989) or from the Cephes Mathematical Library, a +// commercial product. In either event, it is copyrighted by the author. +// What you see here may be used freely but it comes with no support or +// guarantee. +// +// The two known misprints in the book are repaired here in the +// source listings for the gamma function and the incomplete beta +// integral. +// +// Stephen L. 
Moshier +// moshier@na-net.ornl.gov + +// Complex square root +// +// DESCRIPTION: +// +// If z = x + iy, r = |z|, then +// +// 1/2 +// Re w = [ (r + x)/2 ] , +// +// 1/2 +// Im w = [ (r - x)/2 ] . +// +// Cancelation error in r-x or r+x is avoided by using the +// identity 2 Re w Im w = y. +// +// Note that -w is also a square root of z. The root chosen +// is always in the right half plane and Im w has the same sign as y. +// +// ACCURACY: +// +// Relative error: +// arithmetic domain # trials peak rms +// DEC -10,+10 25000 3.2e-17 9.6e-18 +// IEEE -10,+10 1,000,000 2.9e-16 6.1e-17 + +// Sqrt returns the square root of x. +// The result r is chosen so that real(r) ≥ 0 and imag(r) has the same sign as imag(x). +func Sqrt(x complex64) complex64 { + if imag(x) == 0 { + if real(x) == 0 { + return complex(0, 0) + } + if real(x) < 0 { + return complex(0, math.Sqrt(-real(x))) + } + return complex(math.Sqrt(real(x)), 0) + } + if real(x) == 0 { + if imag(x) < 0 { + r := math.Sqrt(-0.5 * imag(x)) + return complex(r, -r) + } + r := math.Sqrt(0.5 * imag(x)) + return complex(r, r) + } + a := real(x) + b := imag(x) + var scale float32 + // Rescale to avoid internal overflow or underflow. + if math.Abs(a) > 4 || math.Abs(b) > 4 { + a *= 0.25 + b *= 0.25 + scale = 2 + } else { + a *= 1.8014398509481984e16 // 2**54 + b *= 1.8014398509481984e16 + scale = 7.450580596923828125e-9 // 2**-27 + } + r := math.Hypot(a, b) + var t float32 + if a > 0 { + t = math.Sqrt(0.5*r + 0.5*a) + r = scale * math.Abs((0.5*b)/t) + t *= scale + } else { + r = math.Sqrt(0.5*r - 0.5*a) + t = scale * math.Abs((0.5*b)/r) + r *= scale + } + if b < 0 { + return complex(t, -r) + } + return complex(t, r) +} diff --git a/vendor/gonum.org/v1/gonum/internal/math32/doc.go b/vendor/gonum.org/v1/gonum/internal/math32/doc.go new file mode 100644 index 0000000000..68917c64e6 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/doc.go @@ -0,0 +1,7 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package math32 provides float32 versions of standard library math package +// routines used by gonum/blas/native. +package math32 // import "gonum.org/v1/gonum/internal/math32" diff --git a/vendor/gonum.org/v1/gonum/internal/math32/math.go b/vendor/gonum.org/v1/gonum/internal/math32/math.go new file mode 100644 index 0000000000..56c90be027 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/math.go @@ -0,0 +1,111 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math32 + +import ( + "math" +) + +const ( + unan = 0x7fc00000 + uinf = 0x7f800000 + uneginf = 0xff800000 + mask = 0x7f8 >> 3 + shift = 32 - 8 - 1 + bias = 127 +) + +// Abs returns the absolute value of x. +// +// Special cases are: +// Abs(±Inf) = +Inf +// Abs(NaN) = NaN +func Abs(x float32) float32 { + switch { + case x < 0: + return -x + case x == 0: + return 0 // return correctly abs(-0) + } + return x +} + +// Copysign returns a value with the magnitude +// of x and the sign of y. +func Copysign(x, y float32) float32 { + const sign = 1 << 31 + return math.Float32frombits(math.Float32bits(x)&^sign | math.Float32bits(y)&sign) +} + +// Hypot returns Sqrt(p*p + q*q), taking care to avoid +// unnecessary overflow and underflow. 
+// +// Special cases are: +// Hypot(±Inf, q) = +Inf +// Hypot(p, ±Inf) = +Inf +// Hypot(NaN, q) = NaN +// Hypot(p, NaN) = NaN +func Hypot(p, q float32) float32 { + // special cases + switch { + case IsInf(p, 0) || IsInf(q, 0): + return Inf(1) + case IsNaN(p) || IsNaN(q): + return NaN() + } + if p < 0 { + p = -p + } + if q < 0 { + q = -q + } + if p < q { + p, q = q, p + } + if p == 0 { + return 0 + } + q = q / p + return p * Sqrt(1+q*q) +} + +// Inf returns positive infinity if sign >= 0, negative infinity if sign < 0. +func Inf(sign int) float32 { + var v uint32 + if sign >= 0 { + v = uinf + } else { + v = uneginf + } + return math.Float32frombits(v) +} + +// IsInf reports whether f is an infinity, according to sign. +// If sign > 0, IsInf reports whether f is positive infinity. +// If sign < 0, IsInf reports whether f is negative infinity. +// If sign == 0, IsInf reports whether f is either infinity. +func IsInf(f float32, sign int) bool { + // Test for infinity by comparing against maximum float. + // To avoid the floating-point hardware, could use: + // x := math.Float32bits(f); + // return sign >= 0 && x == uinf || sign <= 0 && x == uneginf; + return sign >= 0 && f > math.MaxFloat32 || sign <= 0 && f < -math.MaxFloat32 +} + +// IsNaN reports whether f is an IEEE 754 ``not-a-number'' value. +func IsNaN(f float32) (is bool) { + // IEEE 754 says that only NaNs satisfy f != f. + // To avoid the floating-point hardware, could use: + // x := math.Float32bits(f); + // return uint32(x>>shift)&mask == mask && x != uinf && x != uneginf + return f != f +} + +// NaN returns an IEEE 754 ``not-a-number'' value. +func NaN() float32 { return math.Float32frombits(unan) } diff --git a/vendor/gonum.org/v1/gonum/internal/math32/signbit.go b/vendor/gonum.org/v1/gonum/internal/math32/signbit.go new file mode 100644 index 0000000000..3e9f0bb41d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/signbit.go @@ -0,0 +1,16 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math32 + +import "math" + +// Signbit returns true if x is negative or negative zero. +func Signbit(x float32) bool { + return math.Float32bits(x)&(1<<31) != 0 +} diff --git a/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go b/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go new file mode 100644 index 0000000000..bf630de99c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/sqrt.go @@ -0,0 +1,25 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 noasm appengine safe + +package math32 + +import ( + "math" +) + +// Sqrt returns the square root of x. +// +// Special cases are: +// Sqrt(+Inf) = +Inf +// Sqrt(±0) = ±0 +// Sqrt(x < 0) = NaN +// Sqrt(NaN) = NaN +func Sqrt(x float32) float32 { + // FIXME(kortschak): Direct translation of the math package + // asm code for 386 fails to build. No test hardware is available + // for arm, so using conversion instead. + return float32(math.Sqrt(float64(x))) +} diff --git a/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go new file mode 100644 index 0000000000..905ae5c686 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.go @@ -0,0 +1,20 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !noasm,!appengine,!safe + +package math32 + +// Sqrt returns the square root of x. +// +// Special cases are: +// Sqrt(+Inf) = +Inf +// Sqrt(±0) = ±0 +// Sqrt(x < 0) = NaN +// Sqrt(NaN) = NaN +func Sqrt(x float32) float32 diff --git a/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s new file mode 100644 index 0000000000..fa2b8696ea --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/math32/sqrt_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !noasm,!appengine,!safe + +// TODO(kortschak): use textflag.h after we drop Go 1.3 support +//#include "textflag.h" +// Don't insert stack check preamble. +#define NOSPLIT 4 + +// func Sqrt(x float32) float32 +TEXT ·Sqrt(SB),NOSPLIT,$0 + SQRTSS x+0(FP), X0 + MOVSS X0, ret+8(FP) + RET diff --git a/vendor/gonum.org/v1/gonum/lapack/.gitignore b/vendor/gonum.org/v1/gonum/lapack/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/gonum.org/v1/gonum/lapack/README.md b/vendor/gonum.org/v1/gonum/lapack/README.md new file mode 100644 index 0000000000..c355017c8b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/README.md @@ -0,0 +1,28 @@ +Gonum LAPACK [![GoDoc](https://godoc.org/gonum.org/v1/gonum/lapack?status.svg)](https://godoc.org/gonum.org/v1/gonum/lapack) +====== + +A collection of packages to provide LAPACK functionality for the Go programming +language (http://golang.org). This provides a partial implementation in native go +and a wrapper using cgo to a c-based implementation. + +## Installation + +``` + go get gonum.org/v1/gonum/lapack/... 
+``` + +## Packages + +### lapack + +Defines the LAPACK API based on http://www.netlib.org/lapack/lapacke.html + +### lapack/gonum + +Go implementation of the LAPACK API (incomplete, implements the `float64` API). + +### lapack/lapack64 + +Wrappers for an implementation of the double (i.e., `float64`) precision real parts of +the LAPACK API. + diff --git a/vendor/gonum.org/v1/gonum/lapack/doc.go b/vendor/gonum.org/v1/gonum/lapack/doc.go new file mode 100644 index 0000000000..2475cb4aa0 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lapack provides interfaces for the LAPACK linear algebra standard. +package lapack // import "gonum.org/v1/gonum/lapack" diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go new file mode 100644 index 0000000000..5f3833fd97 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dbdsqr.go @@ -0,0 +1,505 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dbdsqr performs a singular value decomposition of a real n×n bidiagonal matrix. +// +// The SVD of the bidiagonal matrix B is +// B = Q * S * P^T +// where S is a diagonal matrix of singular values, Q is an orthogonal matrix of +// left singular vectors, and P is an orthogonal matrix of right singular vectors. +// +// Q and P are only computed if requested. If left singular vectors are requested, +// this routine returns U * Q instead of Q, and if right singular vectors are +// requested P^T * VT is returned instead of P^T. 
+// +// Frequently Dbdsqr is used in conjunction with Dgebrd which reduces a general +// matrix A into bidiagonal form. In this case, the SVD of A is +// A = (U * Q) * S * (P^T * VT) +// +// This routine may also compute Q^T * C. +// +// d and e contain the elements of the bidiagonal matrix b. d must have length at +// least n, and e must have length at least n-1. Dbdsqr will panic if there is +// insufficient length. On exit, D contains the singular values of B in decreasing +// order. +// +// VT is a matrix of size n×ncvt whose elements are stored in vt. The elements +// of vt are modified to contain P^T * VT on exit. VT is not used if ncvt == 0. +// +// U is a matrix of size nru×n whose elements are stored in u. The elements +// of u are modified to contain U * Q on exit. U is not used if nru == 0. +// +// C is a matrix of size n×ncc whose elements are stored in c. The elements +// of c are modified to contain Q^T * C on exit. C is not used if ncc == 0. +// +// work contains temporary storage and must have length at least 4*(n-1). Dbdsqr +// will panic if there is insufficient working memory. +// +// Dbdsqr returns whether the decomposition was successful. +// +// Dbdsqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dbdsqr(uplo blas.Uplo, n, ncvt, nru, ncc int, d, e, vt []float64, ldvt int, u []float64, ldu int, c []float64, ldc int, work []float64) (ok bool) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case ncvt < 0: + panic(ncvtLT0) + case nru < 0: + panic(nruLT0) + case ncc < 0: + panic(nccLT0) + case ldvt < max(1, ncvt): + panic(badLdVT) + case (ldu < max(1, n) && nru > 0) || (ldu < 1 && nru == 0): + panic(badLdU) + case ldc < max(1, ncc): + panic(badLdC) + } + + // Quick return if possible. 
+ if n == 0 { + return true + } + + if len(vt) < (n-1)*ldvt+ncvt && ncvt != 0 { + panic(shortVT) + } + if len(u) < (nru-1)*ldu+n && nru != 0 { + panic(shortU) + } + if len(c) < (n-1)*ldc+ncc && ncc != 0 { + panic(shortC) + } + if len(d) < n { + panic(shortD) + } + if len(e) < n-1 { + panic(shortE) + } + if len(work) < 4*(n-1) { + panic(shortWork) + } + + var info int + bi := blas64.Implementation() + const maxIter = 6 + + if n != 1 { + // If the singular vectors do not need to be computed, use qd algorithm. + if !(ncvt > 0 || nru > 0 || ncc > 0) { + info = impl.Dlasq1(n, d, e, work) + // If info is 2 dqds didn't finish, and so try to. + if info != 2 { + return info == 0 + } + } + nm1 := n - 1 + nm12 := nm1 + nm1 + nm13 := nm12 + nm1 + idir := 0 + + eps := dlamchE + unfl := dlamchS + lower := uplo == blas.Lower + var cs, sn, r float64 + if lower { + for i := 0; i < n-1; i++ { + cs, sn, r = impl.Dlartg(d[i], e[i]) + d[i] = r + e[i] = sn * d[i+1] + d[i+1] *= cs + work[i] = cs + work[nm1+i] = sn + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, n, work, work[n-1:], u, ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, n, ncc, work, work[n-1:], c, ldc) + } + } + // Compute singular values to a relative accuracy of tol. 
If tol is negative + // the values will be computed to an absolute accuracy of math.Abs(tol) * norm(b) + tolmul := math.Max(10, math.Min(100, math.Pow(eps, -1.0/8))) + tol := tolmul * eps + var smax float64 + for i := 0; i < n; i++ { + smax = math.Max(smax, math.Abs(d[i])) + } + for i := 0; i < n-1; i++ { + smax = math.Max(smax, math.Abs(e[i])) + } + + var sminl float64 + var thresh float64 + if tol >= 0 { + sminoa := math.Abs(d[0]) + if sminoa != 0 { + mu := sminoa + for i := 1; i < n; i++ { + mu = math.Abs(d[i]) * (mu / (mu + math.Abs(e[i-1]))) + sminoa = math.Min(sminoa, mu) + if sminoa == 0 { + break + } + } + } + sminoa = sminoa / math.Sqrt(float64(n)) + thresh = math.Max(tol*sminoa, float64(maxIter*n*n)*unfl) + } else { + thresh = math.Max(math.Abs(tol)*smax, float64(maxIter*n*n)*unfl) + } + // Prepare for the main iteration loop for the singular values. + maxIt := maxIter * n * n + iter := 0 + oldl2 := -1 + oldm := -1 + // m points to the last element of unconverged part of matrix. + m := n + + Outer: + for m > 1 { + if iter > maxIt { + info = 0 + for i := 0; i < n-1; i++ { + if e[i] != 0 { + info++ + } + } + return info == 0 + } + // Find diagonal block of matrix to work on. + if tol < 0 && math.Abs(d[m-1]) <= thresh { + d[m-1] = 0 + } + smax = math.Abs(d[m-1]) + smin := smax + var l2 int + var broke bool + for l3 := 0; l3 < m-1; l3++ { + l2 = m - l3 - 2 + abss := math.Abs(d[l2]) + abse := math.Abs(e[l2]) + if tol < 0 && abss <= thresh { + d[l2] = 0 + } + if abse <= thresh { + broke = true + break + } + smin = math.Min(smin, abss) + smax = math.Max(math.Max(smax, abss), abse) + } + if broke { + e[l2] = 0 + if l2 == m-2 { + // Convergence of bottom singular value, return to top. + m-- + continue + } + l2++ + } else { + l2 = 0 + } + // e[ll] through e[m-2] are nonzero, e[ll-1] is zero + if l2 == m-2 { + // Handle 2×2 block separately. 
+ var sinr, cosr, sinl, cosl float64 + d[m-1], d[m-2], sinr, cosr, sinl, cosl = impl.Dlasv2(d[m-2], e[m-2], d[m-1]) + e[m-2] = 0 + if ncvt > 0 { + bi.Drot(ncvt, vt[(m-2)*ldvt:], 1, vt[(m-1)*ldvt:], 1, cosr, sinr) + } + if nru > 0 { + bi.Drot(nru, u[m-2:], ldu, u[m-1:], ldu, cosl, sinl) + } + if ncc > 0 { + bi.Drot(ncc, c[(m-2)*ldc:], 1, c[(m-1)*ldc:], 1, cosl, sinl) + } + m -= 2 + continue + } + // If working on a new submatrix, choose shift direction from larger end + // diagonal element toward smaller. + if l2 > oldm-1 || m-1 < oldl2 { + if math.Abs(d[l2]) >= math.Abs(d[m-1]) { + idir = 1 + } else { + idir = 2 + } + } + // Apply convergence tests. + // TODO(btracey): There is a lot of similar looking code here. See + // if there is a better way to de-duplicate. + if idir == 1 { + // Run convergence test in forward direction. + // First apply standard test to bottom of matrix. + if math.Abs(e[m-2]) <= math.Abs(tol)*math.Abs(d[m-1]) || (tol < 0 && math.Abs(e[m-2]) <= thresh) { + e[m-2] = 0 + continue + } + if tol >= 0 { + // If relative accuracy desired, apply convergence criterion forward. + mu := math.Abs(d[l2]) + sminl = mu + for l3 := l2; l3 < m-1; l3++ { + if math.Abs(e[l3]) <= tol*mu { + e[l3] = 0 + continue Outer + } + mu = math.Abs(d[l3+1]) * (mu / (mu + math.Abs(e[l3]))) + sminl = math.Min(sminl, mu) + } + } + } else { + // Run convergence test in backward direction. + // First apply standard test to top of matrix. + if math.Abs(e[l2]) <= math.Abs(tol)*math.Abs(d[l2]) || (tol < 0 && math.Abs(e[l2]) <= thresh) { + e[l2] = 0 + continue + } + if tol >= 0 { + // If relative accuracy desired, apply convergence criterion backward. + mu := math.Abs(d[m-1]) + sminl = mu + for l3 := m - 2; l3 >= l2; l3-- { + if math.Abs(e[l3]) <= tol*mu { + e[l3] = 0 + continue Outer + } + mu = math.Abs(d[l3]) * (mu / (mu + math.Abs(e[l3]))) + sminl = math.Min(sminl, mu) + } + } + } + oldl2 = l2 + oldm = m + // Compute shift. 
First, test if shifting would ruin relative accuracy, + // and if so set the shift to zero. + var shift float64 + if tol >= 0 && float64(n)*tol*(sminl/smax) <= math.Max(eps, (1.0/100)*tol) { + shift = 0 + } else { + var sl2 float64 + if idir == 1 { + sl2 = math.Abs(d[l2]) + shift, _ = impl.Dlas2(d[m-2], e[m-2], d[m-1]) + } else { + sl2 = math.Abs(d[m-1]) + shift, _ = impl.Dlas2(d[l2], e[l2], d[l2+1]) + } + // Test if shift is negligible + if sl2 > 0 { + if (shift/sl2)*(shift/sl2) < eps { + shift = 0 + } + } + } + iter += m - l2 + 1 + // If no shift, do simplified QR iteration. + if shift == 0 { + if idir == 1 { + cs := 1.0 + oldcs := 1.0 + var sn, r, oldsn float64 + for i := l2; i < m-1; i++ { + cs, sn, r = impl.Dlartg(d[i]*cs, e[i]) + if i > l2 { + e[i-1] = oldsn * r + } + oldcs, oldsn, d[i] = impl.Dlartg(oldcs*r, d[i+1]*sn) + work[i-l2] = cs + work[i-l2+nm1] = sn + work[i-l2+nm12] = oldcs + work[i-l2+nm13] = oldsn + } + h := d[m-1] * cs + d[m-1] = h * oldcs + e[m-2] = h * oldsn + if ncvt > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncvt, work, work[n-1:], vt[l2*ldvt:], ldvt) + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, m-l2, work[nm12:], work[nm13:], u[l2:], ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncc, work[nm12:], work[nm13:], c[l2*ldc:], ldc) + } + if math.Abs(e[m-2]) < thresh { + e[m-2] = 0 + } + } else { + cs := 1.0 + oldcs := 1.0 + var sn, r, oldsn float64 + for i := m - 1; i >= l2+1; i-- { + cs, sn, r = impl.Dlartg(d[i]*cs, e[i-1]) + if i < m-1 { + e[i] = oldsn * r + } + oldcs, oldsn, d[i] = impl.Dlartg(oldcs*r, d[i-1]*sn) + work[i-l2-1] = cs + work[i-l2+nm1-1] = -sn + work[i-l2+nm12-1] = oldcs + work[i-l2+nm13-1] = -oldsn + } + h := d[l2] * cs + d[l2] = h * oldcs + e[l2] = h * oldsn + if ncvt > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncvt, work[nm12:], work[nm13:], vt[l2*ldvt:], ldvt) + } + if nru > 0 { + 
impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, nru, m-l2, work, work[n-1:], u[l2:], ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncc, work, work[n-1:], c[l2*ldc:], ldc) + } + if math.Abs(e[l2]) <= thresh { + e[l2] = 0 + } + } + } else { + // Use nonzero shift. + if idir == 1 { + // Chase bulge from top to bottom. Save cosines and sines for + // later singular vector updates. + f := (math.Abs(d[l2]) - shift) * (math.Copysign(1, d[l2]) + shift/d[l2]) + g := e[l2] + var cosl, sinl float64 + for i := l2; i < m-1; i++ { + cosr, sinr, r := impl.Dlartg(f, g) + if i > l2 { + e[i-1] = r + } + f = cosr*d[i] + sinr*e[i] + e[i] = cosr*e[i] - sinr*d[i] + g = sinr * d[i+1] + d[i+1] *= cosr + cosl, sinl, r = impl.Dlartg(f, g) + d[i] = r + f = cosl*e[i] + sinl*d[i+1] + d[i+1] = cosl*d[i+1] - sinl*e[i] + if i < m-2 { + g = sinl * e[i+1] + e[i+1] = cosl * e[i+1] + } + work[i-l2] = cosr + work[i-l2+nm1] = sinr + work[i-l2+nm12] = cosl + work[i-l2+nm13] = sinl + } + e[m-2] = f + if ncvt > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncvt, work, work[n-1:], vt[l2*ldvt:], ldvt) + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, nru, m-l2, work[nm12:], work[nm13:], u[l2:], ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Forward, m-l2, ncc, work[nm12:], work[nm13:], c[l2*ldc:], ldc) + } + if math.Abs(e[m-2]) <= thresh { + e[m-2] = 0 + } + } else { + // Chase bulge from top to bottom. Save cosines and sines for + // later singular vector updates. 
+ f := (math.Abs(d[m-1]) - shift) * (math.Copysign(1, d[m-1]) + shift/d[m-1]) + g := e[m-2] + for i := m - 1; i > l2; i-- { + cosr, sinr, r := impl.Dlartg(f, g) + if i < m-1 { + e[i] = r + } + f = cosr*d[i] + sinr*e[i-1] + e[i-1] = cosr*e[i-1] - sinr*d[i] + g = sinr * d[i-1] + d[i-1] *= cosr + cosl, sinl, r := impl.Dlartg(f, g) + d[i] = r + f = cosl*e[i-1] + sinl*d[i-1] + d[i-1] = cosl*d[i-1] - sinl*e[i-1] + if i > l2+1 { + g = sinl * e[i-2] + e[i-2] *= cosl + } + work[i-l2-1] = cosr + work[i-l2+nm1-1] = -sinr + work[i-l2+nm12-1] = cosl + work[i-l2+nm13-1] = -sinl + } + e[l2] = f + if math.Abs(e[l2]) <= thresh { + e[l2] = 0 + } + if ncvt > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncvt, work[nm12:], work[nm13:], vt[l2*ldvt:], ldvt) + } + if nru > 0 { + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, nru, m-l2, work, work[n-1:], u[l2:], ldu) + } + if ncc > 0 { + impl.Dlasr(blas.Left, lapack.Variable, lapack.Backward, m-l2, ncc, work, work[n-1:], c[l2*ldc:], ldc) + } + } + } + } + } + + // All singular values converged, make them positive. + for i := 0; i < n; i++ { + if d[i] < 0 { + d[i] *= -1 + if ncvt > 0 { + bi.Dscal(ncvt, -1, vt[i*ldvt:], 1) + } + } + } + + // Sort the singular values in decreasing order. + for i := 0; i < n-1; i++ { + isub := 0 + smin := d[0] + for j := 1; j < n-i; j++ { + if d[j] <= smin { + isub = j + smin = d[j] + } + } + if isub != n-i { + // Swap singular values and vectors. 
+ d[isub] = d[n-i-1] + d[n-i-1] = smin + if ncvt > 0 { + bi.Dswap(ncvt, vt[isub*ldvt:], 1, vt[(n-i-1)*ldvt:], 1) + } + if nru > 0 { + bi.Dswap(nru, u[isub:], ldu, u[n-i-1:], ldu) + } + if ncc > 0 { + bi.Dswap(ncc, c[isub*ldc:], 1, c[(n-i-1)*ldc:], 1) + } + } + } + info = 0 + for i := 0; i < n-1; i++ { + if e[i] != 0 { + info++ + } + } + return info == 0 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go new file mode 100644 index 0000000000..7caa0b1739 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebak.go @@ -0,0 +1,89 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgebak updates an n×m matrix V as +// V = P D V, if side == lapack.EVRight, +// V = P D^{-1} V, if side == lapack.EVLeft, +// where P and D are n×n permutation and scaling matrices, respectively, +// implicitly represented by job, scale, ilo and ihi as returned by Dgebal. +// +// Typically, columns of the matrix V contain the right or left (determined by +// side) eigenvectors of the balanced matrix output by Dgebal, and Dgebak forms +// the eigenvectors of the original matrix. +// +// Dgebak is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dgebak(job lapack.BalanceJob, side lapack.EVSide, n, ilo, ihi int, scale []float64, m int, v []float64, ldv int) { + switch { + case job != lapack.BalanceNone && job != lapack.Permute && job != lapack.Scale && job != lapack.PermuteScale: + panic(badBalanceJob) + case side != lapack.EVLeft && side != lapack.EVRight: + panic(badEVSide) + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case m < 0: + panic(mLT0) + case ldv < max(1, m): + panic(badLdV) + } + + // Quick return if possible. + if n == 0 || m == 0 { + return + } + + if len(scale) < n { + panic(shortScale) + } + if len(v) < (n-1)*ldv+m { + panic(shortV) + } + + // Quick return if possible. + if job == lapack.BalanceNone { + return + } + + bi := blas64.Implementation() + if ilo != ihi && job != lapack.Permute { + // Backward balance. + if side == lapack.EVRight { + for i := ilo; i <= ihi; i++ { + bi.Dscal(m, scale[i], v[i*ldv:], 1) + } + } else { + for i := ilo; i <= ihi; i++ { + bi.Dscal(m, 1/scale[i], v[i*ldv:], 1) + } + } + } + if job == lapack.Scale { + return + } + // Backward permutation. + for i := ilo - 1; i >= 0; i-- { + k := int(scale[i]) + if k == i { + continue + } + bi.Dswap(m, v[i*ldv:], 1, v[k*ldv:], 1) + } + for i := ihi + 1; i < n; i++ { + k := int(scale[i]) + if k == i { + continue + } + bi.Dswap(m, v[i*ldv:], 1, v[k*ldv:], 1) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go new file mode 100644 index 0000000000..6fb5170cd2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebal.go @@ -0,0 +1,239 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgebal balances an n×n matrix A. Balancing consists of two stages, permuting +// and scaling. Both steps are optional and depend on the value of job. +// +// Permuting consists of applying a permutation matrix P such that the matrix +// that results from P^T*A*P takes the upper block triangular form +// [ T1 X Y ] +// P^T A P = [ 0 B Z ], +// [ 0 0 T2 ] +// where T1 and T2 are upper triangular matrices and B contains at least one +// nonzero off-diagonal element in each row and column. The indices ilo and ihi +// mark the starting and ending columns of the submatrix B. The eigenvalues of A +// isolated in the first 0 to ilo-1 and last ihi+1 to n-1 elements on the +// diagonal can be read off without any roundoff error. +// +// Scaling consists of applying a diagonal similarity transformation D such that +// D^{-1}*B*D has the 1-norm of each row and its corresponding column nearly +// equal. The output matrix is +// [ T1 X*D Y ] +// [ 0 inv(D)*B*D inv(D)*Z ]. +// [ 0 0 T2 ] +// Scaling may reduce the 1-norm of the matrix, and improve the accuracy of +// the computed eigenvalues and/or eigenvectors. +// +// job specifies the operations that will be performed on A. +// If job is lapack.BalanceNone, Dgebal sets scale[i] = 1 for all i and returns ilo=0, ihi=n-1. +// If job is lapack.Permute, only permuting will be done. +// If job is lapack.Scale, only scaling will be done. +// If job is lapack.PermuteScale, both permuting and scaling will be done. +// +// On return, if job is lapack.Permute or lapack.PermuteScale, it will hold that +// A[i,j] == 0, for i > j and j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}. +// If job is lapack.BalanceNone or lapack.Scale, or if n == 0, it will hold that +// ilo == 0 and ihi == n-1. +// +// On return, scale will contain information about the permutations and scaling +// factors applied to A. 
If π(j) denotes the index of the column interchanged +// with column j, and D[j,j] denotes the scaling factor applied to column j, +// then +// scale[j] == π(j), for j ∈ {0, ..., ilo-1, ihi+1, ..., n-1}, +// == D[j,j], for j ∈ {ilo, ..., ihi}. +// scale must have length equal to n, otherwise Dgebal will panic. +// +// Dgebal is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgebal(job lapack.BalanceJob, n int, a []float64, lda int, scale []float64) (ilo, ihi int) { + switch { + case job != lapack.BalanceNone && job != lapack.Permute && job != lapack.Scale && job != lapack.PermuteScale: + panic(badBalanceJob) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + ilo = 0 + ihi = n - 1 + + if n == 0 { + return ilo, ihi + } + + if len(scale) != n { + panic(shortScale) + } + + if job == lapack.BalanceNone { + for i := range scale { + scale[i] = 1 + } + return ilo, ihi + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + bi := blas64.Implementation() + swapped := true + + if job == lapack.Scale { + goto scaling + } + + // Permutation to isolate eigenvalues if possible. + // + // Search for rows isolating an eigenvalue and push them down. + for swapped { + swapped = false + rows: + for i := ihi; i >= 0; i-- { + for j := 0; j <= ihi; j++ { + if i == j { + continue + } + if a[i*lda+j] != 0 { + continue rows + } + } + // Row i has only zero off-diagonal elements in the + // block A[ilo:ihi+1,ilo:ihi+1]. + scale[ihi] = float64(i) + if i != ihi { + bi.Dswap(ihi+1, a[i:], lda, a[ihi:], lda) + bi.Dswap(n, a[i*lda:], 1, a[ihi*lda:], 1) + } + if ihi == 0 { + scale[0] = 1 + return ilo, ihi + } + ihi-- + swapped = true + break + } + } + // Search for columns isolating an eigenvalue and push them left. 
+ swapped = true + for swapped { + swapped = false + columns: + for j := ilo; j <= ihi; j++ { + for i := ilo; i <= ihi; i++ { + if i == j { + continue + } + if a[i*lda+j] != 0 { + continue columns + } + } + // Column j has only zero off-diagonal elements in the + // block A[ilo:ihi+1,ilo:ihi+1]. + scale[ilo] = float64(j) + if j != ilo { + bi.Dswap(ihi+1, a[j:], lda, a[ilo:], lda) + bi.Dswap(n-ilo, a[j*lda+ilo:], 1, a[ilo*lda+ilo:], 1) + } + swapped = true + ilo++ + break + } + } + +scaling: + for i := ilo; i <= ihi; i++ { + scale[i] = 1 + } + + if job == lapack.Permute { + return ilo, ihi + } + + // Balance the submatrix in rows ilo to ihi. + + const ( + // sclfac should be a power of 2 to avoid roundoff errors. + // Elements of scale are restricted to powers of sclfac, + // therefore the matrix will be only nearly balanced. + sclfac = 2 + // factor determines the minimum reduction of the row and column + // norms that is considered non-negligible. It must be less than 1. + factor = 0.95 + ) + sfmin1 := dlamchS / dlamchP + sfmax1 := 1 / sfmin1 + sfmin2 := sfmin1 * sclfac + sfmax2 := 1 / sfmin2 + + // Iterative loop for norm reduction. + var conv bool + for !conv { + conv = true + for i := ilo; i <= ihi; i++ { + c := bi.Dnrm2(ihi-ilo+1, a[ilo*lda+i:], lda) + r := bi.Dnrm2(ihi-ilo+1, a[i*lda+ilo:], 1) + ica := bi.Idamax(ihi+1, a[i:], lda) + ca := math.Abs(a[ica*lda+i]) + ira := bi.Idamax(n-ilo, a[i*lda+ilo:], 1) + ra := math.Abs(a[i*lda+ilo+ira]) + + // Guard against zero c or r due to underflow. + if c == 0 || r == 0 { + continue + } + g := r / sclfac + f := 1.0 + s := c + r + for c < g && math.Max(f, math.Max(c, ca)) < sfmax2 && math.Min(r, math.Min(g, ra)) > sfmin2 { + if math.IsNaN(c + f + ca + r + g + ra) { + // Panic if NaN to avoid infinite loop. 
+ panic("lapack: NaN") + } + f *= sclfac + c *= sclfac + ca *= sclfac + g /= sclfac + r /= sclfac + ra /= sclfac + } + g = c / sclfac + for r <= g && math.Max(r, ra) < sfmax2 && math.Min(math.Min(f, c), math.Min(g, ca)) > sfmin2 { + f /= sclfac + c /= sclfac + ca /= sclfac + g /= sclfac + r *= sclfac + ra *= sclfac + } + + if c+r >= factor*s { + // Reduction would be negligible. + continue + } + if f < 1 && scale[i] < 1 && f*scale[i] <= sfmin1 { + continue + } + if f > 1 && scale[i] > 1 && scale[i] >= sfmax1/f { + continue + } + + // Now balance. + scale[i] *= f + bi.Dscal(n-ilo, 1/f, a[i*lda+ilo:], 1) + bi.Dscal(ihi+1, f, a[i:], lda) + conv = false + } + } + return ilo, ihi +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go new file mode 100644 index 0000000000..cf951a1202 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebd2.go @@ -0,0 +1,86 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgebd2 reduces an m×n matrix A to upper or lower bidiagonal form by an orthogonal +// transformation. +// Q^T * A * P = B +// if m >= n, B is upper diagonal, otherwise B is lower bidiagonal. +// d is the diagonal, len = min(m,n) +// e is the off-diagonal len = min(m,n)-1 +// +// Dgebd2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgebd2(m, n int, a []float64, lda int, d, e, tauQ, tauP, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
+ minmn := min(m, n) + if minmn == 0 { + return + } + + switch { + case len(d) < minmn: + panic(shortD) + case len(e) < minmn-1: + panic(shortE) + case len(tauQ) < minmn: + panic(shortTauQ) + case len(tauP) < minmn: + panic(shortTauP) + case len(work) < max(m, n): + panic(shortWork) + } + + if m >= n { + for i := 0; i < n; i++ { + a[i*lda+i], tauQ[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min(i+1, m-1)*lda+i:], lda) + d[i] = a[i*lda+i] + a[i*lda+i] = 1 + // Apply H_i to A[i:m, i+1:n] from the left. + if i < n-1 { + impl.Dlarf(blas.Left, m-i, n-i-1, a[i*lda+i:], lda, tauQ[i], a[i*lda+i+1:], lda, work) + } + a[i*lda+i] = d[i] + if i < n-1 { + a[i*lda+i+1], tauP[i] = impl.Dlarfg(n-i-1, a[i*lda+i+1], a[i*lda+min(i+2, n-1):], 1) + e[i] = a[i*lda+i+1] + a[i*lda+i+1] = 1 + impl.Dlarf(blas.Right, m-i-1, n-i-1, a[i*lda+i+1:], 1, tauP[i], a[(i+1)*lda+i+1:], lda, work) + a[i*lda+i+1] = e[i] + } else { + tauP[i] = 0 + } + } + return + } + for i := 0; i < m; i++ { + a[i*lda+i], tauP[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) + d[i] = a[i*lda+i] + a[i*lda+i] = 1 + if i < m-1 { + impl.Dlarf(blas.Right, m-i-1, n-i, a[i*lda+i:], 1, tauP[i], a[(i+1)*lda+i:], lda, work) + } + a[i*lda+i] = d[i] + if i < m-1 { + a[(i+1)*lda+i], tauQ[i] = impl.Dlarfg(m-i-1, a[(i+1)*lda+i], a[min(i+2, m-1)*lda+i:], lda) + e[i] = a[(i+1)*lda+i] + a[(i+1)*lda+i] = 1 + impl.Dlarf(blas.Left, m-i-1, n-i-1, a[(i+1)*lda+i:], lda, tauQ[i], a[(i+1)*lda+i+1:], lda, work) + a[(i+1)*lda+i] = e[i] + } else { + tauQ[i] = 0 + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go new file mode 100644 index 0000000000..f03bf8d939 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgebrd.go @@ -0,0 +1,161 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgebrd reduces a general m×n matrix A to upper or lower bidiagonal form B by +// an orthogonal transformation: +// Q^T * A * P = B. +// The diagonal elements of B are stored in d and the off-diagonal elements are stored +// in e. These are additionally stored along the diagonal of A and the off-diagonal +// of A. If m >= n B is an upper-bidiagonal matrix, and if m < n B is a +// lower-bidiagonal matrix. +// +// The remaining elements of A store the data needed to construct Q and P. +// The matrices Q and P are products of elementary reflectors +// if m >= n, Q = H_0 * H_1 * ... * H_{n-1}, +// P = G_0 * G_1 * ... * G_{n-2}, +// if m < n, Q = H_0 * H_1 * ... * H_{m-2}, +// P = G_0 * G_1 * ... * G_{m-1}, +// where +// H_i = I - tauQ[i] * v_i * v_i^T, +// G_i = I - tauP[i] * u_i * u_i^T. +// +// As an example, on exit the entries of A when m = 6, and n = 5 +// [ d e u1 u1 u1] +// [v1 d e u2 u2] +// [v1 v2 d e u3] +// [v1 v2 v3 d e] +// [v1 v2 v3 v4 d] +// [v1 v2 v3 v4 v5] +// and when m = 5, n = 6 +// [ d u1 u1 u1 u1 u1] +// [ e d u2 u2 u2 u2] +// [v1 e d u3 u3 u3] +// [v1 v2 e d u4 u4] +// [v1 v2 v3 e d u5] +// +// d, tauQ, and tauP must all have length at least min(m,n), and e must have +// length min(m,n) - 1, unless lwork is -1 when there is no check except for +// work which must have a length of at least one. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= max(1,m,n) or be -1 and this function will panic otherwise. +// Dgebrd is blocked decomposition, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgebrd, +// the optimal work length will be stored into work[0]. +// +// Dgebrd is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dgebrd(m, n int, a []float64, lda int, d, e, tauQ, tauP, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, max(m, n)) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + minmn := min(m, n) + if minmn == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DGEBRD", " ", m, n, -1, -1) + lwkopt := (m + n) * nb + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(d) < minmn: + panic(shortD) + case len(e) < minmn-1: + panic(shortE) + case len(tauQ) < minmn: + panic(shortTauQ) + case len(tauP) < minmn: + panic(shortTauP) + } + + nx := minmn + ws := max(m, n) + if 1 < nb && nb < minmn { + // At least one blocked operation can be done. + // Get the crossover point nx. + nx = max(nb, impl.Ilaenv(3, "DGEBRD", " ", m, n, -1, -1)) + // Determine when to switch from blocked to unblocked code. + if nx < minmn { + // At least one blocked operation will be done. + ws = (m + n) * nb + if lwork < ws { + // Not enough work space for the optimal nb, + // consider using a smaller block size. + nbmin := impl.Ilaenv(2, "DGEBRD", " ", m, n, -1, -1) + if lwork >= (m+n)*nbmin { + // Enough work space for minimum block size. + nb = lwork / (m + n) + } else { + nb = minmn + nx = minmn + } + } + } + } + bi := blas64.Implementation() + ldworkx := nb + ldworky := nb + var i int + for i = 0; i < minmn-nx; i += nb { + // Reduce rows and columns i:i+nb to bidiagonal form and return + // the matrices X and Y which are needed to update the unreduced + // part of the matrix. + // X is stored in the first m rows of work, y in the next rows. 
+ x := work[:m*ldworkx] + y := work[m*ldworkx:] + impl.Dlabrd(m-i, n-i, nb, a[i*lda+i:], lda, + d[i:], e[i:], tauQ[i:], tauP[i:], + x, ldworkx, y, ldworky) + + // Update the trailing submatrix A[i+nb:m,i+nb:n], using an update + // of the form A := A - V*Y**T - X*U**T + bi.Dgemm(blas.NoTrans, blas.Trans, m-i-nb, n-i-nb, nb, + -1, a[(i+nb)*lda+i:], lda, y[nb*ldworky:], ldworky, + 1, a[(i+nb)*lda+i+nb:], lda) + + bi.Dgemm(blas.NoTrans, blas.NoTrans, m-i-nb, n-i-nb, nb, + -1, x[nb*ldworkx:], ldworkx, a[i*lda+i+nb:], lda, + 1, a[(i+nb)*lda+i+nb:], lda) + + // Copy diagonal and off-diagonal elements of B back into A. + if m >= n { + for j := i; j < i+nb; j++ { + a[j*lda+j] = d[j] + a[j*lda+j+1] = e[j] + } + } else { + for j := i; j < i+nb; j++ { + a[j*lda+j] = d[j] + a[(j+1)*lda+j] = e[j] + } + } + } + // Use unblocked code to reduce the remainder of the matrix. + impl.Dgebd2(m-i, n-i, a[i*lda+i:], lda, d[i:], e[i:], tauQ[i:], tauP[i:], work) + work[0] = float64(ws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go new file mode 100644 index 0000000000..1d1ca586bb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgecon.go @@ -0,0 +1,92 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgecon estimates the reciprocal of the condition number of the n×n matrix A +// given the LU decomposition of the matrix. The condition number computed may +// be based on the 1-norm or the ∞-norm. +// +// The slice a contains the result of the LU decomposition of A as computed by Dgetrf. +// +// anorm is the corresponding 1-norm or ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 4*n and Dgecon will panic otherwise. 
+// +// iwork is a temporary data slice of length at least n and Dgecon will panic otherwise. +func (impl Implementation) Dgecon(norm lapack.MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 { + switch { + case norm != lapack.MaxColumnSum && norm != lapack.MaxRowSum: + panic(badNorm) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return 1 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(work) < 4*n: + panic(shortWork) + case len(iwork) < n: + panic(shortIWork) + } + + // Quick return if possible. + if anorm == 0 { + return 0 + } + + bi := blas64.Implementation() + var rcond, ainvnm float64 + var kase int + var normin bool + isave := new([3]int) + onenrm := norm == lapack.MaxColumnSum + smlnum := dlamchS + kase1 := 2 + if onenrm { + kase1 = 1 + } + for { + ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, isave) + if kase == 0 { + if ainvnm != 0 { + rcond = (1 / ainvnm) / anorm + } + return rcond + } + var sl, su float64 + if kase == kase1 { + sl = impl.Dlatrs(blas.Lower, blas.NoTrans, blas.Unit, normin, n, a, lda, work, work[2*n:]) + su = impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[3*n:]) + } else { + su = impl.Dlatrs(blas.Upper, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[3*n:]) + sl = impl.Dlatrs(blas.Lower, blas.Trans, blas.Unit, normin, n, a, lda, work, work[2*n:]) + } + scale := sl * su + normin = true + if scale != 1 { + ix := bi.Idamax(n, work, 1) + if scale == 0 || scale < math.Abs(work[ix])*smlnum { + return rcond + } + impl.Drscl(n, scale, work, 1) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go new file mode 100644 index 0000000000..0da4e609c5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeev.go @@ -0,0 +1,279 @@ +// Copyright ©2016 The Gonum Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgeev computes the eigenvalues and, optionally, the left and/or right +// eigenvectors for an n×n real nonsymmetric matrix A. +// +// The right eigenvector v_j of A corresponding to an eigenvalue λ_j +// is defined by +// A v_j = λ_j v_j, +// and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by +// u_j^H A = λ_j u_j^H, +// where u_j^H is the conjugate transpose of u_j. +// +// On return, A will be overwritten and the left and right eigenvectors will be +// stored, respectively, in the columns of the n×n matrices VL and VR in the +// same order as their eigenvalues. If the j-th eigenvalue is real, then +// u_j = VL[:,j], +// v_j = VR[:,j], +// and if it is not real, then j and j+1 form a complex conjugate pair and the +// eigenvectors can be recovered as +// u_j = VL[:,j] + i*VL[:,j+1], +// u_{j+1} = VL[:,j] - i*VL[:,j+1], +// v_j = VR[:,j] + i*VR[:,j+1], +// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// where i is the imaginary unit. The computed eigenvectors are normalized to +// have Euclidean norm equal to 1 and largest component real. +// +// Left eigenvectors will be computed only if jobvl == lapack.LeftEVCompute, +// otherwise jobvl must be lapack.LeftEVNone. +// Right eigenvectors will be computed only if jobvr == lapack.RightEVCompute, +// otherwise jobvr must be lapack.RightEVNone. +// For other values of jobvl and jobvr Dgeev will panic. +// +// wr and wi contain the real and imaginary parts, respectively, of the computed +// eigenvalues. Complex conjugate pairs of eigenvalues appear consecutively with +// the eigenvalue having the positive imaginary part first. +// wr and wi must have length n, and Dgeev will panic otherwise. 
+// +// work must have length at least lwork and lwork must be at least max(1,4*n) if +// the left or right eigenvectors are computed, and at least max(1,3*n) if no +// eigenvectors are computed. For good performance, lwork must generally be +// larger. On return, optimal value of lwork will be stored in work[0]. +// +// If lwork == -1, instead of performing Dgeev, the function only calculates the +// optimal vaule of lwork and stores it into work[0]. +// +// On return, first is the index of the first valid eigenvalue. If first == 0, +// all eigenvalues and eigenvectors have been computed. If first is positive, +// Dgeev failed to compute all the eigenvalues, no eigenvectors have been +// computed and wr[first:] and wi[first:] contain those eigenvalues which have +// converged. +func (impl Implementation) Dgeev(jobvl lapack.LeftEVJob, jobvr lapack.RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int) { + wantvl := jobvl == lapack.LeftEVCompute + wantvr := jobvr == lapack.RightEVCompute + var minwrk int + if wantvl || wantvr { + minwrk = max(1, 4*n) + } else { + minwrk = max(1, 3*n) + } + switch { + case jobvl != lapack.LeftEVCompute && jobvl != lapack.LeftEVNone: + panic(badLeftEVJob) + case jobvr != lapack.RightEVCompute && jobvr != lapack.RightEVNone: + panic(badRightEVJob) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case ldvl < 1 || (ldvl < n && wantvl): + panic(badLdVL) + case ldvr < 1 || (ldvr < n && wantvr): + panic(badLdVR) + case lwork < minwrk && lwork != -1: + panic(badLWork) + case len(work) < lwork: + panic(shortWork) + } + + // Quick return if possible. 
+ if n == 0 { + work[0] = 1 + return 0 + } + + maxwrk := 2*n + n*impl.Ilaenv(1, "DGEHRD", " ", n, 1, n, 0) + if wantvl || wantvr { + maxwrk = max(maxwrk, 2*n+(n-1)*impl.Ilaenv(1, "DORGHR", " ", n, 1, n, -1)) + impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.SchurOrig, n, 0, n-1, + a, lda, wr, wi, nil, n, work, -1) + maxwrk = max(maxwrk, max(n+1, n+int(work[0]))) + side := lapack.EVLeft + if wantvr { + side = lapack.EVRight + } + impl.Dtrevc3(side, lapack.EVAllMulQ, nil, n, a, lda, vl, ldvl, vr, ldvr, + n, work, -1) + maxwrk = max(maxwrk, n+int(work[0])) + maxwrk = max(maxwrk, 4*n) + } else { + impl.Dhseqr(lapack.EigenvaluesOnly, lapack.SchurNone, n, 0, n-1, + a, lda, wr, wi, vr, ldvr, work, -1) + maxwrk = max(maxwrk, max(n+1, n+int(work[0]))) + } + maxwrk = max(maxwrk, minwrk) + + if lwork == -1 { + work[0] = float64(maxwrk) + return 0 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(wr) != n: + panic(badLenWr) + case len(wi) != n: + panic(badLenWi) + case len(vl) < (n-1)*ldvl+n && wantvl: + panic(shortVL) + case len(vr) < (n-1)*ldvr+n && wantvr: + panic(shortVR) + } + + // Get machine constants. + smlnum := math.Sqrt(dlamchS) / dlamchP + bignum := 1 / smlnum + + // Scale A if max element outside range [smlnum,bignum]. + anrm := impl.Dlange(lapack.MaxAbs, n, n, a, lda, nil) + var scalea bool + var cscale float64 + if 0 < anrm && anrm < smlnum { + scalea = true + cscale = smlnum + } else if anrm > bignum { + scalea = true + cscale = bignum + } + if scalea { + impl.Dlascl(lapack.General, 0, 0, anrm, cscale, n, n, a, lda) + } + + // Balance the matrix. + workbal := work[:n] + ilo, ihi := impl.Dgebal(lapack.PermuteScale, n, a, lda, workbal) + + // Reduce to upper Hessenberg form. + iwrk := 2 * n + tau := work[n : iwrk-1] + impl.Dgehrd(n, ilo, ihi, a, lda, tau, work[iwrk:], lwork-iwrk) + + var side lapack.EVSide + if wantvl { + side = lapack.EVLeft + // Copy Householder vectors to VL. 
+ impl.Dlacpy(blas.Lower, n, n, a, lda, vl, ldvl) + // Generate orthogonal matrix in VL. + impl.Dorghr(n, ilo, ihi, vl, ldvl, tau, work[iwrk:], lwork-iwrk) + // Perform QR iteration, accumulating Schur vectors in VL. + iwrk = n + first = impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.SchurOrig, n, ilo, ihi, + a, lda, wr, wi, vl, ldvl, work[iwrk:], lwork-iwrk) + if wantvr { + // Want left and right eigenvectors. + // Copy Schur vectors to VR. + side = lapack.EVBoth + impl.Dlacpy(blas.All, n, n, vl, ldvl, vr, ldvr) + } + } else if wantvr { + side = lapack.EVRight + // Copy Householder vectors to VR. + impl.Dlacpy(blas.Lower, n, n, a, lda, vr, ldvr) + // Generate orthogonal matrix in VR. + impl.Dorghr(n, ilo, ihi, vr, ldvr, tau, work[iwrk:], lwork-iwrk) + // Perform QR iteration, accumulating Schur vectors in VR. + iwrk = n + first = impl.Dhseqr(lapack.EigenvaluesAndSchur, lapack.SchurOrig, n, ilo, ihi, + a, lda, wr, wi, vr, ldvr, work[iwrk:], lwork-iwrk) + } else { + // Compute eigenvalues only. + iwrk = n + first = impl.Dhseqr(lapack.EigenvaluesOnly, lapack.SchurNone, n, ilo, ihi, + a, lda, wr, wi, nil, 1, work[iwrk:], lwork-iwrk) + } + + if first > 0 { + if scalea { + // Undo scaling. + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wr[first:], 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wi[first:], 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, ilo, 1, wr, 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, ilo, 1, wi, 1) + } + work[0] = float64(maxwrk) + return first + } + + if wantvl || wantvr { + // Compute left and/or right eigenvectors. + impl.Dtrevc3(side, lapack.EVAllMulQ, nil, n, + a, lda, vl, ldvl, vr, ldvr, n, work[iwrk:], lwork-iwrk) + } + bi := blas64.Implementation() + if wantvl { + // Undo balancing of left eigenvectors. + impl.Dgebak(lapack.PermuteScale, lapack.EVLeft, n, ilo, ihi, workbal, n, vl, ldvl) + // Normalize left eigenvectors and make largest component real. 
+ for i, wii := range wi { + if wii < 0 { + continue + } + if wii == 0 { + scl := 1 / bi.Dnrm2(n, vl[i:], ldvl) + bi.Dscal(n, scl, vl[i:], ldvl) + continue + } + scl := 1 / impl.Dlapy2(bi.Dnrm2(n, vl[i:], ldvl), bi.Dnrm2(n, vl[i+1:], ldvl)) + bi.Dscal(n, scl, vl[i:], ldvl) + bi.Dscal(n, scl, vl[i+1:], ldvl) + for k := 0; k < n; k++ { + vi := vl[k*ldvl+i] + vi1 := vl[k*ldvl+i+1] + work[iwrk+k] = vi*vi + vi1*vi1 + } + k := bi.Idamax(n, work[iwrk:iwrk+n], 1) + cs, sn, _ := impl.Dlartg(vl[k*ldvl+i], vl[k*ldvl+i+1]) + bi.Drot(n, vl[i:], ldvl, vl[i+1:], ldvl, cs, sn) + vl[k*ldvl+i+1] = 0 + } + } + if wantvr { + // Undo balancing of right eigenvectors. + impl.Dgebak(lapack.PermuteScale, lapack.EVRight, n, ilo, ihi, workbal, n, vr, ldvr) + // Normalize right eigenvectors and make largest component real. + for i, wii := range wi { + if wii < 0 { + continue + } + if wii == 0 { + scl := 1 / bi.Dnrm2(n, vr[i:], ldvr) + bi.Dscal(n, scl, vr[i:], ldvr) + continue + } + scl := 1 / impl.Dlapy2(bi.Dnrm2(n, vr[i:], ldvr), bi.Dnrm2(n, vr[i+1:], ldvr)) + bi.Dscal(n, scl, vr[i:], ldvr) + bi.Dscal(n, scl, vr[i+1:], ldvr) + for k := 0; k < n; k++ { + vi := vr[k*ldvr+i] + vi1 := vr[k*ldvr+i+1] + work[iwrk+k] = vi*vi + vi1*vi1 + } + k := bi.Idamax(n, work[iwrk:iwrk+n], 1) + cs, sn, _ := impl.Dlartg(vr[k*ldvr+i], vr[k*ldvr+i+1]) + bi.Drot(n, vr[i:], ldvr, vr[i+1:], ldvr, cs, sn) + vr[k*ldvr+i+1] = 0 + } + } + + if scalea { + // Undo scaling. + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wr[first:], 1) + impl.Dlascl(lapack.General, 0, 0, cscale, anrm, n-first, 1, wi[first:], 1) + } + + work[0] = float64(maxwrk) + return first +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go new file mode 100644 index 0000000000..261f21b983 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehd2.go @@ -0,0 +1,97 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgehd2 reduces a block of a general n×n matrix A to upper Hessenberg form H +// by an orthogonal similarity transformation Q^T * A * Q = H. +// +// The matrix Q is represented as a product of (ihi-ilo) elementary +// reflectors +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// Each H_i has the form +// H_i = I - tau[i] * v * v^T +// where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. +// v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. +// +// On entry, a contains the n×n general matrix to be reduced. On return, the +// upper triangle and the first subdiagonal of A are overwritten with the upper +// Hessenberg matrix H, and the elements below the first subdiagonal, with the +// slice tau, represent the orthogonal matrix Q as a product of elementary +// reflectors. +// +// The contents of A are illustrated by the following example, with n = 7, ilo = +// 1 and ihi = 5. +// On entry, +// [ a a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a ] +// on return, +// [ a a h h h h a ] +// [ a h h h h a ] +// [ h h h h h h ] +// [ v1 h h h h h ] +// [ v1 v2 h h h h ] +// [ v1 v2 v3 h h h ] +// [ a ] +// where a denotes an element of the original matrix A, h denotes a +// modified element of the upper Hessenberg matrix H, and vi denotes an +// element of the vector defining H_i. +// +// ilo and ihi determine the block of A that will be reduced to upper Hessenberg +// form. It must hold that 0 <= ilo <= ihi <= max(0, n-1), otherwise Dgehd2 will +// panic. +// +// On return, tau will contain the scalar factors of the elementary reflectors. +// It must have length equal to n-1, otherwise Dgehd2 will panic. +// +// work must have length at least n, otherwise Dgehd2 will panic. +// +// Dgehd2 is an internal routine. 
It is exported for testing purposes. +func (impl Implementation) Dgehd2(n, ilo, ihi int, a []float64, lda int, tau, work []float64) { + switch { + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(tau) != n-1: + panic(badLenTau) + case len(work) < n: + panic(shortWork) + } + + for i := ilo; i < ihi; i++ { + // Compute elementary reflector H_i to annihilate A[i+2:ihi+1,i]. + var aii float64 + aii, tau[i] = impl.Dlarfg(ihi-i, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) + a[(i+1)*lda+i] = 1 + + // Apply H_i to A[0:ihi+1,i+1:ihi+1] from the right. + impl.Dlarf(blas.Right, ihi+1, ihi-i, a[(i+1)*lda+i:], lda, tau[i], a[i+1:], lda, work) + + // Apply H_i to A[i+1:ihi+1,i+1:n] from the left. + impl.Dlarf(blas.Left, ihi-i, n-i-1, a[(i+1)*lda+i:], lda, tau[i], a[(i+1)*lda+i+1:], lda, work) + a[(i+1)*lda+i] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go new file mode 100644 index 0000000000..89b73cef99 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgehrd.go @@ -0,0 +1,194 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dgehrd reduces a block of a real n×n general matrix A to upper Hessenberg +// form H by an orthogonal similarity transformation Q^T * A * Q = H. +// +// The matrix Q is represented as a product of (ihi-ilo) elementary +// reflectors +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. 
+// Each H_i has the form +// H_i = I - tau[i] * v * v^T +// where v is a real vector with v[0:i+1] = 0, v[i+1] = 1 and v[ihi+1:n] = 0. +// v[i+2:ihi+1] is stored on exit in A[i+2:ihi+1,i]. +// +// On entry, a contains the n×n general matrix to be reduced. On return, the +// upper triangle and the first subdiagonal of A will be overwritten with the +// upper Hessenberg matrix H, and the elements below the first subdiagonal, with +// the slice tau, represent the orthogonal matrix Q as a product of elementary +// reflectors. +// +// The contents of a are illustrated by the following example, with n = 7, ilo = +// 1 and ihi = 5. +// On entry, +// [ a a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a a a a a a ] +// [ a ] +// on return, +// [ a a h h h h a ] +// [ a h h h h a ] +// [ h h h h h h ] +// [ v1 h h h h h ] +// [ v1 v2 h h h h ] +// [ v1 v2 v3 h h h ] +// [ a ] +// where a denotes an element of the original matrix A, h denotes a +// modified element of the upper Hessenberg matrix H, and vi denotes an +// element of the vector defining H_i. +// +// ilo and ihi determine the block of A that will be reduced to upper Hessenberg +// form. It must hold that 0 <= ilo <= ihi < n if n > 0, and ilo == 0 and ihi == +// -1 if n == 0, otherwise Dgehrd will panic. +// +// On return, tau will contain the scalar factors of the elementary reflectors. +// Elements tau[:ilo] and tau[ihi:] will be set to zero. tau must have length +// equal to n-1 if n > 0, otherwise Dgehrd will panic. +// +// work must have length at least lwork and lwork must be at least max(1,n), +// otherwise Dgehrd will panic. On return, work[0] contains the optimal value of +// lwork. +// +// If lwork == -1, instead of performing Dgehrd, only the optimal value of lwork +// will be stored in work[0]. +// +// Dgehrd is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dgehrd(n, ilo, ihi int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < lwork: + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return + } + + const ( + nbmax = 64 + ldt = nbmax + 1 + tsize = ldt * nbmax + ) + // Compute the workspace requirements. + nb := min(nbmax, impl.Ilaenv(1, "DGEHRD", " ", n, ilo, ihi, -1)) + lwkopt := n*nb + tsize + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + if len(tau) != n-1 { + panic(badLenTau) + } + + // Set tau[:ilo] and tau[ihi:] to zero. + for i := 0; i < ilo; i++ { + tau[i] = 0 + } + for i := ihi; i < n-1; i++ { + tau[i] = 0 + } + + // Quick return if possible. + nh := ihi - ilo + 1 + if nh <= 1 { + work[0] = 1 + return + } + + // Determine the block size. + nbmin := 2 + var nx int + if 1 < nb && nb < nh { + // Determine when to cross over from blocked to unblocked code + // (last block is always handled by unblocked code). + nx = max(nb, impl.Ilaenv(3, "DGEHRD", " ", n, ilo, ihi, -1)) + if nx < nh { + // Determine if workspace is large enough for blocked code. + if lwork < n*nb+tsize { + // Not enough workspace to use optimal nb: + // determine the minimum value of nb, and reduce + // nb or force use of unblocked code. + nbmin = max(2, impl.Ilaenv(2, "DGEHRD", " ", n, ilo, ihi, -1)) + if lwork >= n*nbmin+tsize { + nb = (lwork - tsize) / n + } else { + nb = 1 + } + } + } + } + ldwork := nb // work is used as an n×nb matrix. + + var i int + if nb < nbmin || nh <= nb { + // Use unblocked code below. + i = ilo + } else { + // Use blocked code. 
+ bi := blas64.Implementation() + iwt := n * nb // Size of the matrix Y and index where the matrix T starts in work. + for i = ilo; i < ihi-nx; i += nb { + ib := min(nb, ihi-i) + + // Reduce columns [i:i+ib] to Hessenberg form, returning the + // matrices V and T of the block reflector H = I - V*T*V^T + // which performs the reduction, and also the matrix Y = A*V*T. + impl.Dlahr2(ihi+1, i+1, ib, a[i:], lda, tau[i:], work[iwt:], ldt, work, ldwork) + + // Apply the block reflector H to A[:ihi+1,i+ib:ihi+1] from the + // right, computing A := A - Y * V^T. V[i+ib,i+ib-1] must be set + // to 1. + ei := a[(i+ib)*lda+i+ib-1] + a[(i+ib)*lda+i+ib-1] = 1 + bi.Dgemm(blas.NoTrans, blas.Trans, ihi+1, ihi-i-ib+1, ib, + -1, work, ldwork, + a[(i+ib)*lda+i:], lda, + 1, a[i+ib:], lda) + a[(i+ib)*lda+i+ib-1] = ei + + // Apply the block reflector H to A[0:i+1,i+1:i+ib-1] from the + // right. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, i+1, ib-1, + 1, a[(i+1)*lda+i:], lda, work, ldwork) + for j := 0; j <= ib-2; j++ { + bi.Daxpy(i+1, -1, work[j:], ldwork, a[i+j+1:], lda) + } + + // Apply the block reflector H to A[i+1:ihi+1,i+ib:n] from the + // left. + impl.Dlarfb(blas.Left, blas.Trans, lapack.Forward, lapack.ColumnWise, + ihi-i, n-i-ib, ib, + a[(i+1)*lda+i:], lda, work[iwt:], ldt, a[(i+1)*lda+i+ib:], lda, work, ldwork) + } + } + // Use unblocked code to reduce the rest of the matrix. + impl.Dgehd2(n, i, ihi, a, lda, tau, work) + work[0] = float64(lwkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go new file mode 100644 index 0000000000..abc96f7d2a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelq2.go @@ -0,0 +1,65 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgelq2 computes the LQ factorization of the m×n matrix A. +// +// In an LQ factorization, L is a lower triangular m×n matrix, and Q is an n×n +// orthonormal matrix. +// +// a is modified to contain the information to construct L and Q. +// The lower triangle of a contains the matrix L. The upper triangular elements +// (not including the diagonal) contain the elementary reflectors. tau is modified +// to contain the reflector scales. tau must have length of at least k = min(m,n) +// and this function will panic otherwise. +// +// See Dgeqr2 for a description of the elementary reflectors and orthonormal +// matrix Q. Q is constructed as a product of these elementary reflectors, +// Q = H_{k-1} * ... * H_1 * H_0. +// +// work is temporary storage of length at least m and this function will panic otherwise. +// +// Dgelq2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgelq2(m, n int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < m: + panic(shortWork) + } + + for i := 0; i < k; i++ { + a[i*lda+i], tau[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) + if i < m-1 { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(blas.Right, m-i-1, n-i, + a[i*lda+i:], 1, + tau[i], + a[(i+1)*lda+i:], lda, + work) + a[i*lda+i] = aii + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go new file mode 100644 index 0000000000..f1fd13a019 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgelqf.go @@ -0,0 +1,97 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgelqf computes the LQ factorization of the m×n matrix A using a blocked +// algorithm. See the documentation for Dgelq2 for a description of the +// parameters at entry and exit. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m, and this function will panic otherwise. +// Dgelqf is a blocked LQ factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgelqf, +// the optimal work length will be stored into work[0]. +// +// tau must have length at least min(m,n), and this function will panic otherwise. +func (impl Implementation) Dgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, m) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + k := min(m, n) + if k == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1) + if lwork == -1 { + work[0] = float64(m * nb) + return + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + if len(tau) < k { + panic(shortTau) + } + + // Find the optimal blocking size based on the size of available memory + // and optimal machine parameters. + nbmin := 2 + var nx int + iws := m + if 1 < nb && nb < k { + nx = max(0, impl.Ilaenv(3, "DGELQF", " ", m, n, -1, -1)) + if nx < k { + iws = m * nb + if lwork < iws { + nb = lwork / m + nbmin = max(2, impl.Ilaenv(2, "DGELQF", " ", m, n, -1, -1)) + } + } + } + ldwork := nb + // Computed blocked LQ factorization. 
+ var i int + if nbmin <= nb && nb < k && nx < k { + for i = 0; i < k-nx; i += nb { + ib := min(k-i, nb) + impl.Dgelq2(ib, n-i, a[i*lda+i:], lda, tau[i:], work) + if i+ib < m { + impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + impl.Dlarfb(blas.Right, blas.NoTrans, lapack.Forward, lapack.RowWise, + m-i-ib, n-i, ib, + a[i*lda+i:], lda, + work, ldwork, + a[(i+ib)*lda+i:], lda, + work[ib*ldwork:], ldwork) + } + } + } + // Perform unblocked LQ factorization on the remainder. + if i < k { + impl.Dgelq2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work) + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go new file mode 100644 index 0000000000..a3894b6a0b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgels.go @@ -0,0 +1,219 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgels finds a minimum-norm solution based on the matrices A and B using the +// QR or LQ factorization. Dgels returns false if the matrix +// A is singular, and true if this solution was successfully found. +// +// The minimization problem solved depends on the input parameters. +// +// 1. If m >= n and trans == blas.NoTrans, Dgels finds X such that || A*X - B||_2 +// is minimized. +// 2. If m < n and trans == blas.NoTrans, Dgels finds the minimum norm solution of +// A * X = B. +// 3. If m >= n and trans == blas.Trans, Dgels finds the minimum norm solution of +// A^T * X = B. +// 4. If m < n and trans == blas.Trans, Dgels finds X such that || A*X - B||_2 +// is minimized. +// Note that the least-squares solutions (cases 1 and 3) perform the minimization +// per column of B. This is not the same as finding the minimum-norm matrix. 
+// +// The matrix A is a general matrix of size m×n and is modified during this call. +// The input matrix B is of size max(m,n)×nrhs, and serves two purposes. On entry, +// the elements of b specify the input matrix B. B has size m×nrhs if +// trans == blas.NoTrans, and n×nrhs if trans == blas.Trans. On exit, the +// leading submatrix of b contains the solution vectors X. If trans == blas.NoTrans, +// this submatrix is of size n×nrhs, and of size m×nrhs otherwise. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= max(m,n) + max(m,n,nrhs), and this function will panic +// otherwise. A longer work will enable blocked algorithms to be called. +// In the special case that lwork == -1, work[0] will be set to the optimal working +// length. +func (impl Implementation) Dgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool { + mn := min(m, n) + minwrk := mn + max(mn, nrhs) + switch { + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case nrhs < 0: + panic(nrhsLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, nrhs): + panic(badLdB) + case lwork < max(1, minwrk) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if mn == 0 || nrhs == 0 { + impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb) + work[0] = 1 + return true + } + + // Find optimal block size. 
+ var nb int + if m >= n { + nb = impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1) + if trans != blas.NoTrans { + nb = max(nb, impl.Ilaenv(1, "DORMQR", "LN", m, nrhs, n, -1)) + } else { + nb = max(nb, impl.Ilaenv(1, "DORMQR", "LT", m, nrhs, n, -1)) + } + } else { + nb = impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1) + if trans != blas.NoTrans { + nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LT", n, nrhs, m, -1)) + } else { + nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LN", n, nrhs, m, -1)) + } + } + wsize := max(1, mn+max(mn, nrhs)*nb) + work[0] = float64(wsize) + + if lwork == -1 { + return true + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(b) < (max(m, n)-1)*ldb+nrhs: + panic(shortB) + } + + // Scale the input matrices if they contain extreme values. + smlnum := dlamchS / dlamchP + bignum := 1 / smlnum + anrm := impl.Dlange(lapack.MaxAbs, m, n, a, lda, nil) + var iascl int + if anrm > 0 && anrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, m, n, a, lda) + iascl = 1 + } else if anrm > bignum { + impl.Dlascl(lapack.General, 0, 0, anrm, bignum, m, n, a, lda) + } else if anrm == 0 { + // Matrix is all zeros. + impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb) + return true + } + brow := m + if trans != blas.NoTrans { + brow = n + } + bnrm := impl.Dlange(lapack.MaxAbs, brow, nrhs, b, ldb, nil) + ibscl := 0 + if bnrm > 0 && bnrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, bnrm, smlnum, brow, nrhs, b, ldb) + ibscl = 1 + } else if bnrm > bignum { + impl.Dlascl(lapack.General, 0, 0, bnrm, bignum, brow, nrhs, b, ldb) + ibscl = 2 + } + + // Solve the minimization problem using a QR or an LQ decomposition. 
+ var scllen int + if m >= n { + impl.Dgeqrf(m, n, a, lda, work, work[mn:], lwork-mn) + if trans == blas.NoTrans { + impl.Dormqr(blas.Left, blas.Trans, m, nrhs, n, + a, lda, + work[:n], + b, ldb, + work[mn:], lwork-mn) + ok := impl.Dtrtrs(blas.Upper, blas.NoTrans, blas.NonUnit, n, nrhs, + a, lda, + b, ldb) + if !ok { + return false + } + scllen = n + } else { + ok := impl.Dtrtrs(blas.Upper, blas.Trans, blas.NonUnit, n, nrhs, + a, lda, + b, ldb) + if !ok { + return false + } + for i := n; i < m; i++ { + for j := 0; j < nrhs; j++ { + b[i*ldb+j] = 0 + } + } + impl.Dormqr(blas.Left, blas.NoTrans, m, nrhs, n, + a, lda, + work[:n], + b, ldb, + work[mn:], lwork-mn) + scllen = m + } + } else { + impl.Dgelqf(m, n, a, lda, work, work[mn:], lwork-mn) + if trans == blas.NoTrans { + ok := impl.Dtrtrs(blas.Lower, blas.NoTrans, blas.NonUnit, + m, nrhs, + a, lda, + b, ldb) + if !ok { + return false + } + for i := m; i < n; i++ { + for j := 0; j < nrhs; j++ { + b[i*ldb+j] = 0 + } + } + impl.Dormlq(blas.Left, blas.Trans, n, nrhs, m, + a, lda, + work, + b, ldb, + work[mn:], lwork-mn) + scllen = n + } else { + impl.Dormlq(blas.Left, blas.NoTrans, n, nrhs, m, + a, lda, + work, + b, ldb, + work[mn:], lwork-mn) + ok := impl.Dtrtrs(blas.Lower, blas.Trans, blas.NonUnit, + m, nrhs, + a, lda, + b, ldb) + if !ok { + return false + } + } + } + + // Adjust answer vector based on scaling. 
+ if iascl == 1 { + impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, scllen, nrhs, b, ldb) + } + if iascl == 2 { + impl.Dlascl(lapack.General, 0, 0, anrm, bignum, scllen, nrhs, b, ldb) + } + if ibscl == 1 { + impl.Dlascl(lapack.General, 0, 0, smlnum, bnrm, scllen, nrhs, b, ldb) + } + if ibscl == 2 { + impl.Dlascl(lapack.General, 0, 0, bignum, bnrm, scllen, nrhs, b, ldb) + } + + work[0] = float64(wsize) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go new file mode 100644 index 0000000000..3f3ddb163f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeql2.go @@ -0,0 +1,61 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgeql2 computes the QL factorization of the m×n matrix A. That is, Dgeql2 +// computes Q and L such that +// A = Q * L +// where Q is an m×m orthonormal matrix and L is a lower trapezoidal matrix. +// +// Q is represented as a product of elementary reflectors, +// Q = H_{k-1} * ... * H_1 * H_0 +// where k = min(m,n) and each H_i has the form +// H_i = I - tau[i] * v_i * v_i^T +// Vector v_i has v[m-k+i+1:m] = 0, v[m-k+i] = 1, and v[:m-k+i+1] is stored on +// exit in A[0:m-k+i-1, n-k+i]. +// +// tau must have length at least min(m,n), and Dgeql2 will panic otherwise. +// +// work is temporary memory storage and must have length at least n. +// +// Dgeql2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgeql2(m, n int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
+ k := min(m, n) + if k == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < n: + panic(shortWork) + } + + var aii float64 + for i := k - 1; i >= 0; i-- { + // Generate elementary reflector H_i to annihilate A[0:m-k+i-1, n-k+i]. + aii, tau[i] = impl.Dlarfg(m-k+i+1, a[(m-k+i)*lda+n-k+i], a[n-k+i:], lda) + + // Apply H_i to A[0:m-k+i, 0:n-k+i-1] from the left. + a[(m-k+i)*lda+n-k+i] = 1 + impl.Dlarf(blas.Left, m-k+i+1, n-k+i, a[n-k+i:], lda, tau[i], a, lda, work) + a[(m-k+i)*lda+n-k+i] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go new file mode 100644 index 0000000000..6949da967a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqp3.go @@ -0,0 +1,186 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgeqp3 computes a QR factorization with column pivoting of the +// m×n matrix A: A*P = Q*R using Level 3 BLAS. +// +// The matrix Q is represented as a product of elementary reflectors +// Q = H_0 H_1 . . . H_{k-1}, where k = min(m,n). +// Each H_i has the form +// H_i = I - tau * v * v^T +// where tau and v are real vectors with v[0:i-1] = 0 and v[i] = 1; +// v[i:m] is stored on exit in A[i:m, i], and tau in tau[i]. +// +// jpvt specifies a column pivot to be applied to A. If +// jpvt[j] is at least zero, the jth column of A is permuted +// to the front of A*P (a leading column), if jpvt[j] is -1 +// the jth column of A is a free column. If jpvt[j] < -1, Dgeqp3 +// will panic. On return, jpvt holds the permutation that was +// applied; the jth column of A*P was the jpvt[j] column of A. +// jpvt must have length n or Dgeqp3 will panic. 
+// +// tau holds the scalar factors of the elementary reflectors. +// It must have length min(m, n), otherwise Dgeqp3 will panic. +// +// work must have length at least max(1,lwork), and lwork must be at least +// 3*n+1, otherwise Dgeqp3 will panic. For optimal performance lwork must +// be at least 2*n+(n+1)*nb, where nb is the optimal blocksize. On return, +// work[0] will contain the optimal value of lwork. +// +// If lwork == -1, instead of performing Dgeqp3, only the optimal value of lwork +// will be stored in work[0]. +// +// Dgeqp3 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgeqp3(m, n int, a []float64, lda int, jpvt []int, tau, work []float64, lwork int) { + const ( + inb = 1 + inbmin = 2 + ixover = 3 + ) + + minmn := min(m, n) + iws := 3*n + 1 + if minmn == 0 { + iws = 1 + } + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < iws && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if minmn == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(inb, "DGEQRF", " ", m, n, -1, -1) + if lwork == -1 { + work[0] = float64(2*n + (n+1)*nb) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(jpvt) != n: + panic(badLenJpvt) + case len(tau) < minmn: + panic(shortTau) + } + + for _, v := range jpvt { + if v < -1 || n <= v { + panic(badJpvt) + } + } + + bi := blas64.Implementation() + + // Move initial columns up front. + var nfxd int + for j := 0; j < n; j++ { + if jpvt[j] == -1 { + jpvt[j] = j + continue + } + if j != nfxd { + bi.Dswap(m, a[j:], lda, a[nfxd:], lda) + jpvt[j], jpvt[nfxd] = jpvt[nfxd], j + } else { + jpvt[j] = j + } + nfxd++ + } + + // Factorize nfxd columns. + // + // Compute the QR factorization of nfxd columns and update remaining columns. 
+ if nfxd > 0 { + na := min(m, nfxd) + impl.Dgeqrf(m, na, a, lda, tau, work, lwork) + iws = max(iws, int(work[0])) + if na < n { + impl.Dormqr(blas.Left, blas.Trans, m, n-na, na, a, lda, tau[:na], a[na:], lda, + work, lwork) + iws = max(iws, int(work[0])) + } + } + + if nfxd >= minmn { + work[0] = float64(iws) + return + } + + // Factorize free columns. + sm := m - nfxd + sn := n - nfxd + sminmn := minmn - nfxd + + // Determine the block size. + nb = impl.Ilaenv(inb, "DGEQRF", " ", sm, sn, -1, -1) + nbmin := 2 + nx := 0 + + if 1 < nb && nb < sminmn { + // Determine when to cross over from blocked to unblocked code. + nx = max(0, impl.Ilaenv(ixover, "DGEQRF", " ", sm, sn, -1, -1)) + + if nx < sminmn { + // Determine if workspace is large enough for blocked code. + minws := 2*sn + (sn+1)*nb + iws = max(iws, minws) + if lwork < minws { + // Not enough workspace to use optimal nb. Reduce + // nb and determine the minimum value of nb. + nb = (lwork - 2*sn) / (sn + 1) + nbmin = max(2, impl.Ilaenv(inbmin, "DGEQRF", " ", sm, sn, -1, -1)) + } + } + } + + // Initialize partial column norms. + // The first n elements of work store the exact column norms. + for j := nfxd; j < n; j++ { + work[j] = bi.Dnrm2(sm, a[nfxd*lda+j:], lda) + work[n+j] = work[j] + } + j := nfxd + if nbmin <= nb && nb < sminmn && nx < sminmn { + // Use blocked code initially. + + // Compute factorization. + var fjb int + for topbmn := minmn - nx; j < topbmn; j += fjb { + jb := min(nb, topbmn-j) + + // Factorize jb columns among columns j:n. + fjb = impl.Dlaqps(m, n-j, j, jb, a[j:], lda, jpvt[j:], tau[j:], + work[j:n], work[j+n:2*n], work[2*n:2*n+jb], work[2*n+jb:], jb) + } + } + + // Use unblocked code to factor the last or only block. 
+ if j < minmn { + impl.Dlaqp2(m, n-j, j, a[j:], lda, jpvt[j:], tau[j:], + work[j:n], work[j+n:2*n], work[2*n:]) + } + + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go new file mode 100644 index 0000000000..3e35d7e2f4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqr2.go @@ -0,0 +1,76 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgeqr2 computes a QR factorization of the m×n matrix A. +// +// In a QR factorization, Q is an m×m orthonormal matrix, and R is an +// upper triangular m×n matrix. +// +// A is modified to contain the information to construct Q and R. +// The upper triangle of a contains the matrix R. The lower triangular elements +// (not including the diagonal) contain the elementary reflectors. tau is modified +// to contain the reflector scales. tau must have length at least min(m,n), and +// this function will panic otherwise. +// +// The ith elementary reflector can be explicitly constructed by first extracting +// the +// v[j] = 0 j < i +// v[j] = 1 j == i +// v[j] = a[j*lda+i] j > i +// and computing H_i = I - tau[i] * v * v^T. +// +// The orthonormal matrix Q can be constructed from a product of these elementary +// reflectors, Q = H_0 * H_1 * ... * H_{k-1}, where k = min(m,n). +// +// work is temporary storage of length at least n and this function will panic otherwise. +// +// Dgeqr2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgeqr2(m, n int, a []float64, lda int, tau, work []float64) { + // TODO(btracey): This is oriented such that columns of a are eliminated. + // This likely could be re-arranged to take better advantage of row-major + // storage. 
+ + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case len(work) < n: + panic(shortWork) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + for i := 0; i < k; i++ { + // Generate elementary reflector H_i. + a[i*lda+i], tau[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min((i+1), m-1)*lda+i:], lda) + if i < n-1 { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(blas.Left, m-i, n-i-1, + a[i*lda+i:], lda, + tau[i], + a[i*lda+i+1:], lda, + work) + a[i*lda+i] = aii + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go new file mode 100644 index 0000000000..300f8eea4e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgeqrf.go @@ -0,0 +1,108 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgeqrf computes the QR factorization of the m×n matrix A using a blocked +// algorithm. See the documentation for Dgeqr2 for a description of the +// parameters at entry and exit. +// +// work is temporary storage, and lwork specifies the usable memory length. +// The length of work must be at least max(1, lwork) and lwork must be -1 +// or at least n, otherwise this function will panic. +// Dgeqrf is a blocked QR factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgeqrf, +// the optimal work length will be stored into work[0]. +// +// tau must have length at least min(m,n), and this function will panic otherwise. 
+func (impl Implementation) Dgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + work[0] = 1 + return + } + + // nb is the optimal blocksize, i.e. the number of columns transformed at a time. + nb := impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1) + if lwork == -1 { + work[0] = float64(n * nb) + return + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + if len(tau) < k { + panic(shortTau) + } + + nbmin := 2 // Minimal block size. + var nx int // Use unblocked (unless changed in the next for loop) + iws := n + // Only consider blocked if the suggested block size is > 1 and the + // number of rows or columns is sufficiently large. + if 1 < nb && nb < k { + // nx is the block size at which the code switches from blocked + // to unblocked. + nx = max(0, impl.Ilaenv(3, "DGEQRF", " ", m, n, -1, -1)) + if k > nx { + iws = n * nb + if lwork < iws { + // Not enough workspace to use the optimal block + // size. Get the minimum block size instead. + nb = lwork / n + nbmin = max(2, impl.Ilaenv(2, "DGEQRF", " ", m, n, -1, -1)) + } + } + } + + // Compute QR using a blocked algorithm. + var i int + if nbmin <= nb && nb < k && nx < k { + ldwork := nb + for i = 0; i < k-nx; i += nb { + ib := min(k-i, nb) + // Compute the QR factorization of the current block. + impl.Dgeqr2(m-i, ib, a[i*lda+i:], lda, tau[i:], work) + if i+ib < n { + // Form the triangular factor of the block reflector and apply H^T + // In Dlarft, work becomes the T matrix. 
+ impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + impl.Dlarfb(blas.Left, blas.Trans, lapack.Forward, lapack.ColumnWise, + m-i, n-i-ib, ib, + a[i*lda+i:], lda, + work, ldwork, + a[i*lda+i+ib:], lda, + work[ib*ldwork:], ldwork) + } + } + } + // Call unblocked code on the remaining columns. + if i < k { + impl.Dgeqr2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work) + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go new file mode 100644 index 0000000000..60dac973a1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerq2.go @@ -0,0 +1,68 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dgerq2 computes an RQ factorization of the m×n matrix A, +// A = R * Q. +// On exit, if m <= n, the upper triangle of the subarray +// A[0:m, n-m:n] contains the m×m upper triangular matrix R. +// If m >= n, the elements on and above the (m-n)-th subdiagonal +// contain the m×n upper trapezoidal matrix R. +// The remaining elements, with tau, represent the +// orthogonal matrix Q as a product of min(m,n) elementary +// reflectors. +// +// The matrix Q is represented as a product of elementary reflectors +// Q = H_0 H_1 . . . H_{min(m,n)-1}. +// Each H(i) has the form +// H_i = I - tau_i * v * v^T +// where v is a vector with v[0:n-k+i-1] stored in A[m-k+i, 0:n-k+i-1], +// v[n-k+i:n] = 0 and v[n-k+i] = 1. +// +// tau must have length min(m,n) and work must have length m, otherwise +// Dgerq2 will panic. +// +// Dgerq2 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dgerq2(m, n int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case len(work) < m: + panic(shortWork) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + for i := k - 1; i >= 0; i-- { + // Generate elementary reflector H[i] to annihilate + // A[m-k+i, 0:n-k+i-1]. + mki := m - k + i + nki := n - k + i + var aii float64 + aii, tau[i] = impl.Dlarfg(nki+1, a[mki*lda+nki], a[mki*lda:], 1) + + // Apply H[i] to A[0:m-k+i-1, 0:n-k+i] from the right. + a[mki*lda+nki] = 1 + impl.Dlarf(blas.Right, mki, nki+1, a[mki*lda:], 1, tau[i], a, lda, work) + a[mki*lda+nki] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go new file mode 100644 index 0000000000..9b4aa050ef --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgerqf.go @@ -0,0 +1,129 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dgerqf computes an RQ factorization of the m×n matrix A, +// A = R * Q. +// On exit, if m <= n, the upper triangle of the subarray +// A[0:m, n-m:n] contains the m×m upper triangular matrix R. +// If m >= n, the elements on and above the (m-n)-th subdiagonal +// contain the m×n upper trapezoidal matrix R. +// The remaining elements, with tau, represent the +// orthogonal matrix Q as a product of min(m,n) elementary +// reflectors. +// +// The matrix Q is represented as a product of elementary reflectors +// Q = H_0 H_1 . . . H_{min(m,n)-1}. 
+// Each H(i) has the form +// H_i = I - tau_i * v * v^T +// where v is a vector with v[0:n-k+i-1] stored in A[m-k+i, 0:n-k+i-1], +// v[n-k+i:n] = 0 and v[n-k+i] = 1. +// +// tau must have length min(m,n), work must have length max(1, lwork), +// and lwork must be -1 or at least max(1, m), otherwise Dgerqf will panic. +// On exit, work[0] will contain the optimal length for work. +// +// Dgerqf is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dgerqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, m) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + k := min(m, n) + if k == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DGERQF", " ", m, n, -1, -1) + if lwork == -1 { + work[0] = float64(m * nb) + return + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + if len(tau) != k { + panic(badLenTau) + } + + nbmin := 2 + nx := 1 + iws := m + var ldwork int + if 1 < nb && nb < k { + // Determine when to cross over from blocked to unblocked code. + nx = max(0, impl.Ilaenv(3, "DGERQF", " ", m, n, -1, -1)) + if nx < k { + // Determine whether workspace is large enough for blocked code. + iws = m * nb + if lwork < iws { + // Not enough workspace to use optimal nb. Reduce + // nb and determine the minimum value of nb. + nb = lwork / m + nbmin = max(2, impl.Ilaenv(2, "DGERQF", " ", m, n, -1, -1)) + } + ldwork = nb + } + } + + var mu, nu int + if nbmin <= nb && nb < k && nx < k { + // Use blocked code initially. + // The last kk rows are handled by the block method. + ki := ((k - nx - 1) / nb) * nb + kk := min(k, ki+nb) + + var i int + for i = k - kk + ki; i >= k-kk; i -= nb { + ib := min(k-i, nb) + + // Compute the RQ factorization of the current block + // A[m-k+i:m-k+i+ib-1, 0:n-k+i+ib-1]. 
+ impl.Dgerq2(ib, n-k+i+ib, a[(m-k+i)*lda:], lda, tau[i:], work) + if m-k+i > 0 { + // Form the triangular factor of the block reflector + // H = H_{i+ib-1} . . . H_{i+1} H_i. + impl.Dlarft(lapack.Backward, lapack.RowWise, + n-k+i+ib, ib, a[(m-k+i)*lda:], lda, tau[i:], + work, ldwork) + + // Apply H to A[0:m-k+i-1, 0:n-k+i+ib-1] from the right. + impl.Dlarfb(blas.Right, blas.NoTrans, lapack.Backward, lapack.RowWise, + m-k+i, n-k+i+ib, ib, a[(m-k+i)*lda:], lda, + work, ldwork, + a, lda, + work[ib*ldwork:], ldwork) + } + } + mu = m - k + i + nb + nu = n - k + i + nb + } else { + mu = m + nu = n + } + + // Use unblocked code to factor the last or only block. + if mu > 0 && nu > 0 { + impl.Dgerq2(mu, nu, a, lda, tau, work) + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go new file mode 100644 index 0000000000..136f683e4b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgesvd.go @@ -0,0 +1,1374 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +const noSVDO = "dgesvd: not coded for overwrite" + +// Dgesvd computes the singular value decomposition of the input matrix A. +// +// The singular value decomposition is +// A = U * Sigma * V^T +// where Sigma is an m×n diagonal matrix containing the singular values of A, +// U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first +// min(m,n) columns of U and V are the left and right singular vectors of A +// respectively. +// +// jobU and jobVT are options for computing the singular vectors. 
The behavior +// is as follows +// jobU == lapack.SVDAll All m columns of U are returned in u +// jobU == lapack.SVDStore The first min(m,n) columns are returned in u +// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a +// jobU == lapack.SVDNone The columns of U are not computed. +// The behavior is the same for jobVT and the rows of V^T. At most one of jobU +// and jobVT can equal lapack.SVDOverwrite, and Dgesvd will panic otherwise. +// +// On entry, a contains the data for the m×n matrix A. During the call to Dgesvd +// the data is overwritten. On exit, A contains the appropriate singular vectors +// if either job is lapack.SVDOverwrite. +// +// s is a slice of length at least min(m,n) and on exit contains the singular +// values in decreasing order. +// +// u contains the left singular vectors on exit, stored column-wise. If +// jobU == lapack.SVDAll, u is of size m×m. If jobU == lapack.SVDStore u is +// of size m×min(m,n). If jobU == lapack.SVDOverwrite or lapack.SVDNone, u is +// not used. +// +// vt contains the right singular vectors on exit, stored row-wise. If +// jobVT == lapack.SVDAll, vt is of size n×n. If jobVT == lapack.SVDStore vt is +// of size min(m,n)×n. If jobVT == lapack.SVDOverwrite or lapack.SVDNone, vt is +// not used. +// +// work is a slice for storing temporary memory, and lwork is the usable size of +// the slice. lwork must be at least max(5*min(m,n), 3*min(m,n)+max(m,n)). +// If lwork == -1, instead of performing Dgesvd, the optimal work length will be +// stored into work[0]. Dgesvd will panic if the working memory has insufficient +// storage. +// +// Dgesvd returns whether the decomposition successfully completed.
+func (impl Implementation) Dgesvd(jobU, jobVT lapack.SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool) { + if jobU == lapack.SVDOverwrite || jobVT == lapack.SVDOverwrite { + panic(noSVDO) + } + + wantua := jobU == lapack.SVDAll + wantus := jobU == lapack.SVDStore + wantuas := wantua || wantus + wantuo := jobU == lapack.SVDOverwrite + wantun := jobU == lapack.SVDNone + if !(wantua || wantus || wantuo || wantun) { + panic(badSVDJob) + } + + wantva := jobVT == lapack.SVDAll + wantvs := jobVT == lapack.SVDStore + wantvas := wantva || wantvs + wantvo := jobVT == lapack.SVDOverwrite + wantvn := jobVT == lapack.SVDNone + if !(wantva || wantvs || wantvo || wantvn) { + panic(badSVDJob) + } + + if wantuo && wantvo { + panic(bothSVDOver) + } + + minmn := min(m, n) + minwork := 1 + if minmn > 0 { + minwork = max(3*minmn+max(m, n), 5*minmn) + } + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case ldu < 1, wantua && ldu < m, wantus && ldu < minmn: + panic(badLdU) + case ldvt < 1 || (wantvas && ldvt < n): + panic(badLdVT) + case lwork < minwork && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if minmn == 0 { + work[0] = 1 + return true + } + + // Compute optimal workspace size for subroutines. 
+ opts := string(jobU) + string(jobVT) + mnthr := impl.Ilaenv(6, "DGESVD", opts, m, n, 0, 0) + maxwrk := 1 + var wrkbl, bdspac int + if m >= n { + bdspac = 5 * n + impl.Dgeqrf(m, n, a, lda, nil, work, -1) + lwork_dgeqrf := int(work[0]) + + impl.Dorgqr(m, n, n, a, lda, nil, work, -1) + lwork_dorgqr_n := int(work[0]) + impl.Dorgqr(m, m, n, a, lda, nil, work, -1) + lwork_dorgqr_m := int(work[0]) + + impl.Dgebrd(n, n, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd := int(work[0]) + + impl.Dorgbr(lapack.GeneratePT, n, n, n, a, lda, nil, work, -1) + lwork_dorgbr_p := int(work[0]) + + impl.Dorgbr(lapack.GenerateQ, n, n, n, a, lda, nil, work, -1) + lwork_dorgbr_q := int(work[0]) + + if m >= mnthr { + if wantun { + // Path 1 (m much larger than n, jobU == None) + maxwrk = n + lwork_dgeqrf + maxwrk = max(maxwrk, 3*n+lwork_dgebrd) + if wantvo || wantvas { + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_p) + } + maxwrk = max(maxwrk, bdspac) + } else if wantuo && wantvn { + // Path 2 (m much larger than n, jobU == Overwrite, jobVT == None) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(n*n+wrkbl, n*n+m*n+n) + } else if wantuo && wantvas { + // Path 3 (m much larger than n, jobU == Overwrite, jobVT == Store or All) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(n*n+wrkbl, n*n+m*n+n) + } else if wantus && wantvn { + // Path 4 (m much larger than n, jobU == Store, jobVT == None) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } else if wantus && wantvo { + // Path 5 (m much larger than n, jobU == Store, jobVT == 
Overwrite) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*n*n + wrkbl + } else if wantus && wantvas { + // Path 6 (m much larger than n, jobU == Store, jobVT == Store or All) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_n) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } else if wantua && wantvn { + // Path 7 (m much larger than n, jobU == All, jobVT == None) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_m) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } else if wantua && wantvo { + // Path 8 (m much larger than n, jobU == All, jobVT == Overwrite) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_m) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*n*n + wrkbl + } else if wantua && wantvas { + // Path 9 (m much larger than n, jobU == All, jobVT == Store or All) + wrkbl = n + lwork_dgeqrf + wrkbl = max(wrkbl, n+lwork_dorgqr_m) + wrkbl = max(wrkbl, 3*n+lwork_dgebrd) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_q) + wrkbl = max(wrkbl, 3*n+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = n*n + wrkbl + } + } else { + // Path 10 (m at least n, but not much larger) + impl.Dgebrd(m, n, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd := int(work[0]) + maxwrk = 3*n + lwork_dgebrd + if wantus || wantuo { + impl.Dorgbr(lapack.GenerateQ, m, n, n, a, lda, nil, work, -1) + lwork_dorgbr_q = int(work[0]) + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_q) + } + if wantua { + impl.Dorgbr(lapack.GenerateQ, m, m, n, a, lda, nil, work, -1) + 
lwork_dorgbr_q := int(work[0]) + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_q) + } + if !wantvn { + maxwrk = max(maxwrk, 3*n+lwork_dorgbr_p) + } + maxwrk = max(maxwrk, bdspac) + } + } else { + bdspac = 5 * m + + impl.Dgelqf(m, n, a, lda, nil, work, -1) + lwork_dgelqf := int(work[0]) + + impl.Dorglq(n, n, m, nil, n, nil, work, -1) + lwork_dorglq_n := int(work[0]) + impl.Dorglq(m, n, m, a, lda, nil, work, -1) + lwork_dorglq_m := int(work[0]) + + impl.Dgebrd(m, m, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd := int(work[0]) + + impl.Dorgbr(lapack.GeneratePT, m, m, m, a, n, nil, work, -1) + lwork_dorgbr_p := int(work[0]) + + impl.Dorgbr(lapack.GenerateQ, m, m, m, a, n, nil, work, -1) + lwork_dorgbr_q := int(work[0]) + + if n >= mnthr { + if wantvn { + // Path 1t (n much larger than m, jobVT == None) + maxwrk = m + lwork_dgelqf + maxwrk = max(maxwrk, 3*m+lwork_dgebrd) + if wantuo || wantuas { + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_q) + } + maxwrk = max(maxwrk, bdspac) + } else if wantvo && wantun { + // Path 2t (n much larger than m, jobU == None, jobVT == Overwrite) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(m*m+wrkbl, m*m+m*n+m) + } else if wantvo && wantuas { + // Path 3t (n much larger than m, jobU == Store or All, jobVT == Overwrite) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = max(m*m+wrkbl, m*m+m*n+m) + } else if wantvs && wantun { + // Path 4t (n much larger than m, jobU == None, jobVT == Store) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } else if wantvs && wantuo { + // Path 5t (n 
much larger than m, jobU == Overwrite, jobVT == Store) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*m*m + wrkbl + } else if wantvs && wantuas { + // Path 6t (n much larger than m, jobU == Store or All, jobVT == Store) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_m) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } else if wantva && wantun { + // Path 7t (n much larger than m, jobU== None, jobVT == All) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_n) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } else if wantva && wantuo { + // Path 8t (n much larger than m, jobU == Overwrite, jobVT == All) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_n) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = 2*m*m + wrkbl + } else if wantva && wantuas { + // Path 9t (n much larger than m, jobU == Store or All, jobVT == All) + wrkbl = m + lwork_dgelqf + wrkbl = max(wrkbl, m+lwork_dorglq_n) + wrkbl = max(wrkbl, 3*m+lwork_dgebrd) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_p) + wrkbl = max(wrkbl, 3*m+lwork_dorgbr_q) + wrkbl = max(wrkbl, bdspac) + maxwrk = m*m + wrkbl + } + } else { + // Path 10t (n greater than m, but not much larger) + impl.Dgebrd(m, n, a, lda, s, nil, nil, nil, work, -1) + lwork_dgebrd = int(work[0]) + maxwrk = 3*m + lwork_dgebrd + if wantvs || wantvo { + impl.Dorgbr(lapack.GeneratePT, m, n, m, a, n, nil, work, -1) + lwork_dorgbr_p = int(work[0]) + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_p) + } + if wantva { + 
impl.Dorgbr(lapack.GeneratePT, n, n, m, a, n, nil, work, -1) + lwork_dorgbr_p = int(work[0]) + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_p) + } + if !wantun { + maxwrk = max(maxwrk, 3*m+lwork_dorgbr_q) + } + maxwrk = max(maxwrk, bdspac) + } + } + + maxwrk = max(maxwrk, minwork) + if lwork == -1 { + work[0] = float64(maxwrk) + return true + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + if len(s) < minmn { + panic(shortS) + } + if (len(u) < (m-1)*ldu+m && wantua) || (len(u) < (m-1)*ldu+minmn && wantus) { + panic(shortU) + } + if (len(vt) < (n-1)*ldvt+n && wantva) || (len(vt) < (minmn-1)*ldvt+n && wantvs) { + panic(shortVT) + } + + // Perform decomposition. + eps := dlamchE + smlnum := math.Sqrt(dlamchS) / eps + bignum := 1 / smlnum + + // Scale A if max element outside range [smlnum, bignum]. + anrm := impl.Dlange(lapack.MaxAbs, m, n, a, lda, nil) + var iscl bool + if anrm > 0 && anrm < smlnum { + iscl = true + impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, m, n, a, lda) + } else if anrm > bignum { + iscl = true + impl.Dlascl(lapack.General, 0, 0, anrm, bignum, m, n, a, lda) + } + + bi := blas64.Implementation() + var ie int + if m >= n { + // If A has sufficiently more rows than columns, use the QR decomposition. + if m >= mnthr { + // m >> n + if wantun { + // Path 1. + itau := 0 + iwork := itau + n + + // Compute A = Q * R. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Zero out below R. + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) + ie = 0 + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + // Bidiagonalize R in A. + impl.Dgebrd(n, n, a, lda, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + ncvt := 0 + if wantvo || wantvas { + impl.Dorgbr(lapack.GeneratePT, n, n, n, a, lda, work[itaup:], + work[iwork:], lwork-iwork) + ncvt = n + } + iwork = ie + n + + // Perform bidiagonal QR iteration computing right singular vectors + // of A in A if desired. 
+ ok = impl.Dbdsqr(blas.Upper, n, ncvt, 0, 0, s, work[ie:], + a, lda, work, 1, work, 1, work[iwork:]) + + // If right singular vectors desired in VT, copy them there. + if wantvas { + impl.Dlacpy(blas.All, n, n, a, lda, vt, ldvt) + } + } else if wantuo && wantvn { + // Path 2 + panic(noSVDO) + } else if wantuo && wantvas { + // Path 3 + panic(noSVDO) + } else if wantus { + if wantvn { + // Path 4 + if lwork >= n*n+max(4*n, bdspac) { + // Sufficient workspace for a fast algorithm. + ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*n { + ldworkr = lda + } else { + ldworkr = n + } + itau := ir + ldworkr*n + iwork := itau + n + // Compute A = Q * R. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Copy R to work[ir:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[ir+ldworkr:], ldworkr) + + // Generate Q in A. + impl.Dorgqr(m, n, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[ir:]. + impl.Dgebrd(n, n, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate left vectors bidiagonalizing R in work[ir:]. + impl.Dorgbr(lapack.GenerateQ, n, n, n, work[ir:], ldworkr, + work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, compuing left singular + // vectors of R in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, n, 0, n, 0, s, work[ie:], work, 1, + work[ir:], ldworkr, work, 1, work[iwork:]) + + // Multiply Q in A by left singular vectors of R in + // work[ir:], storing result in U. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, a, lda, + work[ir:], ldworkr, 0, u, ldu) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q*R, copying result to U. 
+ impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, n, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Zero out below R in A. + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) + + // Bidiagonalize R in A. + impl.Dgebrd(n, n, a, lda, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left vectors bidiagonalizing R. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, + a, lda, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left + // singular vectors of A in U. + ok = impl.Dbdsqr(blas.Upper, n, 0, m, 0, s, work[ie:], work, 1, + u, ldu, work, 1, work[iwork:]) + } + } else if wantvo { + // Path 5 + panic(noSVDO) + } else if wantvas { + // Path 6 + if lwork >= n*n+max(4*n, bdspac) { + // Sufficient workspace for a fast algorithm. + iu := 0 + var ldworku int + if lwork >= wrkbl+lda*n { + ldworku = lda + } else { + ldworku = n + } + itau := iu + ldworku*n + iwork := itau + n + + // Compute A = Q * R. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + // Copy R to work[iu:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[iu+ldworku:], ldworku) + + // Generate Q in A. + impl.Dorgqr(m, n, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[iu:], copying result to VT. + impl.Dgebrd(n, n, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, n, n, work[iu:], ldworku, vt, ldvt) + + // Generate left bidiagonalizing vectors in work[iu:]. 
+ impl.Dorgbr(lapack.GenerateQ, n, n, n, work[iu:], ldworku, + work[itauq:], work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of R in work[iu:], and computing right singular + // vectors of R in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, n, 0, s, work[ie:], + vt, ldvt, work[iu:], ldworku, work, 1, work[iwork:]) + + // Multiply Q in A by left singular vectors of R in + // work[iu:], storing result in U. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, a, lda, + work[iu:], ldworku, 0, u, ldu) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q * R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, n, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + + // Copy R to VT, zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, vt[ldvt:], ldvt) + + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in VT. + impl.Dgebrd(n, n, vt, ldvt, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in VT. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, + vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. 
+ ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } + } + } else if wantua { + if wantvn { + // Path 7 + if lwork >= n*n+max(max(n+m, 4*n), bdspac) { + // Sufficient workspace for a fast algorithm. + ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*n { + ldworkr = lda + } else { + ldworkr = n + } + itau := ir + ldworkr*n + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Copy R to work[ir:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[ir+ldworkr:], ldworkr) + + // Generate Q in U. + impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[ir:]. + impl.Dgebrd(n, n, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in work[ir:]. + impl.Dorgbr(lapack.GenerateQ, n, n, n, work[ir:], ldworkr, + work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of R in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, n, 0, n, 0, s, work[ie:], work, 1, + work[ir:], ldworkr, work, 1, work[iwork:]) + + // Multiply Q in U by left singular vectors of R in + // work[ir:], storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, u, ldu, + work[ir:], ldworkr, 0, a, lda) + + // Copy left singular vectors of A from A to U. + impl.Dlacpy(blas.All, m, n, a, lda, u, ldu) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. 
+ impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Zero out below R in A. + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, a[lda:], lda) + + // Bidiagonalize R in A. + impl.Dgebrd(n, n, a, lda, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in A. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, m, n, n, + a, lda, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left + // singular vectors of A in U. + ok = impl.Dbdsqr(blas.Upper, n, 0, m, 0, s, work[ie:], + work, 1, u, ldu, work, 1, work[iwork:]) + } + } else if wantvo { + // Path 8. + panic(noSVDO) + } else if wantvas { + // Path 9. + if lwork >= n*n+max(max(n+m, 4*n), bdspac) { + // Sufficient workspace for a fast algorithm. + iu := 0 + var ldworku int + if lwork >= wrkbl+lda*n { + ldworku = lda + } else { + ldworku = n + } + itau := iu + ldworku*n + iwork := itau + n + + // Compute A = Q * R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + + // Copy R to work[iu:], zeroing out below it. + impl.Dlacpy(blas.Upper, n, n, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, work[iu+ldworku:], ldworku) + + ie = itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in work[iu:], copying result to VT. + impl.Dgebrd(n, n, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, n, n, work[iu:], ldworku, vt, ldvt) + + // Generate left bidiagonalizing vectors in work[iu:]. 
+ impl.Dorgbr(lapack.GenerateQ, n, n, n, work[iu:], ldworku, + work[itauq:], work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of R in work[iu:] and computing right + // singular vectors of R in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, n, 0, s, work[ie:], + vt, ldvt, work[iu:], ldworku, work, 1, work[iwork:]) + + // Multiply Q in U by left singular vectors of R in + // work[iu:], storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, n, 1, + u, ldu, work[iu:], ldworku, 0, a, lda) + + // Copy left singular vectors of A from A to U. + impl.Dlacpy(blas.All, m, n, a, lda, u, ldu) + + /* + // Bidiagonalize R in VT. + impl.Dgebrd(n, n, vt, ldvt, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in VT. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, + m, n, n, vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in VT. + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + */ + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + n + + // Compute A = Q*R, copying result to U. + impl.Dgeqrf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + + // Generate Q in U. + impl.Dorgqr(m, m, n, u, ldu, work[itau:], work[iwork:], lwork-iwork) + + // Copy R from A to VT, zeroing out below it. 
+ impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt) + if n > 1 { + impl.Dlaset(blas.Lower, n-1, n-1, 0, 0, vt[ldvt:], ldvt) + } + + ie := itau + itauq := ie + n + itaup := itauq + n + iwork = itaup + n + + // Bidiagonalize R in VT. + impl.Dgebrd(n, n, vt, ldvt, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply Q in U by left bidiagonalizing vectors in VT. + impl.Dormbr(lapack.ApplyQ, blas.Right, blas.NoTrans, + m, n, n, vt, ldvt, work[itauq:], u, ldu, work[iwork:], lwork-iwork) + + // Generate right bidiagonizing vectors in VT. + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + n + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, n, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } + } + } + } else { + // Path 10. + // M at least N, but not much larger. + ie = 0 + itauq := ie + n + itaup := itauq + n + iwork := itaup + n + + // Bidiagonalize A. + impl.Dgebrd(m, n, a, lda, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + if wantuas { + // Left singular vectors are desired in U. Copy result to U and + // generate left biadiagonalizing vectors in U. + impl.Dlacpy(blas.Lower, m, n, a, lda, u, ldu) + var ncu int + if wantus { + ncu = n + } + if wantua { + ncu = m + } + impl.Dorgbr(lapack.GenerateQ, m, ncu, n, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + } + if wantvas { + // Right singular vectors are desired in VT. Copy result to VT and + // generate left biadiagonalizing vectors in VT. 
+ impl.Dlacpy(blas.Upper, n, n, a, lda, vt, ldvt) + impl.Dorgbr(lapack.GeneratePT, n, n, n, vt, ldvt, work[itaup:], work[iwork:], lwork-iwork) + } + if wantuo { + panic(noSVDO) + } + if wantvo { + panic(noSVDO) + } + iwork = ie + n + var nru, ncvt int + if wantuas || wantuo { + nru = m + } + if wantun { + nru = 0 + } + if wantvas || wantvo { + ncvt = n + } + if wantvn { + ncvt = 0 + } + if !wantuo && !wantvo { + // Perform bidiagonal QR iteration, if desired, computing left + // singular vectors in U and right singular vectors in VT. + ok = impl.Dbdsqr(blas.Upper, n, ncvt, nru, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } else { + // There will be two branches when the implementation is complete. + panic(noSVDO) + } + } + } else { + // A has more columns than rows. If A has sufficiently more columns than + // rows, first reduce using the LQ decomposition. + if n >= mnthr { + // n >> m. + if wantvn { + // Path 1t. + itau := 0 + iwork := itau + m + + // Compute A = L*Q. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Zero out above L. + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda) + ie := 0 + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in A. + impl.Dgebrd(m, m, a, lda, s, work[ie:itauq], + work[itauq:itaup], work[itaup:iwork], work[iwork:], lwork-iwork) + if wantuo || wantuas { + impl.Dorgbr(lapack.GenerateQ, m, m, m, a, lda, + work[itauq:], work[iwork:], lwork-iwork) + } + iwork = ie + m + nru := 0 + if wantuo || wantuas { + nru = m + } + + // Perform bidiagonal QR iteration, computing left singular vectors + // of A in A if desired. + ok = impl.Dbdsqr(blas.Upper, m, 0, nru, 0, s, work[ie:], + work, 1, a, lda, work, 1, work[iwork:]) + + // If left singular vectors desired in U, copy them there. + if wantuas { + impl.Dlacpy(blas.All, m, m, a, lda, u, ldu) + } + } else if wantvo && wantun { + // Path 2t. + panic(noSVDO) + } else if wantvo && wantuas { + // Path 3t. 
+ panic(noSVDO) + } else if wantvs { + if wantun { + // Path 4t. + if lwork >= m*m+max(4*m, bdspac) { + // Sufficient workspace for a fast algorithm. + ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*m { + ldworkr = lda + } else { + ldworkr = m + } + itau := ir + ldworkr*m + iwork := itau + m + + // Compute A = L*Q. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to work[ir:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[ir+1:], ldworkr) + + // Generate Q in A. + impl.Dorglq(m, n, m, a, lda, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[ir:]. + impl.Dgebrd(m, m, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate right vectors bidiagonalizing L in work[ir:]. + impl.Dorgbr(lapack.GeneratePT, m, m, m, work[ir:], ldworkr, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right singular + // vectors of L in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, m, m, 0, 0, s, work[ie:], + work[ir:], ldworkr, work, 1, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[ir:] by + // Q in A, storing result in VT. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[ir:], ldworkr, a, lda, 0, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + + // Compute A = L*Q. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Copy result to VT. + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(m, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Zero out above L in A. 
+ impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda) + + // Bidiagonalize L in A. + impl.Dgebrd(m, m, a, lda, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right vectors bidiagonalizing L by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + a, lda, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right + // singular vectors of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, 0, 0, s, work[ie:], + vt, ldvt, work, 1, work, 1, work[iwork:]) + } + } else if wantuo { + // Path 5t. + panic(noSVDO) + } else if wantuas { + // Path 6t. + if lwork >= m*m+max(4*m, bdspac) { + // Sufficient workspace for a fast algorithm. + iu := 0 + var ldworku int + if lwork >= wrkbl+lda*m { + ldworku = lda + } else { + ldworku = m + } + itau := iu + ldworku*m + iwork := itau + m + + // Compute A = L*Q. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to work[iu:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[iu+1:], ldworku) + + // Generate Q in A. + impl.Dorglq(m, n, m, a, lda, work[itau:], work[iwork:], lwork-iwork) + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[iu:], copying result to U. + impl.Dgebrd(m, m, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, m, work[iu:], ldworku, u, ldu) + + // Generate right bidiagionalizing vectors in work[iu:]. + impl.Dorgbr(lapack.GeneratePT, m, m, m, work[iu:], ldworku, + work[itaup:], work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. 
+ impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of L in U and computing right singular vectors of + // L in work[iu:]. + ok = impl.Dbdsqr(blas.Upper, m, m, m, 0, s, work[ie:], + work[iu:], ldworku, u, ldu, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[iu:] by + // Q in A, storing result in VT. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[iu:], ldworku, a, lda, 0, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + + // Compute A = L*Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(m, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to U, zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, u[1:], ldu) + + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in U. + impl.Dgebrd(m, m, u, ldu, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right bidiagonalizing vectors in U by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + u, ldu, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. + impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, m, 0, s, work[ie:], vt, ldvt, + u, ldu, work, 1, work[iwork:]) + } + } + } else if wantva { + if wantun { + // Path 7t. + if lwork >= m*m+max(max(n+m, 4*m), bdspac) { + // Sufficient workspace for a fast algorithm. 
+ ir := 0 + var ldworkr int + if lwork >= wrkbl+lda*m { + ldworkr = lda + } else { + ldworkr = m + } + itau := ir + ldworkr*m + iwork := itau + m + + // Compute A = L*Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Copy L to work[ir:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[ir:], ldworkr) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[ir+1:], ldworkr) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[ir:]. + impl.Dgebrd(m, m, work[ir:], ldworkr, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + + // Generate right bidiagonalizing vectors in work[ir:]. + impl.Dorgbr(lapack.GeneratePT, m, m, m, work[ir:], ldworkr, + work[itaup:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right + // singular vectors of L in work[ir:]. + ok = impl.Dbdsqr(blas.Upper, m, m, 0, 0, s, work[ie:], + work[ir:], ldworkr, work, 1, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[ir:] by + // Q in VT, storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[ir:], ldworkr, vt, ldvt, 0, a, lda) + + // Copy right singular vectors of A from A to VT. + impl.Dlacpy(blas.All, m, n, a, lda, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + // Compute A = L * Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + ie := itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Zero out above L in A. 
+ impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, a[1:], lda) + + // Bidiagonalize L in A. + impl.Dgebrd(m, m, a, lda, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right bidiagonalizing vectors in A by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + a, lda, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing right singular + // vectors of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, 0, 0, s, work[ie:], + vt, ldvt, work, 1, work, 1, work[iwork:]) + } + } else if wantuo { + panic(noSVDO) + } else if wantuas { + // Path 9t. + if lwork >= m*m+max(max(m+n, 4*m), bdspac) { + // Sufficient workspace for a fast algorithm. + iu := 0 + + var ldworku int + if lwork >= wrkbl+lda*m { + ldworku = lda + } else { + ldworku = m + } + itau := iu + ldworku*m + iwork := itau + m + + // Generate A = L * Q copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to work[iu:], zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, work[iu:], ldworku) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, work[iu+1:], ldworku) + ie = itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in work[iu:], copying result to U. + impl.Dgebrd(m, m, work[iu:], ldworku, s, work[ie:], + work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Lower, m, m, work[iu:], ldworku, u, ldu) + + // Generate right bidiagonalizing vectors in work[iu:]. + impl.Dorgbr(lapack.GeneratePT, m, m, m, work[iu:], ldworku, + work[itaup:], work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. 
+ impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of L in U and computing right singular vectors + // of L in work[iu:]. + ok = impl.Dbdsqr(blas.Upper, m, m, m, 0, s, work[ie:], + work[iu:], ldworku, u, ldu, work, 1, work[iwork:]) + + // Multiply right singular vectors of L in work[iu:] + // Q in VT, storing result in A. + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n, m, 1, + work[iu:], ldworku, vt, ldvt, 0, a, lda) + + // Copy right singular vectors of A from A to VT. + impl.Dlacpy(blas.All, m, n, a, lda, vt, ldvt) + } else { + // Insufficient workspace for a fast algorithm. + itau := 0 + iwork := itau + m + + // Compute A = L * Q, copying result to VT. + impl.Dgelqf(m, n, a, lda, work[itau:], work[iwork:], lwork-iwork) + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + + // Generate Q in VT. + impl.Dorglq(n, n, m, vt, ldvt, work[itau:], work[iwork:], lwork-iwork) + + // Copy L to U, zeroing out above it. + impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu) + impl.Dlaset(blas.Upper, m-1, m-1, 0, 0, u[1:], ldu) + + ie = itau + itauq := ie + m + itaup := itauq + m + iwork = itaup + m + + // Bidiagonalize L in U. + impl.Dgebrd(m, m, u, ldu, s, work[ie:], work[itauq:], + work[itaup:], work[iwork:], lwork-iwork) + + // Multiply right bidiagonalizing vectors in U by Q in VT. + impl.Dormbr(lapack.ApplyP, blas.Left, blas.Trans, m, n, m, + u, ldu, work[itaup:], vt, ldvt, work[iwork:], lwork-iwork) + + // Generate left bidiagonalizing vectors in U. + impl.Dorgbr(lapack.GenerateQ, m, m, m, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + iwork = ie + m + + // Perform bidiagonal QR iteration, computing left singular + // vectors of A in U and computing right singular vectors + // of A in VT. + ok = impl.Dbdsqr(blas.Upper, m, n, m, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } + } + } + } else { + // Path 10t. 
+ // N at least M, but not much larger. + ie = 0 + itauq := ie + m + itaup := itauq + m + iwork := itaup + m + + // Bidiagonalize A. + impl.Dgebrd(m, n, a, lda, s, work[ie:], work[itauq:], work[itaup:], work[iwork:], lwork-iwork) + if wantuas { + // If left singular vectors desired in U, copy result to U and + // generate left bidiagonalizing vectors in U. + impl.Dlacpy(blas.Lower, m, m, a, lda, u, ldu) + impl.Dorgbr(lapack.GenerateQ, m, m, n, u, ldu, work[itauq:], work[iwork:], lwork-iwork) + } + if wantvas { + // If right singular vectors desired in VT, copy result to VT + // and generate right bidiagonalizing vectors in VT. + impl.Dlacpy(blas.Upper, m, n, a, lda, vt, ldvt) + var nrvt int + if wantva { + nrvt = n + } else { + nrvt = m + } + impl.Dorgbr(lapack.GeneratePT, nrvt, n, m, vt, ldvt, work[itaup:], work[iwork:], lwork-iwork) + } + if wantuo { + panic(noSVDO) + } + if wantvo { + panic(noSVDO) + } + iwork = ie + m + var nru, ncvt int + if wantuas || wantuo { + nru = m + } + if wantvas || wantvo { + ncvt = n + } + if !wantuo && !wantvo { + // Perform bidiagonal QR iteration, if desired, computing left + // singular vectors in U and computing right singular vectors in + // VT. + ok = impl.Dbdsqr(blas.Lower, m, ncvt, nru, 0, s, work[ie:], + vt, ldvt, u, ldu, work, 1, work[iwork:]) + } else { + // There will be two branches when the implementation is complete. + panic(noSVDO) + } + } + } + if !ok { + if ie > 1 { + for i := 0; i < minmn-1; i++ { + work[i+1] = work[i+ie] + } + } + if ie < 1 { + for i := minmn - 2; i >= 0; i-- { + work[i+1] = work[i+ie] + } + } + } + // Undo scaling if necessary. 
+ if iscl { + if anrm > bignum { + impl.Dlascl(lapack.General, 0, 0, bignum, anrm, 1, minmn, s, minmn) + } + if !ok && anrm > bignum { + impl.Dlascl(lapack.General, 0, 0, bignum, anrm, 1, minmn-1, work[1:], minmn) + } + if anrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, smlnum, anrm, 1, minmn, s, minmn) + } + if !ok && anrm < smlnum { + impl.Dlascl(lapack.General, 0, 0, smlnum, anrm, 1, minmn-1, work[1:], minmn) + } + } + work[0] = float64(maxwrk) + return ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go new file mode 100644 index 0000000000..63ad72e99e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetf2.go @@ -0,0 +1,84 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetf2 computes the LU decomposition of the m×n matrix A. +// The LU decomposition is a factorization of a into +// A = P * L * U +// where P is a permutation matrix, L is a unit lower triangular matrix, and +// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored +// in place into a. +// +// ipiv is a permutation vector. It indicates that row i of the matrix was +// changed with ipiv[i]. ipiv must have length at least min(m,n), and will panic +// otherwise. ipiv is zero-indexed. +// +// Dgetf2 returns whether the matrix A is singular. The LU decomposition will +// be computed regardless of the singularity of A, but division by zero +// will occur if the false is returned and the result is used to solve a +// system of equations. +// +// Dgetf2 is an internal routine. It is exported for testing purposes. 
+func (Implementation) Dgetf2(m, n int, a []float64, lda int, ipiv []int) (ok bool) { + mn := min(m, n) + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if mn == 0 { + return true + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(ipiv) != mn: + panic(badLenIpiv) + } + + bi := blas64.Implementation() + + sfmin := dlamchS + ok = true + for j := 0; j < mn; j++ { + // Find a pivot and test for singularity. + jp := j + bi.Idamax(m-j, a[j*lda+j:], lda) + ipiv[j] = jp + if a[jp*lda+j] == 0 { + ok = false + } else { + // Swap the rows if necessary. + if jp != j { + bi.Dswap(n, a[j*lda:], 1, a[jp*lda:], 1) + } + if j < m-1 { + aj := a[j*lda+j] + if math.Abs(aj) >= sfmin { + bi.Dscal(m-j-1, 1/aj, a[(j+1)*lda+j:], lda) + } else { + for i := 0; i < m-j-1; i++ { + a[(j+1)*lda+j] = a[(j+1)*lda+j] / a[lda*j+j] + } + } + } + } + if j < mn-1 { + bi.Dger(m-j-1, n-j-1, -1, a[(j+1)*lda+j:], lda, a[j*lda+j+1:], 1, a[(j+1)*lda+j+1:], lda) + } + } + return ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go new file mode 100644 index 0000000000..ad01e71e4e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrf.go @@ -0,0 +1,85 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetrf computes the LU decomposition of the m×n matrix A. +// The LU decomposition is a factorization of A into +// A = P * L * U +// where P is a permutation matrix, L is a unit lower triangular matrix, and +// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored +// in place into a. +// +// ipiv is a permutation vector. 
It indicates that row i of the matrix was +// changed with ipiv[i]. ipiv must have length at least min(m,n), and will panic +// otherwise. ipiv is zero-indexed. +// +// Dgetrf is the blocked version of the algorithm. +// +// Dgetrf returns whether the matrix A is singular. The LU decomposition will +// be computed regardless of the singularity of A, but division by zero +// will occur if the false is returned and the result is used to solve a +// system of equations. +func (impl Implementation) Dgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool) { + mn := min(m, n) + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if mn == 0 { + return true + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(ipiv) != mn: + panic(badLenIpiv) + } + + bi := blas64.Implementation() + + nb := impl.Ilaenv(1, "DGETRF", " ", m, n, -1, -1) + if nb <= 1 || mn <= nb { + // Use the unblocked algorithm. 
+ return impl.Dgetf2(m, n, a, lda, ipiv) + } + ok = true + for j := 0; j < mn; j += nb { + jb := min(mn-j, nb) + blockOk := impl.Dgetf2(m-j, jb, a[j*lda+j:], lda, ipiv[j:j+jb]) + if !blockOk { + ok = false + } + for i := j; i <= min(m-1, j+jb-1); i++ { + ipiv[i] = j + ipiv[i] + } + impl.Dlaswp(j, a, lda, j, j+jb-1, ipiv[:j+jb], 1) + if j+jb < n { + impl.Dlaswp(n-j-jb, a[j+jb:], lda, j, j+jb-1, ipiv[:j+jb], 1) + bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.Unit, + jb, n-j-jb, 1, + a[j*lda+j:], lda, + a[j*lda+j+jb:], lda) + if j+jb < m { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m-j-jb, n-j-jb, jb, -1, + a[(j+jb)*lda+j:], lda, + a[j*lda+j+jb:], lda, + 1, a[(j+jb)*lda+j+jb:], lda) + } + } + } + return ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go new file mode 100644 index 0000000000..b2f2ae46b9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetri.go @@ -0,0 +1,116 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetri computes the inverse of the matrix A using the LU factorization computed +// by Dgetrf. On entry, a contains the PLU decomposition of A as computed by +// Dgetrf and on exit contains the reciprocal of the original matrix. +// +// Dgetri will not perform the inversion if the matrix is singular, and returns +// a boolean indicating whether the inversion was successful. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= n and this function will panic otherwise. +// Dgetri is a blocked inversion, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dgetri, +// the optimal work length will be stored into work[0]. 
func (impl Implementation) Dgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool) {
	// Minimum acceptable workspace length; also the size reported on exit.
	iws := max(1, n)
	switch {
	case n < 0:
		panic(nLT0)
	case lda < max(1, n):
		panic(badLdA)
	case lwork < iws && lwork != -1:
		panic(badLWork)
	case len(work) < max(1, lwork):
		panic(shortWork)
	}

	// Quick return if possible.
	if n == 0 {
		work[0] = 1
		return true
	}

	// Optimal block size for the blocked inversion.
	nb := impl.Ilaenv(1, "DGETRI", " ", n, -1, -1, -1)
	if lwork == -1 {
		// Workspace query: report the optimal length and do no work.
		work[0] = float64(n * nb)
		return true
	}

	switch {
	case len(a) < (n-1)*lda+n:
		panic(shortA)
	case len(ipiv) != n:
		panic(badLenIpiv)
	}

	// Form inv(U). If U is exactly singular, abandon the inversion.
	ok = impl.Dtrtri(blas.Upper, blas.NonUnit, n, a, lda)
	if !ok {
		return false
	}

	nbmin := 2
	if 1 < nb && nb < n {
		// The blocked path needs an n×nb scratch matrix. If the supplied
		// workspace is too small, shrink the block size to what fits.
		iws = max(n*nb, 1)
		if lwork < iws {
			nb = lwork / n
			nbmin = max(2, impl.Ilaenv(2, "DGETRI", " ", n, -1, -1, -1))
		}
	}
	ldwork := nb

	bi := blas64.Implementation()
	// Solve the equation inv(A)*L = inv(U) for inv(A), sweeping columns
	// right to left.
	// TODO(btracey): Replace this with a more row-major oriented algorithm.
	if nb < nbmin || n <= nb {
		// Unblocked code.
		for j := n - 1; j >= 0; j-- {
			for i := j + 1; i < n; i++ {
				// Copy current column of L to work and replace with zeros.
				work[i] = a[i*lda+j]
				a[i*lda+j] = 0
			}
			// Compute current column of inv(A).
			if j < n-1 {
				bi.Dgemv(blas.NoTrans, n, n-j-1, -1, a[(j+1):], lda, work[(j+1):], 1, 1, a[j:], lda)
			}
		}
	} else {
		// Blocked code.
		// nn is the starting column of the last (possibly partial) block.
		nn := ((n - 1) / nb) * nb
		for j := nn; j >= 0; j -= nb {
			jb := min(nb, n-j)
			// Copy current block column of L to work and replace
			// with zeros.
			for jj := j; jj < j+jb; jj++ {
				for i := jj + 1; i < n; i++ {
					work[i*ldwork+(jj-j)] = a[i*lda+jj]
					a[i*lda+jj] = 0
				}
			}
			// Compute current block column of inv(A).
			if j+jb < n {
				bi.Dgemm(blas.NoTrans, blas.NoTrans, n, jb, n-j-jb, -1, a[(j+jb):], lda, work[(j+jb)*ldwork:], ldwork, 1, a[j:], lda)
			}
			bi.Dtrsm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, jb, 1, work[j*ldwork:], ldwork, a[j:], lda)
		}
	}
	// Apply column interchanges in reverse of the factorization order.
	for j := n - 2; j >= 0; j-- {
		jp := ipiv[j]
		if jp != j {
			bi.Dswap(n, a[j:], lda, a[jp:], lda)
		}
	}
	work[0] = float64(iws)
	return true
}
+ if j+jb < n { + bi.Dgemm(blas.NoTrans, blas.NoTrans, n, jb, n-j-jb, -1, a[(j+jb):], lda, work[(j+jb)*ldwork:], ldwork, 1, a[j:], lda) + } + bi.Dtrsm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, jb, 1, work[j*ldwork:], ldwork, a[j:], lda) + } + } + // Apply column interchanges. + for j := n - 2; j >= 0; j-- { + jp := ipiv[j] + if jp != j { + bi.Dswap(n, a[j:], lda, a[jp:], lda) + } + } + work[0] = float64(iws) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go new file mode 100644 index 0000000000..ecc20d7c96 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dgetrs.go @@ -0,0 +1,72 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dgetrs solves a system of equations using an LU factorization. +// The system of equations solved is +// A * X = B if trans == blas.Trans +// A^T * X = B if trans == blas.NoTrans +// A is a general n×n matrix with stride lda. B is a general matrix of size n×nrhs. +// +// On entry b contains the elements of the matrix B. On exit, b contains the +// elements of X, the solution to the system of equations. +// +// a and ipiv contain the LU factorization of A and the permutation indices as +// computed by Dgetrf. ipiv is zero-indexed. +func (impl Implementation) Dgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int) { + switch { + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTrans) + case n < 0: + panic(nLT0) + case nrhs < 0: + panic(nrhsLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, nrhs): + panic(badLdB) + } + + // Quick return if possible. 
+ if n == 0 || nrhs == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(b) < (n-1)*ldb+nrhs: + panic(shortB) + case len(ipiv) != n: + panic(badLenIpiv) + } + + bi := blas64.Implementation() + + if trans == blas.NoTrans { + // Solve A * X = B. + impl.Dlaswp(nrhs, b, ldb, 0, n-1, ipiv, 1) + // Solve L * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.Unit, + n, nrhs, 1, a, lda, b, ldb) + // Solve U * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, + n, nrhs, 1, a, lda, b, ldb) + return + } + // Solve A^T * X = B. + // Solve U^T * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, + n, nrhs, 1, a, lda, b, ldb) + // Solve L^T * X = B, updating b. + bi.Dtrsm(blas.Left, blas.Lower, blas.Trans, blas.Unit, + n, nrhs, 1, a, lda, b, ldb) + impl.Dlaswp(nrhs, b, ldb, 0, n-1, ipiv, -1) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go new file mode 100644 index 0000000000..ac234dce33 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dggsvd3.go @@ -0,0 +1,242 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dggsvd3 computes the generalized singular value decomposition (GSVD) +// of an m×n matrix A and p×n matrix B: +// U^T*A*Q = D1*[ 0 R ] +// +// V^T*B*Q = D2*[ 0 R ] +// where U, V and Q are orthogonal matrices. +// +// Dggsvd3 returns k and l, the dimensions of the sub-blocks. k+l +// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T. 
// R is a (k+l)×(k+l) nonsingular upper triangular matrix, D1 and
// D2 are m×(k+l) and p×(k+l) diagonal matrices and of the following
// structures, respectively:
//
// If m-k-l >= 0,
//
//                    k  l
//       D1 =     k [ I  0 ]
//                l [ 0  C ]
//            m-k-l [ 0  0 ]
//
//                    k  l
//       D2 =     l [ 0  S ]
//              p-l [ 0  0 ]
//
//               n-k-l  k    l
//  [ 0 R ] = k [  0   R11  R12 ] k
//            l [  0    0   R22 ] l
//
// where
//
//  C = diag( alpha_k, ... , alpha_{k+l} ),
//  S = diag( beta_k, ... , beta_{k+l} ),
//  C^2 + S^2 = I.
//
// R is stored in
//  A[0:k+l, n-k-l:n]
// on exit.
//
// If m-k-l < 0,
//
//                  k m-k k+l-m
//       D1 =   k [ I  0    0  ]
//            m-k [ 0  C    0  ]
//
//                    k m-k k+l-m
//       D2 =   m-k [ 0  S    0  ]
//            k+l-m [ 0  0    I  ]
//              p-l [ 0  0    0  ]
//
//                 n-k-l  k   m-k  k+l-m
//  [ 0 R ] =    k [ 0   R11  R12  R13 ]
//             m-k [ 0    0   R22  R23 ]
//           k+l-m [ 0    0    0   R33 ]
//
// where
//  C = diag( alpha_k, ... , alpha_m ),
//  S = diag( beta_k, ... , beta_m ),
//  C^2 + S^2 = I.
//
//  R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n]
//      [  0  R22 R23 ]
// and R33 is stored in
//  B[m-k:l, n+m-k-l:n] on exit.
//
// Dggsvd3 computes C, S, R, and optionally the orthogonal transformation
// matrices U, V and Q.
//
// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior
// is as follows
//  jobU == lapack.GSVDU        Compute orthogonal matrix U
//  jobU == lapack.GSVDNone     Do not compute orthogonal matrix.
// The behavior is the same for jobV and jobQ with the exception that instead of
// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively.
// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the
// relevant job parameter is lapack.GSVDNone.
//
// alpha and beta must have length n or Dggsvd3 will panic. On exit, alpha and
// beta contain the generalized singular value pairs of A and B
//  alpha[0:k] = 1,
//  beta[0:k] = 0,
// if m-k-l >= 0,
//  alpha[k:k+l] = diag(C),
//  beta[k:k+l] = diag(S),
// if m-k-l < 0,
//  alpha[k:m]= C, alpha[m:k+l]= 0
//  beta[k:m] = S, beta[m:k+l] = 1.
// if k+l < n,
//  alpha[k+l:n] = 0 and
//  beta[k+l:n] = 0.
//
// On exit, iwork contains the permutation required to sort alpha descending.
//
// iwork must have length n, work must have length at least max(1, lwork), and
// lwork must be -1 or greater than n, otherwise Dggsvd3 will panic. If
// lwork is -1, work[0] holds the optimal lwork on return, but Dggsvd3 does
// not perform the GSVD.
func (impl Implementation) Dggsvd3(jobU, jobV, jobQ lapack.GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool) {
	wantu := jobU == lapack.GSVDU
	wantv := jobV == lapack.GSVDV
	wantq := jobQ == lapack.GSVDQ
	switch {
	case !wantu && jobU != lapack.GSVDNone:
		panic(badGSVDJob + "U")
	case !wantv && jobV != lapack.GSVDNone:
		panic(badGSVDJob + "V")
	case !wantq && jobQ != lapack.GSVDNone:
		panic(badGSVDJob + "Q")
	case m < 0:
		panic(mLT0)
	case n < 0:
		panic(nLT0)
	case p < 0:
		panic(pLT0)
	case lda < max(1, n):
		panic(badLdA)
	case ldb < max(1, n):
		panic(badLdB)
	case ldu < 1, wantu && ldu < m:
		panic(badLdU)
	case ldv < 1, wantv && ldv < p:
		panic(badLdV)
	case ldq < 1, wantq && ldq < n:
		panic(badLdQ)
	case len(iwork) < n:
		panic(shortWork)
	case lwork < 1 && lwork != -1:
		panic(badLWork)
	case len(work) < max(1, lwork):
		panic(shortWork)
	}

	// Determine optimal work length by a workspace query of Dggsvp3
	// (lwork == -1 asks Dggsvp3 only to write its optimum into work[0]).
	impl.Dggsvp3(jobU, jobV, jobQ,
		m, p, n,
		a, lda,
		b, ldb,
		0, 0,
		u, ldu,
		v, ldv,
		q, ldq,
		iwork,
		work, work, -1)
	lwkopt := n + int(work[0])
	lwkopt = max(lwkopt, 2*n)
	lwkopt = max(lwkopt, 1)
	work[0] = float64(lwkopt)
	if lwork == -1 {
		return 0, 0, true
	}

	switch {
	case len(a) < (m-1)*lda+n:
		panic(shortA)
	case len(b) < (p-1)*ldb+n:
		panic(shortB)
	case wantu && len(u) < (m-1)*ldu+m:
		panic(shortU)
	case wantv && len(v) < (p-1)*ldv+p:
		panic(shortV)
	case wantq && len(q) < (n-1)*ldq+n:
		panic(shortQ)
	case len(alpha) != n:
		panic(badLenAlpha)
	case len(beta) != n:
		panic(badLenBeta)
	}

	// Compute the Frobenius norm of matrices A and B.
	anorm := impl.Dlange(lapack.Frobenius, m, n, a, lda, nil)
	bnorm := impl.Dlange(lapack.Frobenius, p, n, b, ldb, nil)

	// Get machine precision and set up threshold for determining
	// the effective numerical rank of the matrices A and B.
	tola := float64(max(m, n)) * math.Max(anorm, dlamchS) * dlamchP
	tolb := float64(max(p, n)) * math.Max(bnorm, dlamchS) * dlamchP

	// Preprocessing: reduce A and B toward triangular form, yielding the
	// sub-block dimensions k and l.
	k, l = impl.Dggsvp3(jobU, jobV, jobQ,
		m, p, n,
		a, lda,
		b, ldb,
		tola, tolb,
		u, ldu,
		v, ldv,
		q, ldq,
		iwork,
		work[:n], work[n:], lwork-n)

	// Compute the GSVD of two upper "triangular" matrices.
	_, ok = impl.Dtgsja(jobU, jobV, jobQ,
		m, p, n,
		k, l,
		a, lda,
		b, ldb,
		tola, tolb,
		alpha, beta,
		u, ldu,
		v, ldv,
		q, ldq,
		work)

	// Sort the singular values and store the pivot indices in iwork.
	// Copy alpha to work, then sort alpha in work (selection sort, descending).
	bi := blas64.Implementation()
	bi.Dcopy(n, alpha, 1, work[:n], 1)
	ibnd := min(l, m-k)
	for i := 0; i < ibnd; i++ {
		// Scan for largest alpha_{k+i}.
		isub := i
		smax := work[k+i]
		for j := i + 1; j < ibnd; j++ {
			if v := work[k+j]; v > smax {
				isub = j
				smax = v
			}
		}
		if isub != i {
			work[k+isub] = work[k+i]
			work[k+i] = smax
			iwork[k+i] = k + isub
		} else {
			iwork[k+i] = k + i
		}
	}

	// Report the optimal workspace on exit.
	work[0] = float64(lwkopt)

	return k, l, ok
}
+// +// tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz +// iteration procedure. Generally, they are the same as used in the preprocessing +// step, for example, +// tola = max(m, n)*norm(A)*eps, +// tolb = max(p, n)*norm(B)*eps. +// Where eps is the machine epsilon. +// +// iwork must have length n, work must have length at least max(1, lwork), and +// lwork must be -1 or greater than zero, otherwise Dggsvp3 will panic. +// +// Dggsvp3 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dggsvp3(jobU, jobV, jobQ lapack.GSVDJob, m, p, n int, a []float64, lda int, b []float64, ldb int, tola, tolb float64, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, iwork []int, tau, work []float64, lwork int) (k, l int) { + wantu := jobU == lapack.GSVDU + wantv := jobV == lapack.GSVDV + wantq := jobQ == lapack.GSVDQ + switch { + case !wantu && jobU != lapack.GSVDNone: + panic(badGSVDJob + "U") + case !wantv && jobV != lapack.GSVDNone: + panic(badGSVDJob + "V") + case !wantq && jobQ != lapack.GSVDNone: + panic(badGSVDJob + "Q") + case m < 0: + panic(mLT0) + case p < 0: + panic(pLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + case ldu < 1, wantu && ldu < m: + panic(badLdU) + case ldv < 1, wantv && ldv < p: + panic(badLdV) + case ldq < 1, wantq && ldq < n: + panic(badLdQ) + case len(iwork) != n: + panic(shortWork) + case lwork < 1 && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + var lwkopt int + impl.Dgeqp3(p, n, b, ldb, iwork, tau, work, -1) + lwkopt = int(work[0]) + if wantv { + lwkopt = max(lwkopt, p) + } + lwkopt = max(lwkopt, min(n, p)) + lwkopt = max(lwkopt, m) + if wantq { + lwkopt = max(lwkopt, n) + } + impl.Dgeqp3(m, n, a, lda, iwork, tau, work, -1) + lwkopt = max(lwkopt, int(work[0])) + lwkopt = max(1, lwkopt) + if lwork == -1 { + work[0] = float64(lwkopt) + return 0, 0 + } + + switch 
{ + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(b) < (p-1)*ldb+n: + panic(shortB) + case wantu && len(u) < (m-1)*ldu+m: + panic(shortU) + case wantv && len(v) < (p-1)*ldv+p: + panic(shortV) + case wantq && len(q) < (n-1)*ldq+n: + panic(shortQ) + case len(tau) < n: + // tau check must come after lwkopt query since + // the Dggsvd3 call for lwkopt query may have + // lwork == -1, and tau is provided by work. + panic(shortTau) + } + + const forward = true + + // QR with column pivoting of B: B*P = V*[ S11 S12 ]. + // [ 0 0 ] + for i := range iwork[:n] { + iwork[i] = 0 + } + impl.Dgeqp3(p, n, b, ldb, iwork, tau, work, lwork) + + // Update A := A*P. + impl.Dlapmt(forward, m, n, a, lda, iwork) + + // Determine the effective rank of matrix B. + for i := 0; i < min(p, n); i++ { + if math.Abs(b[i*ldb+i]) > tolb { + l++ + } + } + + if wantv { + // Copy the details of V, and form V. + impl.Dlaset(blas.All, p, p, 0, 0, v, ldv) + if p > 1 { + impl.Dlacpy(blas.Lower, p-1, min(p, n), b[ldb:], ldb, v[ldv:], ldv) + } + impl.Dorg2r(p, p, min(p, n), v, ldv, tau, work) + } + + // Clean up B. + for i := 1; i < l; i++ { + r := b[i*ldb : i*ldb+i] + for j := range r { + r[j] = 0 + } + } + if p > l { + impl.Dlaset(blas.All, p-l, n, 0, 0, b[l*ldb:], ldb) + } + + if wantq { + // Set Q = I and update Q := Q*P. + impl.Dlaset(blas.All, n, n, 0, 1, q, ldq) + impl.Dlapmt(forward, n, n, q, ldq, iwork) + } + + if p >= l && n != l { + // RQ factorization of [ S11 S12 ]: [ S11 S12 ] = [ 0 S12 ]*Z. + impl.Dgerq2(l, n, b, ldb, tau, work) + + // Update A := A*Z^T. + impl.Dormr2(blas.Right, blas.Trans, m, n, l, b, ldb, tau, a, lda, work) + + if wantq { + // Update Q := Q*Z^T. + impl.Dormr2(blas.Right, blas.Trans, n, n, l, b, ldb, tau, q, ldq, work) + } + + // Clean up B. 
+ impl.Dlaset(blas.All, l, n-l, 0, 0, b, ldb) + for i := 1; i < l; i++ { + r := b[i*ldb+n-l : i*ldb+i+n-l] + for j := range r { + r[j] = 0 + } + } + } + + // Let N-L L + // A = [ A11 A12 ] M, + // + // then the following does the complete QR decomposition of A11: + // + // A11 = U*[ 0 T12 ]*P1^T. + // [ 0 0 ] + for i := range iwork[:n-l] { + iwork[i] = 0 + } + impl.Dgeqp3(m, n-l, a, lda, iwork[:n-l], tau, work, lwork) + + // Determine the effective rank of A11. + for i := 0; i < min(m, n-l); i++ { + if math.Abs(a[i*lda+i]) > tola { + k++ + } + } + + // Update A12 := U^T*A12, where A12 = A[0:m, n-l:n]. + impl.Dorm2r(blas.Left, blas.Trans, m, l, min(m, n-l), a, lda, tau, a[n-l:], lda, work) + + if wantu { + // Copy the details of U, and form U. + impl.Dlaset(blas.All, m, m, 0, 0, u, ldu) + if m > 1 { + impl.Dlacpy(blas.Lower, m-1, min(m, n-l), a[lda:], lda, u[ldu:], ldu) + } + impl.Dorg2r(m, m, min(m, n-l), u, ldu, tau, work) + } + + if wantq { + // Update Q[0:n, 0:n-l] := Q[0:n, 0:n-l]*P1. + impl.Dlapmt(forward, n, n-l, q, ldq, iwork[:n-l]) + } + + // Clean up A: set the strictly lower triangular part of + // A[0:k, 0:k] = 0, and A[k:m, 0:n-l] = 0. + for i := 1; i < k; i++ { + r := a[i*lda : i*lda+i] + for j := range r { + r[j] = 0 + } + } + if m > k { + impl.Dlaset(blas.All, m-k, n-l, 0, 0, a[k*lda:], lda) + } + + if n-l > k { + // RQ factorization of [ T11 T12 ] = [ 0 T12 ]*Z1. + impl.Dgerq2(k, n-l, a, lda, tau, work) + + if wantq { + // Update Q[0:n, 0:n-l] := Q[0:n, 0:n-l]*Z1^T. + impl.Dorm2r(blas.Right, blas.Trans, n, n-l, k, a, lda, tau, q, ldq, work) + } + + // Clean up A. + impl.Dlaset(blas.All, k, n-l-k, 0, 0, a, lda) + for i := 1; i < k; i++ { + r := a[i*lda+n-k-l : i*lda+i+n-k-l] + for j := range r { + a[j] = 0 + } + } + } + + if m > k { + // QR factorization of A[k:m, n-l:n]. + impl.Dgeqr2(m-k, l, a[k*lda+n-l:], lda, tau, work) + if wantu { + // Update U[:, k:m) := U[:, k:m]*U1. 
+ impl.Dorm2r(blas.Right, blas.NoTrans, m, m-k, min(m-k, l), a[k*lda+n-l:], lda, tau, u[k:], ldu, work) + } + + // Clean up A. + for i := k + 1; i < m; i++ { + r := a[i*lda+n-l : i*lda+min(n-l+i-k, n)] + for j := range r { + r[j] = 0 + } + } + } + + work[0] = float64(lwkopt) + return k, l +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go new file mode 100644 index 0000000000..ed3fbca851 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dhseqr.go @@ -0,0 +1,252 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dhseqr computes the eigenvalues of an n×n Hessenberg matrix H and, +// optionally, the matrices T and Z from the Schur decomposition +// H = Z T Z^T, +// where T is an n×n upper quasi-triangular matrix (the Schur form), and Z is +// the n×n orthogonal matrix of Schur vectors. +// +// Optionally Z may be postmultiplied into an input orthogonal matrix Q so that +// this routine can give the Schur factorization of a matrix A which has been +// reduced to the Hessenberg form H by the orthogonal matrix Q: +// A = Q H Q^T = (QZ) T (QZ)^T. +// +// If job == lapack.EigenvaluesOnly, only the eigenvalues will be computed. +// If job == lapack.EigenvaluesAndSchur, the eigenvalues and the Schur form T will +// be computed. +// For other values of job Dhseqr will panic. +// +// If compz == lapack.SchurNone, no Schur vectors will be computed and Z will not be +// referenced. +// If compz == lapack.SchurHess, on return Z will contain the matrix of Schur +// vectors of H. +// If compz == lapack.SchurOrig, on entry z is assumed to contain the orthogonal +// matrix Q that is the identity except for the submatrix +// Q[ilo:ihi+1,ilo:ihi+1]. 
On return z will be updated to the product Q*Z. +// +// ilo and ihi determine the block of H on which Dhseqr operates. It is assumed +// that H is already upper triangular in rows and columns [0:ilo] and [ihi+1:n], +// although it will be only checked that the block is isolated, that is, +// ilo == 0 or H[ilo,ilo-1] == 0, +// ihi == n-1 or H[ihi+1,ihi] == 0, +// and Dhseqr will panic otherwise. ilo and ihi are typically set by a previous +// call to Dgebal, otherwise they should be set to 0 and n-1, respectively. It +// must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo == 0 and ihi == -1, if n == 0. +// +// wr and wi must have length n. +// +// work must have length at least lwork and lwork must be at least max(1,n) +// otherwise Dhseqr will panic. The minimum lwork delivers very good and +// sometimes optimal performance, although lwork as large as 11*n may be +// required. On return, work[0] will contain the optimal value of lwork. +// +// If lwork is -1, instead of performing Dhseqr, the function only estimates the +// optimal workspace size and stores it into work[0]. Neither h nor z are +// accessed. +// +// unconverged indicates whether Dhseqr computed all the eigenvalues. +// +// If unconverged == 0, all the eigenvalues have been computed and their real +// and imaginary parts will be stored on return in wr and wi, respectively. If +// two eigenvalues are computed as a complex conjugate pair, they are stored in +// consecutive elements of wr and wi, say the i-th and (i+1)th, with wi[i] > 0 +// and wi[i+1] < 0. +// +// If unconverged == 0 and job == lapack.EigenvaluesAndSchur, on return H will +// contain the upper quasi-triangular matrix T from the Schur decomposition (the +// Schur form). 2×2 diagonal blocks (corresponding to complex conjugate pairs of +// eigenvalues) will be returned in standard form, with +// H[i,i] == H[i+1,i+1], +// and +// H[i+1,i]*H[i,i+1] < 0. 
+// The eigenvalues will be stored in wr and wi in the same order as on the +// diagonal of the Schur form returned in H, with +// wr[i] = H[i,i], +// and, if H[i:i+2,i:i+2] is a 2×2 diagonal block, +// wi[i] = sqrt(-H[i+1,i]*H[i,i+1]), +// wi[i+1] = -wi[i]. +// +// If unconverged == 0 and job == lapack.EigenvaluesOnly, the contents of h +// on return is unspecified. +// +// If unconverged > 0, some eigenvalues have not converged, and the blocks +// [0:ilo] and [unconverged:n] of wr and wi will contain those eigenvalues which +// have been successfully computed. Failures are rare. +// +// If unconverged > 0 and job == lapack.EigenvaluesOnly, on return the +// remaining unconverged eigenvalues are the eigenvalues of the upper Hessenberg +// matrix H[ilo:unconverged,ilo:unconverged]. +// +// If unconverged > 0 and job == lapack.EigenvaluesAndSchur, then on +// return +// (initial H) U = U (final H), (*) +// where U is an orthogonal matrix. The final H is upper Hessenberg and +// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. +// +// If unconverged > 0 and compz == lapack.SchurOrig, then on return +// (final Z) = (initial Z) U, +// where U is the orthogonal matrix in (*) regardless of the value of job. +// +// If unconverged > 0 and compz == lapack.SchurHess, then on return +// (final Z) = U, +// where U is the orthogonal matrix in (*) regardless of the value of job. +// +// References: +// [1] R. Byers. LAPACK 3.1 xHSEQR: Tuning and Implementation Notes on the +// Small Bulge Multi-Shift QR Algorithm with Aggressive Early Deflation. +// LAPACK Working Note 187 (2007) +// URL: http://www.netlib.org/lapack/lawnspdf/lawn187.pdf +// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: +// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix +// Anal. Appl. 23(4) (2002), pp. 929—947 +// URL: http://dx.doi.org/10.1137/S0895479801384573 +// [3] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. 
Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +// Dhseqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dhseqr(job lapack.SchurJob, compz lapack.SchurComp, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, z []float64, ldz int, work []float64, lwork int) (unconverged int) { + wantt := job == lapack.EigenvaluesAndSchur + wantz := compz == lapack.SchurHess || compz == lapack.SchurOrig + + switch { + case job != lapack.EigenvaluesOnly && job != lapack.EigenvaluesAndSchur: + panic(badSchurJob) + case compz != lapack.SchurNone && compz != lapack.SchurHess && compz != lapack.SchurOrig: + panic(badSchurComp) + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case ldh < max(1, n): + panic(badLdH) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return 0 + } + + // Quick return in case of a workspace query. + if lwork == -1 { + impl.Dlaqr04(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, ilo, ihi, z, ldz, work, -1, 1) + work[0] = math.Max(float64(n), work[0]) + return 0 + } + + switch { + case len(h) < (n-1)*ldh+n: + panic(shortH) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + case len(wr) < n: + panic(shortWr) + case len(wi) < n: + panic(shortWi) + } + + const ( + // Matrices of order ntiny or smaller must be processed by + // Dlahqr because of insufficient subdiagonal scratch space. + // This is a hard limit. + ntiny = 11 + + // nl is the size of a local workspace to help small matrices + // through a rare Dlahqr failure. nl > ntiny is required and + // nl <= nmin = Ilaenv(ispec=12,...) is recommended (the default + // value of nmin is 75). 
Using nl = 49 allows up to six + // simultaneous shifts and a 16×16 deflation window. + nl = 49 + ) + + // Copy eigenvalues isolated by Dgebal. + for i := 0; i < ilo; i++ { + wr[i] = h[i*ldh+i] + wi[i] = 0 + } + for i := ihi + 1; i < n; i++ { + wr[i] = h[i*ldh+i] + wi[i] = 0 + } + + // Initialize Z to identity matrix if requested. + if compz == lapack.SchurHess { + impl.Dlaset(blas.All, n, n, 0, 1, z, ldz) + } + + // Quick return if possible. + if ilo == ihi { + wr[ilo] = h[ilo*ldh+ilo] + wi[ilo] = 0 + return 0 + } + + // Dlahqr/Dlaqr04 crossover point. + nmin := impl.Ilaenv(12, "DHSEQR", string(job)+string(compz), n, ilo, ihi, lwork) + nmin = max(ntiny, nmin) + + if n > nmin { + // Dlaqr0 for big matrices. + unconverged = impl.Dlaqr04(wantt, wantz, n, ilo, ihi, h, ldh, wr[:ihi+1], wi[:ihi+1], + ilo, ihi, z, ldz, work, lwork, 1) + } else { + // Dlahqr for small matrices. + unconverged = impl.Dlahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr[:ihi+1], wi[:ihi+1], + ilo, ihi, z, ldz) + if unconverged > 0 { + // A rare Dlahqr failure! Dlaqr04 sometimes succeeds + // when Dlahqr fails. + kbot := unconverged + if n >= nl { + // Larger matrices have enough subdiagonal + // scratch space to call Dlaqr04 directly. + unconverged = impl.Dlaqr04(wantt, wantz, n, ilo, kbot, h, ldh, + wr[:ihi+1], wi[:ihi+1], ilo, ihi, z, ldz, work, lwork, 1) + } else { + // Tiny matrices don't have enough subdiagonal + // scratch space to benefit from Dlaqr04. Hence, + // tiny matrices must be copied into a larger + // array before calling Dlaqr04. + var hl [nl * nl]float64 + impl.Dlacpy(blas.All, n, n, h, ldh, hl[:], nl) + impl.Dlaset(blas.All, nl, nl-n, 0, 0, hl[n:], nl) + var workl [nl]float64 + unconverged = impl.Dlaqr04(wantt, wantz, nl, ilo, kbot, hl[:], nl, + wr[:ihi+1], wi[:ihi+1], ilo, ihi, z, ldz, workl[:], nl, 1) + work[0] = workl[0] + if wantt || unconverged > 0 { + impl.Dlacpy(blas.All, n, n, hl[:], nl, h, ldh) + } + } + } + } + // Zero out under the first subdiagonal, if necessary. 
+ if (wantt || unconverged > 0) && n > 2 { + impl.Dlaset(blas.Lower, n-2, n-2, 0, 0, h[2*ldh:], ldh) + } + + work[0] = math.Max(float64(n), work[0]) + return unconverged +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go new file mode 100644 index 0000000000..babc0b7c0d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlabrd.go @@ -0,0 +1,173 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlabrd reduces the first NB rows and columns of a real general m×n matrix +// A to upper or lower bidiagonal form by an orthogonal transformation +// Q**T * A * P +// If m >= n, A is reduced to upper bidiagonal form and upon exit the elements +// on and below the diagonal in the first nb columns represent the elementary +// reflectors, and the elements above the diagonal in the first nb rows represent +// the matrix P. If m < n, A is reduced to lower bidiagonal form and the elements +// P is instead stored above the diagonal. +// +// The reduction to bidiagonal form is stored in d and e, where d are the diagonal +// elements, and e are the off-diagonal elements. +// +// The matrices Q and P are products of elementary reflectors +// Q = H_0 * H_1 * ... * H_{nb-1} +// P = G_0 * G_1 * ... 
* G_{nb-1} +// where +// H_i = I - tauQ[i] * v_i * v_i^T +// G_i = I - tauP[i] * u_i * u_i^T +// +// As an example, on exit the entries of A when m = 6, n = 5, and nb = 2 +// [ 1 1 u1 u1 u1] +// [v1 1 1 u2 u2] +// [v1 v2 a a a] +// [v1 v2 a a a] +// [v1 v2 a a a] +// [v1 v2 a a a] +// and when m = 5, n = 6, and nb = 2 +// [ 1 u1 u1 u1 u1 u1] +// [ 1 1 u2 u2 u2 u2] +// [v1 1 a a a a] +// [v1 v2 a a a a] +// [v1 v2 a a a a] +// +// Dlabrd also returns the matrices X and Y which are used with U and V to +// apply the transformation to the unreduced part of the matrix +// A := A - V*Y^T - X*U^T +// and returns the matrices X and Y which are needed to apply the +// transformation to the unreduced part of A. +// +// X is an m×nb matrix, Y is an n×nb matrix. d, e, taup, and tauq must all have +// length at least nb. Dlabrd will panic if these size constraints are violated. +// +// Dlabrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlabrd(m, n, nb int, a []float64, lda int, d, e, tauQ, tauP, x []float64, ldx int, y []float64, ldy int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case nb < 0: + panic(nbLT0) + case nb > n: + panic(nbGTN) + case nb > m: + panic(nbGTM) + case lda < max(1, n): + panic(badLdA) + case ldx < max(1, nb): + panic(badLdX) + case ldy < max(1, nb): + panic(badLdY) + } + + if m == 0 || n == 0 || nb == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(d) < nb: + panic(shortD) + case len(e) < nb: + panic(shortE) + case len(tauQ) < nb: + panic(shortTauQ) + case len(tauP) < nb: + panic(shortTauP) + case len(x) < (m-1)*ldx+nb: + panic(shortX) + case len(y) < (n-1)*ldy+nb: + panic(shortY) + } + + bi := blas64.Implementation() + + if m >= n { + // Reduce to upper bidiagonal form. 
+ for i := 0; i < nb; i++ { + bi.Dgemv(blas.NoTrans, m-i, i, -1, a[i*lda:], lda, y[i*ldy:], 1, 1, a[i*lda+i:], lda) + bi.Dgemv(blas.NoTrans, m-i, i, -1, x[i*ldx:], ldx, a[i:], lda, 1, a[i*lda+i:], lda) + + a[i*lda+i], tauQ[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min(i+1, m-1)*lda+i:], lda) + d[i] = a[i*lda+i] + if i < n-1 { + // Compute Y[i+1:n, i]. + a[i*lda+i] = 1 + bi.Dgemv(blas.Trans, m-i, n-i-1, 1, a[i*lda+i+1:], lda, a[i*lda+i:], lda, 0, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i, i, 1, a[i*lda:], lda, a[i*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, y[(i+1)*ldy:], ldy, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i, i, 1, x[i*ldx:], ldx, a[i*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.Trans, i, n-i-1, -1, a[i+1:], lda, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dscal(n-i-1, tauQ[i], y[(i+1)*ldy+i:], ldy) + + // Update A[i, i+1:n]. + bi.Dgemv(blas.NoTrans, n-i-1, i+1, -1, y[(i+1)*ldy:], ldy, a[i*lda:], 1, 1, a[i*lda+i+1:], 1) + bi.Dgemv(blas.Trans, i, n-i-1, -1, a[i+1:], lda, x[i*ldx:], 1, 1, a[i*lda+i+1:], 1) + + // Generate reflection P[i] to annihilate A[i, i+2:n]. + a[i*lda+i+1], tauP[i] = impl.Dlarfg(n-i-1, a[i*lda+i+1], a[i*lda+min(i+2, n-1):], 1) + e[i] = a[i*lda+i+1] + a[i*lda+i+1] = 1 + + // Compute X[i+1:m, i]. + bi.Dgemv(blas.NoTrans, m-i-1, n-i-1, 1, a[(i+1)*lda+i+1:], lda, a[i*lda+i+1:], 1, 0, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.Trans, n-i-1, i+1, 1, y[(i+1)*ldy:], ldy, a[i*lda+i+1:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i+1, -1, a[(i+1)*lda:], lda, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.NoTrans, i, n-i-1, 1, a[i+1:], lda, a[i*lda+i+1:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, x[(i+1)*ldx:], ldx, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dscal(m-i-1, tauP[i], x[(i+1)*ldx+i:], ldx) + } + } + return + } + // Reduce to lower bidiagonal form. 
+ for i := 0; i < nb; i++ { + // Update A[i,i:n] + bi.Dgemv(blas.NoTrans, n-i, i, -1, y[i*ldy:], ldy, a[i*lda:], 1, 1, a[i*lda+i:], 1) + bi.Dgemv(blas.Trans, i, n-i, -1, a[i:], lda, x[i*ldx:], 1, 1, a[i*lda+i:], 1) + + // Generate reflection P[i] to annihilate A[i, i+1:n] + a[i*lda+i], tauP[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) + d[i] = a[i*lda+i] + if i < m-1 { + a[i*lda+i] = 1 + // Compute X[i+1:m, i]. + bi.Dgemv(blas.NoTrans, m-i-1, n-i, 1, a[(i+1)*lda+i:], lda, a[i*lda+i:], 1, 0, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.Trans, n-i, i, 1, y[i*ldy:], ldy, a[i*lda+i:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, a[(i+1)*lda:], lda, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dgemv(blas.NoTrans, i, n-i, 1, a[i:], lda, a[i*lda+i:], 1, 0, x[i:], ldx) + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, x[(i+1)*ldx:], ldx, x[i:], ldx, 1, x[(i+1)*ldx+i:], ldx) + bi.Dscal(m-i-1, tauP[i], x[(i+1)*ldx+i:], ldx) + + // Update A[i+1:m, i]. + bi.Dgemv(blas.NoTrans, m-i-1, i, -1, a[(i+1)*lda:], lda, y[i*ldy:], 1, 1, a[(i+1)*lda+i:], lda) + bi.Dgemv(blas.NoTrans, m-i-1, i+1, -1, x[(i+1)*ldx:], ldx, a[i:], lda, 1, a[(i+1)*lda+i:], lda) + + // Generate reflection Q[i] to annihilate A[i+2:m, i]. + a[(i+1)*lda+i], tauQ[i] = impl.Dlarfg(m-i-1, a[(i+1)*lda+i], a[min(i+2, m-1)*lda+i:], lda) + e[i] = a[(i+1)*lda+i] + a[(i+1)*lda+i] = 1 + + // Compute Y[i+1:n, i]. 
+ bi.Dgemv(blas.Trans, m-i-1, n-i-1, 1, a[(i+1)*lda+i+1:], lda, a[(i+1)*lda+i:], lda, 0, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i-1, i, 1, a[(i+1)*lda:], lda, a[(i+1)*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, y[(i+1)*ldy:], ldy, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dgemv(blas.Trans, m-i-1, i+1, 1, x[(i+1)*ldx:], ldx, a[(i+1)*lda+i:], lda, 0, y[i:], ldy) + bi.Dgemv(blas.Trans, i+1, n-i-1, -1, a[i+1:], lda, y[i:], ldy, 1, y[(i+1)*ldy+i:], ldy) + bi.Dscal(n-i-1, tauQ[i], y[(i+1)*ldy+i:], ldy) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go new file mode 100644 index 0000000000..e8ac1e439c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacn2.go @@ -0,0 +1,134 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlacn2 estimates the 1-norm of an n×n matrix A using sequential updates with +// matrix-vector products provided externally. +// +// Dlacn2 is called sequentially and it returns the value of est and kase to be +// used on the next call. +// On the initial call, kase must be 0. +// In between calls, x must be overwritten by +// A * X if kase was returned as 1, +// A^T * X if kase was returned as 2, +// and all other parameters must not be changed. +// On the final return, kase is returned as 0, v contains A*W where W is a +// vector, and est = norm(V)/norm(W) is a lower bound for 1-norm of A. +// +// v, x, and isgn must all have length n and n must be at least 1, otherwise +// Dlacn2 will panic. isave is used for temporary storage. +// +// Dlacn2 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlacn2(n int, v, x []float64, isgn []int, est float64, kase int, isave *[3]int) (float64, int) { + switch { + case n < 1: + panic(nLT1) + case len(v) < n: + panic(shortV) + case len(x) < n: + panic(shortX) + case len(isgn) < n: + panic(shortIsgn) + case isave[0] < 0 || 5 < isave[0]: + panic(badIsave) + case isave[0] == 0 && kase != 0: + panic(badIsave) + } + + const itmax = 5 + bi := blas64.Implementation() + + if kase == 0 { + for i := 0; i < n; i++ { + x[i] = 1 / float64(n) + } + kase = 1 + isave[0] = 1 + return est, kase + } + switch isave[0] { + case 1: + if n == 1 { + v[0] = x[0] + est = math.Abs(v[0]) + kase = 0 + return est, kase + } + est = bi.Dasum(n, x, 1) + for i := 0; i < n; i++ { + x[i] = math.Copysign(1, x[i]) + isgn[i] = int(x[i]) + } + kase = 2 + isave[0] = 2 + return est, kase + case 2: + isave[1] = bi.Idamax(n, x, 1) + isave[2] = 2 + for i := 0; i < n; i++ { + x[i] = 0 + } + x[isave[1]] = 1 + kase = 1 + isave[0] = 3 + return est, kase + case 3: + bi.Dcopy(n, x, 1, v, 1) + estold := est + est = bi.Dasum(n, v, 1) + sameSigns := true + for i := 0; i < n; i++ { + if int(math.Copysign(1, x[i])) != isgn[i] { + sameSigns = false + break + } + } + if !sameSigns && est > estold { + for i := 0; i < n; i++ { + x[i] = math.Copysign(1, x[i]) + isgn[i] = int(x[i]) + } + kase = 2 + isave[0] = 4 + return est, kase + } + case 4: + jlast := isave[1] + isave[1] = bi.Idamax(n, x, 1) + if x[jlast] != math.Abs(x[isave[1]]) && isave[2] < itmax { + isave[2] += 1 + for i := 0; i < n; i++ { + x[i] = 0 + } + x[isave[1]] = 1 + kase = 1 + isave[0] = 3 + return est, kase + } + case 5: + tmp := 2 * (bi.Dasum(n, x, 1)) / float64(3*n) + if tmp > est { + bi.Dcopy(n, x, 1, v, 1) + est = tmp + } + kase = 0 + return est, kase + } + // Iteration complete. 
Final stage + altsgn := 1.0 + for i := 0; i < n; i++ { + x[i] = altsgn * (1 + float64(i)/float64(n-1)) + altsgn *= -1 + } + kase = 1 + isave[0] = 5 + return est, kase +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go new file mode 100644 index 0000000000..a37f3b0dbd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlacpy.go @@ -0,0 +1,59 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dlacpy copies the elements of A specified by uplo into B. Uplo can specify +// a triangular portion with blas.Upper or blas.Lower, or can specify all of the +// elemest with blas.All. +// +// Dlacpy is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlacpy(uplo blas.Uplo, m, n int, a []float64, lda int, b []float64, ldb int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower && uplo != blas.All: + panic(badUplo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, n): + panic(badLdB) + } + + if m == 0 || n == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(b) < (m-1)*ldb+n: + panic(shortB) + } + + switch uplo { + case blas.Upper: + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + b[i*ldb+j] = a[i*lda+j] + } + } + case blas.Lower: + for i := 0; i < m; i++ { + for j := 0; j < min(i+1, n); j++ { + b[i*ldb+j] = a[i*lda+j] + } + } + case blas.All: + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + b[i*ldb+j] = a[i*lda+j] + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go new file mode 100644 index 0000000000..c071fec7de --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlae2.go @@ -0,0 +1,49 @@ +// 
Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlae2 computes the eigenvalues of a 2×2 symmetric matrix +// [a b] +// [b c] +// and returns the eigenvalue with the larger absolute value as rt1 and the +// smaller as rt2. +// +// Dlae2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlae2(a, b, c float64) (rt1, rt2 float64) { + sm := a + c + df := a - c + adf := math.Abs(df) + tb := b + b + ab := math.Abs(tb) + acmx := c + acmn := a + if math.Abs(a) > math.Abs(c) { + acmx = a + acmn = c + } + var rt float64 + if adf > ab { + rt = adf * math.Sqrt(1+(ab/adf)*(ab/adf)) + } else if adf < ab { + rt = ab * math.Sqrt(1+(adf/ab)*(adf/ab)) + } else { + rt = ab * math.Sqrt2 + } + if sm < 0 { + rt1 = 0.5 * (sm - rt) + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + return rt1, rt2 + } + if sm > 0 { + rt1 = 0.5 * (sm + rt) + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + return rt1, rt2 + } + rt1 = 0.5 * rt + rt2 = -0.5 * rt + return rt1, rt2 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go new file mode 100644 index 0000000000..74d75b9137 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaev2.go @@ -0,0 +1,82 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlaev2 computes the Eigen decomposition of a symmetric 2×2 matrix. +// The matrix is given by +// [a b] +// [b c] +// Dlaev2 returns rt1 and rt2, the eigenvalues of the matrix where |RT1| > |RT2|, +// and [cs1, sn1] which is the unit right eigenvalue for RT1. +// [ cs1 sn1] [a b] [cs1 -sn1] = [rt1 0] +// [-sn1 cs1] [b c] [sn1 cs1] [ 0 rt2] +// +// Dlaev2 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlaev2(a, b, c float64) (rt1, rt2, cs1, sn1 float64) { + sm := a + c + df := a - c + adf := math.Abs(df) + tb := b + b + ab := math.Abs(tb) + acmx := c + acmn := a + if math.Abs(a) > math.Abs(c) { + acmx = a + acmn = c + } + var rt float64 + if adf > ab { + rt = adf * math.Sqrt(1+(ab/adf)*(ab/adf)) + } else if adf < ab { + rt = ab * math.Sqrt(1+(adf/ab)*(adf/ab)) + } else { + rt = ab * math.Sqrt(2) + } + var sgn1 float64 + if sm < 0 { + rt1 = 0.5 * (sm - rt) + sgn1 = -1 + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + } else if sm > 0 { + rt1 = 0.5 * (sm + rt) + sgn1 = 1 + rt2 = (acmx/rt1)*acmn - (b/rt1)*b + } else { + rt1 = 0.5 * rt + rt2 = -0.5 * rt + sgn1 = 1 + } + var cs, sgn2 float64 + if df >= 0 { + cs = df + rt + sgn2 = 1 + } else { + cs = df - rt + sgn2 = -1 + } + acs := math.Abs(cs) + if acs > ab { + ct := -tb / cs + sn1 = 1 / math.Sqrt(1+ct*ct) + cs1 = ct * sn1 + } else { + if ab == 0 { + cs1 = 1 + sn1 = 0 + } else { + tn := -cs / tb + cs1 = 1 / math.Sqrt(1+tn*tn) + sn1 = tn * cs1 + } + } + if sgn1 == sgn2 { + tn := cs1 + cs1 = -sn1 + sn1 = tn + } + return rt1, rt2, cs1, sn1 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go new file mode 100644 index 0000000000..2b79bd8ae7 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaexc.go @@ -0,0 +1,269 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlaexc swaps two adjacent diagonal blocks of order 1 or 2 in an n×n upper +// quasi-triangular matrix T by an orthogonal similarity transformation. 
+// +// T must be in Schur canonical form, that is, block upper triangular with 1×1 +// and 2×2 diagonal blocks; each 2×2 diagonal block has its diagonal elements +// equal and its off-diagonal elements of opposite sign. On return, T will +// contain the updated matrix again in Schur canonical form. +// +// If wantq is true, the transformation is accumulated in the n×n matrix Q, +// otherwise Q is not referenced. +// +// j1 is the index of the first row of the first block. n1 and n2 are the order +// of the first and second block, respectively. +// +// work must have length at least n, otherwise Dlaexc will panic. +// +// If ok is false, the transformed matrix T would be too far from Schur form. +// The blocks are not swapped, and T and Q are not modified. +// +// If n1 and n2 are both equal to 1, Dlaexc will always return true. +// +// Dlaexc is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaexc(wantq bool, n int, t []float64, ldt int, q []float64, ldq int, j1, n1, n2 int, work []float64) (ok bool) { + switch { + case n < 0: + panic(nLT0) + case ldt < max(1, n): + panic(badLdT) + case wantq && ldt < max(1, n): + panic(badLdQ) + case j1 < 0 || n <= j1: + panic(badJ1) + case len(work) < n: + panic(shortWork) + case n1 < 0 || 2 < n1: + panic(badN1) + case n2 < 0 || 2 < n2: + panic(badN2) + } + + if n == 0 || n1 == 0 || n2 == 0 { + return true + } + + switch { + case len(t) < (n-1)*ldt+n: + panic(shortT) + case wantq && len(q) < (n-1)*ldq+n: + panic(shortQ) + } + + if j1+n1 >= n { + // TODO(vladimir-ch): Reference LAPACK does this check whether + // the start of the second block is in the matrix T. It returns + // true if it is not and moreover it does not check whether the + // whole second block fits into T. This does not feel + // satisfactory. The only caller of Dlaexc is Dtrexc, so if the + // caller makes sure that this does not happen, we could be + // stricter here. 
+ return true + } + + j2 := j1 + 1 + j3 := j1 + 2 + + bi := blas64.Implementation() + + if n1 == 1 && n2 == 1 { + // Swap two 1×1 blocks. + t11 := t[j1*ldt+j1] + t22 := t[j2*ldt+j2] + + // Determine the transformation to perform the interchange. + cs, sn, _ := impl.Dlartg(t[j1*ldt+j2], t22-t11) + + // Apply transformation to the matrix T. + if n-j3 > 0 { + bi.Drot(n-j3, t[j1*ldt+j3:], 1, t[j2*ldt+j3:], 1, cs, sn) + } + if j1 > 0 { + bi.Drot(j1, t[j1:], ldt, t[j2:], ldt, cs, sn) + } + + t[j1*ldt+j1] = t22 + t[j2*ldt+j2] = t11 + + if wantq { + // Accumulate transformation in the matrix Q. + bi.Drot(n, q[j1:], ldq, q[j2:], ldq, cs, sn) + } + + return true + } + + // Swapping involves at least one 2×2 block. + // + // Copy the diagonal block of order n1+n2 to the local array d and + // compute its norm. + nd := n1 + n2 + var d [16]float64 + const ldd = 4 + impl.Dlacpy(blas.All, nd, nd, t[j1*ldt+j1:], ldt, d[:], ldd) + dnorm := impl.Dlange(lapack.MaxAbs, nd, nd, d[:], ldd, work) + + // Compute machine-dependent threshold for test for accepting swap. + eps := dlamchP + thresh := math.Max(10*eps*dnorm, dlamchS/eps) + + // Solve T11*X - X*T22 = scale*T12 for X. + var x [4]float64 + const ldx = 2 + scale, _, _ := impl.Dlasy2(false, false, -1, n1, n2, d[:], ldd, d[n1*ldd+n1:], ldd, d[n1:], ldd, x[:], ldx) + + // Swap the adjacent diagonal blocks. + switch { + case n1 == 1 && n2 == 2: + // Generate elementary reflector H so that + // ( scale, X11, X12 ) H = ( 0, 0, * ) + u := [3]float64{scale, x[0], 1} + _, tau := impl.Dlarfg(3, x[1], u[:2], 1) + t11 := t[j1*ldt+j1] + + // Perform swap provisionally on diagonal block in d. + impl.Dlarfx(blas.Left, 3, 3, u[:], tau, d[:], ldd, work) + impl.Dlarfx(blas.Right, 3, 3, u[:], tau, d[:], ldd, work) + + // Test whether to reject swap. + if math.Max(math.Abs(d[2*ldd]), math.Max(math.Abs(d[2*ldd+1]), math.Abs(d[2*ldd+2]-t11))) > thresh { + return false + } + + // Accept swap: apply transformation to the entire matrix T. 
+ impl.Dlarfx(blas.Left, 3, n-j1, u[:], tau, t[j1*ldt+j1:], ldt, work) + impl.Dlarfx(blas.Right, j2+1, 3, u[:], tau, t[j1:], ldt, work) + + t[j3*ldt+j1] = 0 + t[j3*ldt+j2] = 0 + t[j3*ldt+j3] = t11 + + if wantq { + // Accumulate transformation in the matrix Q. + impl.Dlarfx(blas.Right, n, 3, u[:], tau, q[j1:], ldq, work) + } + + case n1 == 2 && n2 == 1: + // Generate elementary reflector H so that: + // H ( -X11 ) = ( * ) + // ( -X21 ) = ( 0 ) + // ( scale ) = ( 0 ) + u := [3]float64{1, -x[ldx], scale} + _, tau := impl.Dlarfg(3, -x[0], u[1:], 1) + t33 := t[j3*ldt+j3] + + // Perform swap provisionally on diagonal block in D. + impl.Dlarfx(blas.Left, 3, 3, u[:], tau, d[:], ldd, work) + impl.Dlarfx(blas.Right, 3, 3, u[:], tau, d[:], ldd, work) + + // Test whether to reject swap. + if math.Max(math.Abs(d[ldd]), math.Max(math.Abs(d[2*ldd]), math.Abs(d[0]-t33))) > thresh { + return false + } + + // Accept swap: apply transformation to the entire matrix T. + impl.Dlarfx(blas.Right, j3+1, 3, u[:], tau, t[j1:], ldt, work) + impl.Dlarfx(blas.Left, 3, n-j1-1, u[:], tau, t[j1*ldt+j2:], ldt, work) + + t[j1*ldt+j1] = t33 + t[j2*ldt+j1] = 0 + t[j3*ldt+j1] = 0 + + if wantq { + // Accumulate transformation in the matrix Q. + impl.Dlarfx(blas.Right, n, 3, u[:], tau, q[j1:], ldq, work) + } + + default: // n1 == 2 && n2 == 2 + // Generate elementary reflectors H_1 and H_2 so that: + // H_2 H_1 ( -X11 -X12 ) = ( * * ) + // ( -X21 -X22 ) ( 0 * ) + // ( scale 0 ) ( 0 0 ) + // ( 0 scale ) ( 0 0 ) + u1 := [3]float64{1, -x[ldx], scale} + _, tau1 := impl.Dlarfg(3, -x[0], u1[1:], 1) + + temp := -tau1 * (x[1] + u1[1]*x[ldx+1]) + u2 := [3]float64{1, -temp * u1[2], scale} + _, tau2 := impl.Dlarfg(3, -temp*u1[1]-x[ldx+1], u2[1:], 1) + + // Perform swap provisionally on diagonal block in D. 
+ impl.Dlarfx(blas.Left, 3, 4, u1[:], tau1, d[:], ldd, work) + impl.Dlarfx(blas.Right, 4, 3, u1[:], tau1, d[:], ldd, work) + impl.Dlarfx(blas.Left, 3, 4, u2[:], tau2, d[ldd:], ldd, work) + impl.Dlarfx(blas.Right, 4, 3, u2[:], tau2, d[1:], ldd, work) + + // Test whether to reject swap. + m1 := math.Max(math.Abs(d[2*ldd]), math.Abs(d[2*ldd+1])) + m2 := math.Max(math.Abs(d[3*ldd]), math.Abs(d[3*ldd+1])) + if math.Max(m1, m2) > thresh { + return false + } + + // Accept swap: apply transformation to the entire matrix T. + j4 := j1 + 3 + impl.Dlarfx(blas.Left, 3, n-j1, u1[:], tau1, t[j1*ldt+j1:], ldt, work) + impl.Dlarfx(blas.Right, j4+1, 3, u1[:], tau1, t[j1:], ldt, work) + impl.Dlarfx(blas.Left, 3, n-j1, u2[:], tau2, t[j2*ldt+j1:], ldt, work) + impl.Dlarfx(blas.Right, j4+1, 3, u2[:], tau2, t[j2:], ldt, work) + + t[j3*ldt+j1] = 0 + t[j3*ldt+j2] = 0 + t[j4*ldt+j1] = 0 + t[j4*ldt+j2] = 0 + + if wantq { + // Accumulate transformation in the matrix Q. + impl.Dlarfx(blas.Right, n, 3, u1[:], tau1, q[j1:], ldq, work) + impl.Dlarfx(blas.Right, n, 3, u2[:], tau2, q[j2:], ldq, work) + } + } + + if n2 == 2 { + // Standardize new 2×2 block T11. + a, b := t[j1*ldt+j1], t[j1*ldt+j2] + c, d := t[j2*ldt+j1], t[j2*ldt+j2] + var cs, sn float64 + t[j1*ldt+j1], t[j1*ldt+j2], t[j2*ldt+j1], t[j2*ldt+j2], _, _, _, _, cs, sn = impl.Dlanv2(a, b, c, d) + if n-j1-2 > 0 { + bi.Drot(n-j1-2, t[j1*ldt+j1+2:], 1, t[j2*ldt+j1+2:], 1, cs, sn) + } + if j1 > 0 { + bi.Drot(j1, t[j1:], ldt, t[j2:], ldt, cs, sn) + } + if wantq { + bi.Drot(n, q[j1:], ldq, q[j2:], ldq, cs, sn) + } + } + if n1 == 2 { + // Standardize new 2×2 block T22. 
+ j3 := j1 + n2 + j4 := j3 + 1 + a, b := t[j3*ldt+j3], t[j3*ldt+j4] + c, d := t[j4*ldt+j3], t[j4*ldt+j4] + var cs, sn float64 + t[j3*ldt+j3], t[j3*ldt+j4], t[j4*ldt+j3], t[j4*ldt+j4], _, _, _, _, cs, sn = impl.Dlanv2(a, b, c, d) + if n-j3-2 > 0 { + bi.Drot(n-j3-2, t[j3*ldt+j3+2:], 1, t[j4*ldt+j3+2:], 1, cs, sn) + } + bi.Drot(j3, t[j3:], ldt, t[j4:], ldt, cs, sn) + if wantq { + bi.Drot(n, q[j3:], ldq, q[j4:], ldq, cs, sn) + } + } + + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go new file mode 100644 index 0000000000..6954deb424 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlags2.go @@ -0,0 +1,182 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlags2 computes 2-by-2 orthogonal matrices U, V and Q with the +// triangles of A and B specified by upper. +// +// If upper is true +// +// U^T*A*Q = U^T*[ a1 a2 ]*Q = [ x 0 ] +// [ 0 a3 ] [ x x ] +// and +// V^T*B*Q = V^T*[ b1 b2 ]*Q = [ x 0 ] +// [ 0 b3 ] [ x x ] +// +// otherwise +// +// U^T*A*Q = U^T*[ a1 0 ]*Q = [ x x ] +// [ a2 a3 ] [ 0 x ] +// and +// V^T*B*Q = V^T*[ b1 0 ]*Q = [ x x ] +// [ b2 b3 ] [ 0 x ]. +// +// The rows of the transformed A and B are parallel, where +// +// U = [ csu snu ], V = [ csv snv ], Q = [ csq snq ] +// [ -snu csu ] [ -snv csv ] [ -snq csq ] +// +// Dlags2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlags2(upper bool, a1, a2, a3, b1, b2, b3 float64) (csu, snu, csv, snv, csq, snq float64) { + if upper { + // Input matrices A and B are upper triangular matrices. + // + // Form matrix C = A*adj(B) = [ a b ] + // [ 0 d ] + a := a1 * b3 + d := a3 * b1 + b := a2*b1 - a1*b2 + + // The SVD of real 2-by-2 triangular C. 
+ // + // [ csl -snl ]*[ a b ]*[ csr snr ] = [ r 0 ] + // [ snl csl ] [ 0 d ] [ -snr csr ] [ 0 t ] + _, _, snr, csr, snl, csl := impl.Dlasv2(a, b, d) + + if math.Abs(csl) >= math.Abs(snl) || math.Abs(csr) >= math.Abs(snr) { + // Compute the [0, 0] and [0, 1] elements of U^T*A and V^T*B, + // and [0, 1] element of |U|^T*|A| and |V|^T*|B|. + + ua11r := csl * a1 + ua12 := csl*a2 + snl*a3 + + vb11r := csr * b1 + vb12 := csr*b2 + snr*b3 + + aua12 := math.Abs(csl)*math.Abs(a2) + math.Abs(snl)*math.Abs(a3) + avb12 := math.Abs(csr)*math.Abs(b2) + math.Abs(snr)*math.Abs(b3) + + // Zero [0, 1] elements of U^T*A and V^T*B. + if math.Abs(ua11r)+math.Abs(ua12) != 0 { + if aua12/(math.Abs(ua11r)+math.Abs(ua12)) <= avb12/(math.Abs(vb11r)+math.Abs(vb12)) { + csq, snq, _ = impl.Dlartg(-ua11r, ua12) + } else { + csq, snq, _ = impl.Dlartg(-vb11r, vb12) + } + } else { + csq, snq, _ = impl.Dlartg(-vb11r, vb12) + } + + csu = csl + snu = -snl + csv = csr + snv = -snr + } else { + // Compute the [1, 0] and [1, 1] elements of U^T*A and V^T*B, + // and [1, 1] element of |U|^T*|A| and |V|^T*|B|. + + ua21 := -snl * a1 + ua22 := -snl*a2 + csl*a3 + + vb21 := -snr * b1 + vb22 := -snr*b2 + csr*b3 + + aua22 := math.Abs(snl)*math.Abs(a2) + math.Abs(csl)*math.Abs(a3) + avb22 := math.Abs(snr)*math.Abs(b2) + math.Abs(csr)*math.Abs(b3) + + // Zero [1, 1] elements of U^T*A and V^T*B, and then swap. 
+ if math.Abs(ua21)+math.Abs(ua22) != 0 { + if aua22/(math.Abs(ua21)+math.Abs(ua22)) <= avb22/(math.Abs(vb21)+math.Abs(vb22)) { + csq, snq, _ = impl.Dlartg(-ua21, ua22) + } else { + csq, snq, _ = impl.Dlartg(-vb21, vb22) + } + } else { + csq, snq, _ = impl.Dlartg(-vb21, vb22) + } + + csu = snl + snu = csl + csv = snr + snv = csr + } + } else { + // Input matrices A and B are lower triangular matrices + // + // Form matrix C = A*adj(B) = [ a 0 ] + // [ c d ] + a := a1 * b3 + d := a3 * b1 + c := a2*b3 - a3*b2 + + // The SVD of real 2-by-2 triangular C + // + // [ csl -snl ]*[ a 0 ]*[ csr snr ] = [ r 0 ] + // [ snl csl ] [ c d ] [ -snr csr ] [ 0 t ] + _, _, snr, csr, snl, csl := impl.Dlasv2(a, c, d) + + if math.Abs(csr) >= math.Abs(snr) || math.Abs(csl) >= math.Abs(snl) { + // Compute the [1, 0] and [1, 1] elements of U^T*A and V^T*B, + // and [1, 0] element of |U|^T*|A| and |V|^T*|B|. + + ua21 := -snr*a1 + csr*a2 + ua22r := csr * a3 + + vb21 := -snl*b1 + csl*b2 + vb22r := csl * b3 + + aua21 := math.Abs(snr)*math.Abs(a1) + math.Abs(csr)*math.Abs(a2) + avb21 := math.Abs(snl)*math.Abs(b1) + math.Abs(csl)*math.Abs(b2) + + // Zero [1, 0] elements of U^T*A and V^T*B. + if (math.Abs(ua21) + math.Abs(ua22r)) != 0 { + if aua21/(math.Abs(ua21)+math.Abs(ua22r)) <= avb21/(math.Abs(vb21)+math.Abs(vb22r)) { + csq, snq, _ = impl.Dlartg(ua22r, ua21) + } else { + csq, snq, _ = impl.Dlartg(vb22r, vb21) + } + } else { + csq, snq, _ = impl.Dlartg(vb22r, vb21) + } + + csu = csr + snu = -snr + csv = csl + snv = -snl + } else { + // Compute the [0, 0] and [0, 1] elements of U^T *A and V^T *B, + // and [0, 0] element of |U|^T*|A| and |V|^T*|B|. + + ua11 := csr*a1 + snr*a2 + ua12 := snr * a3 + + vb11 := csl*b1 + snl*b2 + vb12 := snl * b3 + + aua11 := math.Abs(csr)*math.Abs(a1) + math.Abs(snr)*math.Abs(a2) + avb11 := math.Abs(csl)*math.Abs(b1) + math.Abs(snl)*math.Abs(b2) + + // Zero [0, 0] elements of U^T*A and V^T*B, and then swap. 
+ if (math.Abs(ua11) + math.Abs(ua12)) != 0 { + if aua11/(math.Abs(ua11)+math.Abs(ua12)) <= avb11/(math.Abs(vb11)+math.Abs(vb12)) { + csq, snq, _ = impl.Dlartg(ua12, ua11) + } else { + csq, snq, _ = impl.Dlartg(vb12, vb11) + } + } else { + csq, snq, _ = impl.Dlartg(vb12, vb11) + } + + csu = snr + snu = csr + csv = snl + snv = csl + } + } + + return csu, snu, csv, snv, csq, snq +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go new file mode 100644 index 0000000000..00a869bce8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahqr.go @@ -0,0 +1,431 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlahqr computes the eigenvalues and Schur factorization of a block of an n×n +// upper Hessenberg matrix H, using the double-shift/single-shift QR algorithm. +// +// h and ldh represent the matrix H. Dlahqr works primarily with the Hessenberg +// submatrix H[ilo:ihi+1,ilo:ihi+1], but applies transformations to all of H if +// wantt is true. It is assumed that H[ihi+1:n,ihi+1:n] is already upper +// quasi-triangular, although this is not checked. +// +// It must hold that +// 0 <= ilo <= max(0,ihi), and ihi < n, +// and that +// H[ilo,ilo-1] == 0, if ilo > 0, +// otherwise Dlahqr will panic. +// +// If unconverged is zero on return, wr[ilo:ihi+1] and wi[ilo:ihi+1] will contain +// respectively the real and imaginary parts of the computed eigenvalues ilo +// to ihi. If two eigenvalues are computed as a complex conjugate pair, they are +// stored in consecutive elements of wr and wi, say the i-th and (i+1)th, with +// wi[i] > 0 and wi[i+1] < 0. 
If wantt is true, the eigenvalues are stored in +// the same order as on the diagonal of the Schur form returned in H, with +// wr[i] = H[i,i], and, if H[i:i+2,i:i+2] is a 2×2 diagonal block, +// wi[i] = sqrt(abs(H[i+1,i]*H[i,i+1])) and wi[i+1] = -wi[i]. +// +// wr and wi must have length ihi+1. +// +// z and ldz represent an n×n matrix Z. If wantz is true, the transformations +// will be applied to the submatrix Z[iloz:ihiz+1,ilo:ihi+1] and it must hold that +// 0 <= iloz <= ilo, and ihi <= ihiz < n. +// If wantz is false, z is not referenced. +// +// unconverged indicates whether Dlahqr computed all the eigenvalues ilo to ihi +// in a total of 30 iterations per eigenvalue. +// +// If unconverged is zero, all the eigenvalues ilo to ihi have been computed and +// will be stored on return in wr[ilo:ihi+1] and wi[ilo:ihi+1]. +// +// If unconverged is zero and wantt is true, H[ilo:ihi+1,ilo:ihi+1] will be +// overwritten on return by upper quasi-triangular full Schur form with any +// 2×2 diagonal blocks in standard form. +// +// If unconverged is zero and if wantt is false, the contents of h on return is +// unspecified. +// +// If unconverged is positive, some eigenvalues have not converged, and +// wr[unconverged:ihi+1] and wi[unconverged:ihi+1] contain those eigenvalues +// which have been successfully computed. +// +// If unconverged is positive and wantt is true, then on return +// (initial H)*U = U*(final H), (*) +// where U is an orthogonal matrix. The final H is upper Hessenberg and +// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. +// +// If unconverged is positive and wantt is false, on return the remaining +// unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix +// H[ilo:unconverged,ilo:unconverged]. +// +// If unconverged is positive and wantz is true, then on return +// (final Z) = (initial Z)*U, +// where U is the orthogonal matrix in (*) regardless of the value of wantt. +// +// Dlahqr is an internal routine. 
It is exported for testing purposes. +func (impl Implementation) Dlahqr(wantt, wantz bool, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, iloz, ihiz int, z []float64, ldz int) (unconverged int) { + switch { + case n < 0: + panic(nLT0) + case ilo < 0, max(0, ihi) < ilo: + panic(badIlo) + case ihi >= n: + panic(badIhi) + case ldh < max(1, n): + panic(badLdH) + case wantz && (iloz < 0 || ilo < iloz): + panic(badIloz) + case wantz && (ihiz < ihi || n <= ihiz): + panic(badIhiz) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + } + + // Quick return if possible. + if n == 0 { + return 0 + } + + switch { + case len(h) < (n-1)*ldh+n: + panic(shortH) + case len(wr) != ihi+1: + panic(shortWr) + case len(wi) != ihi+1: + panic(shortWi) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + case ilo > 0 && h[ilo*ldh+ilo-1] != 0: + panic(notIsolated) + } + + if ilo == ihi { + wr[ilo] = h[ilo*ldh+ilo] + wi[ilo] = 0 + return 0 + } + + // Clear out the trash. + for j := ilo; j < ihi-2; j++ { + h[(j+2)*ldh+j] = 0 + h[(j+3)*ldh+j] = 0 + } + if ilo <= ihi-2 { + h[ihi*ldh+ihi-2] = 0 + } + + nh := ihi - ilo + 1 + nz := ihiz - iloz + 1 + + // Set machine-dependent constants for the stopping criterion. + ulp := dlamchP + smlnum := float64(nh) / ulp * dlamchS + + // i1 and i2 are the indices of the first row and last column of H to + // which transformations must be applied. If eigenvalues only are being + // computed, i1 and i2 are set inside the main loop. + var i1, i2 int + if wantt { + i1 = 0 + i2 = n - 1 + } + + itmax := 30 * max(10, nh) // Total number of QR iterations allowed. + + // The main loop begins here. i is the loop index and decreases from ihi + // to ilo in steps of 1 or 2. Each iteration of the loop works with the + // active submatrix in rows and columns l to i. Eigenvalues i+1 to ihi + // have already converged. Either l = ilo or H[l,l-1] is negligible so + // that the matrix splits. 
+ bi := blas64.Implementation() + i := ihi + for i >= ilo { + l := ilo + + // Perform QR iterations on rows and columns ilo to i until a + // submatrix of order 1 or 2 splits off at the bottom because a + // subdiagonal element has become negligible. + converged := false + for its := 0; its <= itmax; its++ { + // Look for a single small subdiagonal element. + var k int + for k = i; k > l; k-- { + if math.Abs(h[k*ldh+k-1]) <= smlnum { + break + } + tst := math.Abs(h[(k-1)*ldh+k-1]) + math.Abs(h[k*ldh+k]) + if tst == 0 { + if k-2 >= ilo { + tst += math.Abs(h[(k-1)*ldh+k-2]) + } + if k+1 <= ihi { + tst += math.Abs(h[(k+1)*ldh+k]) + } + } + // The following is a conservative small + // subdiagonal deflation criterion due to Ahues + // & Tisseur (LAWN 122, 1997). It has better + // mathematical foundation and improves accuracy + // in some cases. + if math.Abs(h[k*ldh+k-1]) <= ulp*tst { + ab := math.Max(math.Abs(h[k*ldh+k-1]), math.Abs(h[(k-1)*ldh+k])) + ba := math.Min(math.Abs(h[k*ldh+k-1]), math.Abs(h[(k-1)*ldh+k])) + aa := math.Max(math.Abs(h[k*ldh+k]), math.Abs(h[(k-1)*ldh+k-1]-h[k*ldh+k])) + bb := math.Min(math.Abs(h[k*ldh+k]), math.Abs(h[(k-1)*ldh+k-1]-h[k*ldh+k])) + s := aa + ab + if ab/s*ba <= math.Max(smlnum, aa/s*bb*ulp) { + break + } + } + } + l = k + if l > ilo { + // H[l,l-1] is negligible. + h[l*ldh+l-1] = 0 + } + if l >= i-1 { + // Break the loop because a submatrix of order 1 + // or 2 has split off. + converged = true + break + } + + // Now the active submatrix is in rows and columns l to + // i. If eigenvalues only are being computed, only the + // active submatrix need be transformed. + if !wantt { + i1 = l + i2 = i + } + + const ( + dat1 = 3.0 + dat2 = -0.4375 + ) + var h11, h21, h12, h22 float64 + switch its { + case 10: // Exceptional shift. + s := math.Abs(h[(l+1)*ldh+l]) + math.Abs(h[(l+2)*ldh+l+1]) + h11 = dat1*s + h[l*ldh+l] + h12 = dat2 * s + h21 = s + h22 = h11 + case 20: // Exceptional shift. 
+ s := math.Abs(h[i*ldh+i-1]) + math.Abs(h[(i-1)*ldh+i-2]) + h11 = dat1*s + h[i*ldh+i] + h12 = dat2 * s + h21 = s + h22 = h11 + default: // Prepare to use Francis' double shift (i.e., + // 2nd degree generalized Rayleigh quotient). + h11 = h[(i-1)*ldh+i-1] + h21 = h[i*ldh+i-1] + h12 = h[(i-1)*ldh+i] + h22 = h[i*ldh+i] + } + s := math.Abs(h11) + math.Abs(h12) + math.Abs(h21) + math.Abs(h22) + var ( + rt1r, rt1i float64 + rt2r, rt2i float64 + ) + if s != 0 { + h11 /= s + h21 /= s + h12 /= s + h22 /= s + tr := (h11 + h22) / 2 + det := (h11-tr)*(h22-tr) - h12*h21 + rtdisc := math.Sqrt(math.Abs(det)) + if det >= 0 { + // Complex conjugate shifts. + rt1r = tr * s + rt2r = rt1r + rt1i = rtdisc * s + rt2i = -rt1i + } else { + // Real shifts (use only one of them). + rt1r = tr + rtdisc + rt2r = tr - rtdisc + if math.Abs(rt1r-h22) <= math.Abs(rt2r-h22) { + rt1r *= s + rt2r = rt1r + } else { + rt2r *= s + rt1r = rt2r + } + rt1i = 0 + rt2i = 0 + } + } + + // Look for two consecutive small subdiagonal elements. + var m int + var v [3]float64 + for m = i - 2; m >= l; m-- { + // Determine the effect of starting the + // double-shift QR iteration at row m, and see + // if this would make H[m,m-1] negligible. The + // following uses scaling to avoid overflows and + // most underflows. + h21s := h[(m+1)*ldh+m] + s := math.Abs(h[m*ldh+m]-rt2r) + math.Abs(rt2i) + math.Abs(h21s) + h21s /= s + v[0] = h21s*h[m*ldh+m+1] + (h[m*ldh+m]-rt1r)*((h[m*ldh+m]-rt2r)/s) - rt2i/s*rt1i + v[1] = h21s * (h[m*ldh+m] + h[(m+1)*ldh+m+1] - rt1r - rt2r) + v[2] = h21s * h[(m+2)*ldh+m+1] + s = math.Abs(v[0]) + math.Abs(v[1]) + math.Abs(v[2]) + v[0] /= s + v[1] /= s + v[2] /= s + if m == l { + break + } + dsum := math.Abs(h[(m-1)*ldh+m-1]) + math.Abs(h[m*ldh+m]) + math.Abs(h[(m+1)*ldh+m+1]) + if math.Abs(h[m*ldh+m-1])*(math.Abs(v[1])+math.Abs(v[2])) <= ulp*math.Abs(v[0])*dsum { + break + } + } + + // Double-shift QR step. 
+ for k := m; k < i; k++ { + // The first iteration of this loop determines a + // reflection G from the vector V and applies it + // from left and right to H, thus creating a + // non-zero bulge below the subdiagonal. + // + // Each subsequent iteration determines a + // reflection G to restore the Hessenberg form + // in the (k-1)th column, and thus chases the + // bulge one step toward the bottom of the + // active submatrix. nr is the order of G. + + nr := min(3, i-k+1) + if k > m { + bi.Dcopy(nr, h[k*ldh+k-1:], ldh, v[:], 1) + } + var t0 float64 + v[0], t0 = impl.Dlarfg(nr, v[0], v[1:], 1) + if k > m { + h[k*ldh+k-1] = v[0] + h[(k+1)*ldh+k-1] = 0 + if k < i-1 { + h[(k+2)*ldh+k-1] = 0 + } + } else if m > l { + // Use the following instead of H[k,k-1] = -H[k,k-1] + // to avoid a bug when v[1] and v[2] underflow. + h[k*ldh+k-1] *= 1 - t0 + } + t1 := t0 * v[1] + if nr == 3 { + t2 := t0 * v[2] + + // Apply G from the left to transform + // the rows of the matrix in columns k + // to i2. + for j := k; j <= i2; j++ { + sum := h[k*ldh+j] + v[1]*h[(k+1)*ldh+j] + v[2]*h[(k+2)*ldh+j] + h[k*ldh+j] -= sum * t0 + h[(k+1)*ldh+j] -= sum * t1 + h[(k+2)*ldh+j] -= sum * t2 + } + + // Apply G from the right to transform + // the columns of the matrix in rows i1 + // to min(k+3,i). + for j := i1; j <= min(k+3, i); j++ { + sum := h[j*ldh+k] + v[1]*h[j*ldh+k+1] + v[2]*h[j*ldh+k+2] + h[j*ldh+k] -= sum * t0 + h[j*ldh+k+1] -= sum * t1 + h[j*ldh+k+2] -= sum * t2 + } + + if wantz { + // Accumulate transformations in the matrix Z. + for j := iloz; j <= ihiz; j++ { + sum := z[j*ldz+k] + v[1]*z[j*ldz+k+1] + v[2]*z[j*ldz+k+2] + z[j*ldz+k] -= sum * t0 + z[j*ldz+k+1] -= sum * t1 + z[j*ldz+k+2] -= sum * t2 + } + } + } else if nr == 2 { + // Apply G from the left to transform + // the rows of the matrix in columns k + // to i2. 
+ for j := k; j <= i2; j++ { + sum := h[k*ldh+j] + v[1]*h[(k+1)*ldh+j] + h[k*ldh+j] -= sum * t0 + h[(k+1)*ldh+j] -= sum * t1 + } + + // Apply G from the right to transform + // the columns of the matrix in rows i1 + // to min(k+3,i). + for j := i1; j <= i; j++ { + sum := h[j*ldh+k] + v[1]*h[j*ldh+k+1] + h[j*ldh+k] -= sum * t0 + h[j*ldh+k+1] -= sum * t1 + } + + if wantz { + // Accumulate transformations in the matrix Z. + for j := iloz; j <= ihiz; j++ { + sum := z[j*ldz+k] + v[1]*z[j*ldz+k+1] + z[j*ldz+k] -= sum * t0 + z[j*ldz+k+1] -= sum * t1 + } + } + } + } + } + + if !converged { + // The QR iteration finished without splitting off a + // submatrix of order 1 or 2. + return i + 1 + } + + if l == i { + // H[i,i-1] is negligible: one eigenvalue has converged. + wr[i] = h[i*ldh+i] + wi[i] = 0 + } else if l == i-1 { + // H[i-1,i-2] is negligible: a pair of eigenvalues have converged. + + // Transform the 2×2 submatrix to standard Schur form, + // and compute and store the eigenvalues. + var cs, sn float64 + a, b := h[(i-1)*ldh+i-1], h[(i-1)*ldh+i] + c, d := h[i*ldh+i-1], h[i*ldh+i] + a, b, c, d, wr[i-1], wi[i-1], wr[i], wi[i], cs, sn = impl.Dlanv2(a, b, c, d) + h[(i-1)*ldh+i-1], h[(i-1)*ldh+i] = a, b + h[i*ldh+i-1], h[i*ldh+i] = c, d + + if wantt { + // Apply the transformation to the rest of H. + if i2 > i { + bi.Drot(i2-i, h[(i-1)*ldh+i+1:], 1, h[i*ldh+i+1:], 1, cs, sn) + } + bi.Drot(i-i1-1, h[i1*ldh+i-1:], ldh, h[i1*ldh+i:], ldh, cs, sn) + } + + if wantz { + // Apply the transformation to Z. + bi.Drot(nz, z[iloz*ldz+i-1:], ldz, z[iloz*ldz+i:], ldz, cs, sn) + } + } + + // Return to start of the main loop with new value of i. + i = l - 1 + } + return 0 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go new file mode 100644 index 0000000000..a47dc8fed1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlahr2.go @@ -0,0 +1,195 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlahr2 reduces the first nb columns of a real general n×(n-k+1) matrix A so +// that elements below the k-th subdiagonal are zero. The reduction is performed +// by an orthogonal similarity transformation Q^T * A * Q. Dlahr2 returns the +// matrices V and T which determine Q as a block reflector I - V*T*V^T, and +// also the matrix Y = A * V * T. +// +// The matrix Q is represented as a product of nb elementary reflectors +// Q = H_0 * H_1 * ... * H_{nb-1}. +// Each H_i has the form +// H_i = I - tau[i] * v * v^T, +// where v is a real vector with v[0:i+k-1] = 0 and v[i+k-1] = 1. v[i+k:n] is +// stored on exit in A[i+k+1:n,i]. +// +// The elements of the vectors v together form the (n-k+1)×nb matrix +// V which is needed, with T and Y, to apply the transformation to the +// unreduced part of the matrix, using an update of the form +// A = (I - V*T*V^T) * (A - Y*V^T). +// +// On entry, a contains the n×(n-k+1) general matrix A. On return, the elements +// on and above the k-th subdiagonal in the first nb columns are overwritten +// with the corresponding elements of the reduced matrix; the elements below the +// k-th subdiagonal, with the slice tau, represent the matrix Q as a product of +// elementary reflectors. The other columns of A are unchanged. +// +// The contents of A on exit are illustrated by the following example +// with n = 7, k = 3 and nb = 2: +// [ a a a a a ] +// [ a a a a a ] +// [ a a a a a ] +// [ h h a a a ] +// [ v0 h a a a ] +// [ v0 v1 a a a ] +// [ v0 v1 a a a ] +// where a denotes an element of the original matrix A, h denotes a +// modified element of the upper Hessenberg matrix H, and vi denotes an +// element of the vector defining H_i. +// +// k is the offset for the reduction. 
Elements below the k-th subdiagonal in the +// first nb columns are reduced to zero. +// +// nb is the number of columns to be reduced. +// +// On entry, a represents the n×(n-k+1) matrix A. On return, the elements on and +// above the k-th subdiagonal in the first nb columns are overwritten with the +// corresponding elements of the reduced matrix. The elements below the k-th +// subdiagonal, with the slice tau, represent the matrix Q as a product of +// elementary reflectors. The other columns of A are unchanged. +// +// tau will contain the scalar factors of the elementary reflectors. It must +// have length at least nb. +// +// t and ldt represent the nb×nb upper triangular matrix T, and y and ldy +// represent the n×nb matrix Y. +// +// Dlahr2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlahr2(n, k, nb int, a []float64, lda int, tau, t []float64, ldt int, y []float64, ldy int) { + switch { + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case nb < 0: + panic(nbLT0) + case nb > n: + panic(nbGTN) + case lda < max(1, n-k+1): + panic(badLdA) + case ldt < max(1, nb): + panic(badLdT) + case ldy < max(1, nb): + panic(badLdY) + } + + // Quick return if possible. + if n < 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n-k+1: + panic(shortA) + case len(tau) < nb: + panic(shortTau) + case len(t) < (nb-1)*ldt+nb: + panic(shortT) + case len(y) < (n-1)*ldy+nb: + panic(shortY) + } + + // Quick return if possible. + if n == 1 { + return + } + + bi := blas64.Implementation() + var ei float64 + for i := 0; i < nb; i++ { + if i > 0 { + // Update A[k:n,i]. + + // Update i-th column of A - Y * V^T. + bi.Dgemv(blas.NoTrans, n-k, i, + -1, y[k*ldy:], ldy, + a[(k+i-1)*lda:], 1, + 1, a[k*lda+i:], lda) + + // Apply I - V * T^T * V^T to this column (call it b) + // from the left, using the last column of T as + // workspace. 
+ // Let V = [ V1 ] and b = [ b1 ] (first i rows) + // [ V2 ] [ b2 ] + // where V1 is unit lower triangular. + // + // w := V1^T * b1. + bi.Dcopy(i, a[k*lda+i:], lda, t[nb-1:], ldt) + bi.Dtrmv(blas.Lower, blas.Trans, blas.Unit, i, + a[k*lda:], lda, t[nb-1:], ldt) + + // w := w + V2^T * b2. + bi.Dgemv(blas.Trans, n-k-i, i, + 1, a[(k+i)*lda:], lda, + a[(k+i)*lda+i:], lda, + 1, t[nb-1:], ldt) + + // w := T^T * w. + bi.Dtrmv(blas.Upper, blas.Trans, blas.NonUnit, i, + t, ldt, t[nb-1:], ldt) + + // b2 := b2 - V2*w. + bi.Dgemv(blas.NoTrans, n-k-i, i, + -1, a[(k+i)*lda:], lda, + t[nb-1:], ldt, + 1, a[(k+i)*lda+i:], lda) + + // b1 := b1 - V1*w. + bi.Dtrmv(blas.Lower, blas.NoTrans, blas.Unit, i, + a[k*lda:], lda, t[nb-1:], ldt) + bi.Daxpy(i, -1, t[nb-1:], ldt, a[k*lda+i:], lda) + + a[(k+i-1)*lda+i-1] = ei + } + + // Generate the elementary reflector H_i to annihilate + // A[k+i+1:n,i]. + ei, tau[i] = impl.Dlarfg(n-k-i, a[(k+i)*lda+i], a[min(k+i+1, n-1)*lda+i:], lda) + a[(k+i)*lda+i] = 1 + + // Compute Y[k:n,i]. + bi.Dgemv(blas.NoTrans, n-k, n-k-i, + 1, a[k*lda+i+1:], lda, + a[(k+i)*lda+i:], lda, + 0, y[k*ldy+i:], ldy) + bi.Dgemv(blas.Trans, n-k-i, i, + 1, a[(k+i)*lda:], lda, + a[(k+i)*lda+i:], lda, + 0, t[i:], ldt) + bi.Dgemv(blas.NoTrans, n-k, i, + -1, y[k*ldy:], ldy, + t[i:], ldt, + 1, y[k*ldy+i:], ldy) + bi.Dscal(n-k, tau[i], y[k*ldy+i:], ldy) + + // Compute T[0:i,i]. + bi.Dscal(i, -tau[i], t[i:], ldt) + bi.Dtrmv(blas.Upper, blas.NoTrans, blas.NonUnit, i, + t, ldt, t[i:], ldt) + + t[i*ldt+i] = tau[i] + } + a[(k+nb-1)*lda+nb-1] = ei + + // Compute Y[0:k,0:nb]. 
+ impl.Dlacpy(blas.All, k, nb, a[1:], lda, y, ldy) + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, k, nb, + 1, a[k*lda:], lda, y, ldy) + if n > k+nb { + bi.Dgemm(blas.NoTrans, blas.NoTrans, k, nb, n-k-nb, + 1, a[1+nb:], lda, + a[(k+nb)*lda:], lda, + 1, y, ldy) + } + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, k, nb, + 1, t, ldt, y, ldy) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go new file mode 100644 index 0000000000..ca0b2f78c8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaln2.go @@ -0,0 +1,405 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlaln2 solves a linear equation or a system of 2 linear equations of the form +// (ca A - w D) X = scale B, if trans == false, +// (ca A^T - w D) X = scale B, if trans == true, +// where A is a na×na real matrix, ca is a real scalar, D is a na×na diagonal +// real matrix, w is a scalar, real if nw == 1, complex if nw == 2, and X and B +// are na×1 matrices, real if w is real, complex if w is complex. +// +// If w is complex, X and B are represented as na×2 matrices, the first column +// of each being the real part and the second being the imaginary part. +// +// na and nw must be 1 or 2, otherwise Dlaln2 will panic. +// +// d1 and d2 are the diagonal elements of D. d2 is not used if na == 1. +// +// wr and wi represent the real and imaginary part, respectively, of the scalar +// w. wi is not used if nw == 1. +// +// smin is the desired lower bound on the singular values of A. This should be +// a safe distance away from underflow or overflow, say, between +// (underflow/machine precision) and (overflow*machine precision). +// +// If both singular values of (ca A - w D) are less than smin, smin*identity +// will be used instead of (ca A - w D). 
If only one singular value is less than +// smin, one element of (ca A - w D) will be perturbed enough to make the +// smallest singular value roughly smin. If both singular values are at least +// smin, (ca A - w D) will not be perturbed. In any case, the perturbation will +// be at most some small multiple of max(smin, ulp*norm(ca A - w D)). The +// singular values are computed by infinity-norm approximations, and thus will +// only be correct to a factor of 2 or so. +// +// All input quantities are assumed to be smaller than overflow by a reasonable +// factor. +// +// scale is a scaling factor less than or equal to 1 which is chosen so that X +// can be computed without overflow. X is further scaled if necessary to assure +// that norm(ca A - w D)*norm(X) is less than overflow. +// +// xnorm contains the infinity-norm of X when X is regarded as a na×nw real +// matrix. +// +// ok will be false if (ca A - w D) had to be perturbed to make its smallest +// singular value greater than smin, otherwise ok will be true. +// +// Dlaln2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaln2(trans bool, na, nw int, smin, ca float64, a []float64, lda int, d1, d2 float64, b []float64, ldb int, wr, wi float64, x []float64, ldx int) (scale, xnorm float64, ok bool) { + // TODO(vladimir-ch): Consider splitting this function into two, one + // handling the real case (nw == 1) and the other handling the complex + // case (nw == 2). Given that Go has complex types, their signatures + // would be simpler and more natural, and the implementation not as + // convoluted. 
+ + switch { + case na != 1 && na != 2: + panic(badNa) + case nw != 1 && nw != 2: + panic(badNw) + case lda < na: + panic(badLdA) + case len(a) < (na-1)*lda+na: + panic(shortA) + case ldb < nw: + panic(badLdB) + case len(b) < (na-1)*ldb+nw: + panic(shortB) + case ldx < nw: + panic(badLdX) + case len(x) < (na-1)*ldx+nw: + panic(shortX) + } + + smlnum := 2 * dlamchS + bignum := 1 / smlnum + smini := math.Max(smin, smlnum) + + ok = true + scale = 1 + + if na == 1 { + // 1×1 (i.e., scalar) system C X = B. + + if nw == 1 { + // Real 1×1 system. + + // C = ca A - w D. + csr := ca*a[0] - wr*d1 + cnorm := math.Abs(csr) + + // If |C| < smini, use C = smini. + if cnorm < smini { + csr = smini + cnorm = smini + ok = false + } + + // Check scaling for X = B / C. + bnorm := math.Abs(b[0]) + if cnorm < 1 && bnorm > math.Max(1, bignum*cnorm) { + scale = 1 / bnorm + } + + // Compute X. + x[0] = b[0] * scale / csr + xnorm = math.Abs(x[0]) + + return scale, xnorm, ok + } + + // Complex 1×1 system (w is complex). + + // C = ca A - w D. + csr := ca*a[0] - wr*d1 + csi := -wi * d1 + cnorm := math.Abs(csr) + math.Abs(csi) + + // If |C| < smini, use C = smini. + if cnorm < smini { + csr = smini + csi = 0 + cnorm = smini + ok = false + } + + // Check scaling for X = B / C. + bnorm := math.Abs(b[0]) + math.Abs(b[1]) + if cnorm < 1 && bnorm > math.Max(1, bignum*cnorm) { + scale = 1 / bnorm + } + + // Compute X. + cx := complex(scale*b[0], scale*b[1]) / complex(csr, csi) + x[0], x[1] = real(cx), imag(cx) + xnorm = math.Abs(x[0]) + math.Abs(x[1]) + + return scale, xnorm, ok + } + + // 2×2 system. + + // Compute the real part of + // C = ca A - w D + // or + // C = ca A^T - w D. + crv := [4]float64{ + ca*a[0] - wr*d1, + ca * a[1], + ca * a[lda], + ca*a[lda+1] - wr*d2, + } + if trans { + crv[1] = ca * a[lda] + crv[2] = ca * a[1] + } + + pivot := [4][4]int{ + {0, 1, 2, 3}, + {1, 0, 3, 2}, + {2, 3, 0, 1}, + {3, 2, 1, 0}, + } + + if nw == 1 { + // Real 2×2 system (w is real). 
+ + // Find the largest element in C. + var cmax float64 + var icmax int + for j, v := range crv { + v = math.Abs(v) + if v > cmax { + cmax = v + icmax = j + } + } + + // If norm(C) < smini, use smini*identity. + if cmax < smini { + bnorm := math.Max(math.Abs(b[0]), math.Abs(b[ldb])) + if smini < 1 && bnorm > math.Max(1, bignum*smini) { + scale = 1 / bnorm + } + temp := scale / smini + x[0] = temp * b[0] + x[ldx] = temp * b[ldb] + xnorm = temp * bnorm + ok = false + + return scale, xnorm, ok + } + + // Gaussian elimination with complete pivoting. + // Form upper triangular matrix + // [ur11 ur12] + // [ 0 ur22] + ur11 := crv[icmax] + ur12 := crv[pivot[icmax][1]] + cr21 := crv[pivot[icmax][2]] + cr22 := crv[pivot[icmax][3]] + ur11r := 1 / ur11 + lr21 := ur11r * cr21 + ur22 := cr22 - ur12*lr21 + + // If smaller pivot < smini, use smini. + if math.Abs(ur22) < smini { + ur22 = smini + ok = false + } + + var br1, br2 float64 + if icmax > 1 { + // If the pivot lies in the second row, swap the rows. + br1 = b[ldb] + br2 = b[0] + } else { + br1 = b[0] + br2 = b[ldb] + } + br2 -= lr21 * br1 // Apply the Gaussian elimination step to the right-hand side. + + bbnd := math.Max(math.Abs(ur22*ur11r*br1), math.Abs(br2)) + if bbnd > 1 && math.Abs(ur22) < 1 && bbnd >= bignum*math.Abs(ur22) { + scale = 1 / bbnd + } + + // Solve the linear system ur*xr=br. + xr2 := br2 * scale / ur22 + xr1 := scale*br1*ur11r - ur11r*ur12*xr2 + if icmax&0x1 != 0 { + // If the pivot lies in the second column, swap the components of the solution. + x[0] = xr2 + x[ldx] = xr1 + } else { + x[0] = xr1 + x[ldx] = xr2 + } + xnorm = math.Max(math.Abs(xr1), math.Abs(xr2)) + + // Further scaling if norm(A)*norm(X) > overflow. + if xnorm > 1 && cmax > 1 && xnorm > bignum/cmax { + temp := cmax / bignum + x[0] *= temp + x[ldx] *= temp + xnorm *= temp + scale *= temp + } + + return scale, xnorm, ok + } + + // Complex 2×2 system (w is complex). + + // Find the largest element in C. 
+ civ := [4]float64{ + -wi * d1, + 0, + 0, + -wi * d2, + } + var cmax float64 + var icmax int + for j, v := range crv { + v := math.Abs(v) + if v+math.Abs(civ[j]) > cmax { + cmax = v + math.Abs(civ[j]) + icmax = j + } + } + + // If norm(C) < smini, use smini*identity. + if cmax < smini { + br1 := math.Abs(b[0]) + math.Abs(b[1]) + br2 := math.Abs(b[ldb]) + math.Abs(b[ldb+1]) + bnorm := math.Max(br1, br2) + if smini < 1 && bnorm > 1 && bnorm > bignum*smini { + scale = 1 / bnorm + } + temp := scale / smini + x[0] = temp * b[0] + x[1] = temp * b[1] + x[ldx] = temp * b[ldb] + x[ldx+1] = temp * b[ldb+1] + xnorm = temp * bnorm + ok = false + + return scale, xnorm, ok + } + + // Gaussian elimination with complete pivoting. + ur11 := crv[icmax] + ui11 := civ[icmax] + ur12 := crv[pivot[icmax][1]] + ui12 := civ[pivot[icmax][1]] + cr21 := crv[pivot[icmax][2]] + ci21 := civ[pivot[icmax][2]] + cr22 := crv[pivot[icmax][3]] + ci22 := civ[pivot[icmax][3]] + var ( + ur11r, ui11r float64 + lr21, li21 float64 + ur12s, ui12s float64 + ur22, ui22 float64 + ) + if icmax == 0 || icmax == 3 { + // Off-diagonals of pivoted C are real. + if math.Abs(ur11) > math.Abs(ui11) { + temp := ui11 / ur11 + ur11r = 1 / (ur11 * (1 + temp*temp)) + ui11r = -temp * ur11r + } else { + temp := ur11 / ui11 + ui11r = -1 / (ui11 * (1 + temp*temp)) + ur11r = -temp * ui11r + } + lr21 = cr21 * ur11r + li21 = cr21 * ui11r + ur12s = ur12 * ur11r + ui12s = ur12 * ui11r + ur22 = cr22 - ur12*lr21 + ui22 = ci22 - ur12*li21 + } else { + // Diagonals of pivoted C are real. + ur11r = 1 / ur11 + // ui11r is already 0. + lr21 = cr21 * ur11r + li21 = ci21 * ur11r + ur12s = ur12 * ur11r + ui12s = ui12 * ur11r + ur22 = cr22 - ur12*lr21 + ui12*li21 + ui22 = -ur12*li21 - ui12*lr21 + } + u22abs := math.Abs(ur22) + math.Abs(ui22) + + // If smaller pivot < smini, use smini. 
+ if u22abs < smini { + ur22 = smini + ui22 = 0 + ok = false + } + + var br1, bi1 float64 + var br2, bi2 float64 + if icmax > 1 { + // If the pivot lies in the second row, swap the rows. + br1 = b[ldb] + bi1 = b[ldb+1] + br2 = b[0] + bi2 = b[1] + } else { + br1 = b[0] + bi1 = b[1] + br2 = b[ldb] + bi2 = b[ldb+1] + } + br2 += -lr21*br1 + li21*bi1 + bi2 += -li21*br1 - lr21*bi1 + + bbnd1 := u22abs * (math.Abs(ur11r) + math.Abs(ui11r)) * (math.Abs(br1) + math.Abs(bi1)) + bbnd2 := math.Abs(br2) + math.Abs(bi2) + bbnd := math.Max(bbnd1, bbnd2) + if bbnd > 1 && u22abs < 1 && bbnd >= bignum*u22abs { + scale = 1 / bbnd + br1 *= scale + bi1 *= scale + br2 *= scale + bi2 *= scale + } + + cx2 := complex(br2, bi2) / complex(ur22, ui22) + xr2, xi2 := real(cx2), imag(cx2) + xr1 := ur11r*br1 - ui11r*bi1 - ur12s*xr2 + ui12s*xi2 + xi1 := ui11r*br1 + ur11r*bi1 - ui12s*xr2 - ur12s*xi2 + if icmax&0x1 != 0 { + // If the pivot lies in the second column, swap the components of the solution. + x[0] = xr2 + x[1] = xi2 + x[ldx] = xr1 + x[ldx+1] = xi1 + } else { + x[0] = xr1 + x[1] = xi1 + x[ldx] = xr2 + x[ldx+1] = xi2 + } + xnorm = math.Max(math.Abs(xr1)+math.Abs(xi1), math.Abs(xr2)+math.Abs(xi2)) + + // Further scaling if norm(A)*norm(X) > overflow. + if xnorm > 1 && cmax > 1 && xnorm > bignum/cmax { + temp := cmax / bignum + x[0] *= temp + x[1] *= temp + x[ldx] *= temp + x[ldx+1] *= temp + xnorm *= temp + scale *= temp + } + + return scale, xnorm, ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go new file mode 100644 index 0000000000..9edfa83c43 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlange.go @@ -0,0 +1,89 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlange computes the matrix norm of the general m×n matrix a. The input norm +// specifies the norm computed. +// lapack.MaxAbs: the maximum absolute value of an element. +// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. +// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. +// lapack.Frobenius: the square root of the sum of the squares of the entries. +// If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise. +// There are no restrictions on work for the other matrix norms. +func (impl Implementation) Dlange(norm lapack.MatrixNorm, m, n int, a []float64, lda int, work []float64) float64 { + // TODO(btracey): These should probably be refactored to use BLAS calls. + switch { + case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: + panic(badNorm) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + return 0 + } + + switch { + case len(a) < (m-1)*lda+n: + panic(badLdA) + case norm == lapack.MaxColumnSum && len(work) < n: + panic(shortWork) + } + + if norm == lapack.MaxAbs { + var value float64 + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + value = math.Max(value, math.Abs(a[i*lda+j])) + } + } + return value + } + if norm == lapack.MaxColumnSum { + if len(work) < n { + panic(shortWork) + } + for i := 0; i < n; i++ { + work[i] = 0 + } + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + var value float64 + for i := 0; i < n; i++ { + value = math.Max(value, work[i]) + } + return value + } + if norm == lapack.MaxRowSum { + var value float64 + for i := 0; i < m; i++ { + var sum float64 + for j := 0; j < n; j++ { + sum += math.Abs(a[i*lda+j]) + } + value = math.Max(value, sum) + } + return value + } + // norm == lapack.Frobenius + var value float64 + scale := 0.0 + sum := 1.0 + for i := 0; i < m; i++ { + scale, sum = impl.Dlassq(n, a[i*lda:], 1, scale, sum) + } + value = scale * math.Sqrt(sum) + return value +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go new file mode 100644 index 0000000000..9ca1897e34 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanst.go @@ -0,0 +1,75 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlanst computes the specified norm of a symmetric tridiagonal matrix A. +// The diagonal elements of A are stored in d and the off-diagonal elements +// are stored in e. 
+func (impl Implementation) Dlanst(norm lapack.MatrixNorm, n int, d, e []float64) float64 { + switch { + case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: + panic(badNorm) + case n < 0: + panic(nLT0) + } + if n == 0 { + return 0 + } + switch { + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + } + + switch norm { + default: + panic(badNorm) + case lapack.MaxAbs: + anorm := math.Abs(d[n-1]) + for i := 0; i < n-1; i++ { + sum := math.Abs(d[i]) + if anorm < sum || math.IsNaN(sum) { + anorm = sum + } + sum = math.Abs(e[i]) + if anorm < sum || math.IsNaN(sum) { + anorm = sum + } + } + return anorm + case lapack.MaxColumnSum, lapack.MaxRowSum: + if n == 1 { + return math.Abs(d[0]) + } + anorm := math.Abs(d[0]) + math.Abs(e[0]) + sum := math.Abs(e[n-2]) + math.Abs(d[n-1]) + if anorm < sum || math.IsNaN(sum) { + anorm = sum + } + for i := 1; i < n-1; i++ { + sum := math.Abs(d[i]) + math.Abs(e[i]) + math.Abs(e[i-1]) + if anorm < sum || math.IsNaN(sum) { + anorm = sum + } + } + return anorm + case lapack.Frobenius: + var scale float64 + sum := 1.0 + if n > 1 { + scale, sum = impl.Dlassq(n-1, e, 1, scale, sum) + sum = 2 * sum + } + scale, sum = impl.Dlassq(n, d, 1, scale, sum) + return scale * math.Sqrt(sum) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go new file mode 100644 index 0000000000..97ba5b2438 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlansy.go @@ -0,0 +1,132 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dlansy computes the specified norm of an n×n symmetric matrix. 
If +// norm == lapack.MaxColumnSum or norm == lapack.MaxRowSum work must have length +// at least n, otherwise work is unused. +func (impl Implementation) Dlansy(norm lapack.MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64 { + switch { + case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: + panic(badNorm) + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return 0 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case (norm == lapack.MaxColumnSum || norm == lapack.MaxRowSum) && len(work) < n: + panic(shortWork) + } + + switch norm { + default: + panic(badNorm) + case lapack.MaxAbs: + if uplo == blas.Upper { + var max float64 + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + v := math.Abs(a[i*lda+j]) + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + } + return max + } + var max float64 + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + v := math.Abs(a[i*lda+j]) + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + } + return max + case lapack.MaxRowSum, lapack.MaxColumnSum: + // A symmetric matrix has the same 1-norm and ∞-norm. 
+ for i := 0; i < n; i++ { + work[i] = 0 + } + if uplo == blas.Upper { + for i := 0; i < n; i++ { + work[i] += math.Abs(a[i*lda+i]) + for j := i + 1; j < n; j++ { + v := math.Abs(a[i*lda+j]) + work[i] += v + work[j] += v + } + } + } else { + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + v := math.Abs(a[i*lda+j]) + work[i] += v + work[j] += v + } + work[i] += math.Abs(a[i*lda+i]) + } + } + var max float64 + for i := 0; i < n; i++ { + v := work[i] + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + return max + case lapack.Frobenius: + if uplo == blas.Upper { + var sum float64 + for i := 0; i < n; i++ { + v := a[i*lda+i] + sum += v * v + for j := i + 1; j < n; j++ { + v := a[i*lda+j] + sum += 2 * v * v + } + } + return math.Sqrt(sum) + } + var sum float64 + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + v := a[i*lda+j] + sum += 2 * v * v + } + v := a[i*lda+i] + sum += v * v + } + return math.Sqrt(sum) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go new file mode 100644 index 0000000000..cc96391d94 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlantr.go @@ -0,0 +1,260 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dlantr computes the specified norm of an m×n trapezoidal matrix A. If +// norm == lapack.MaxColumnSum work must have length at least n, otherwise work +// is unused. 
+func (impl Implementation) Dlantr(norm lapack.MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64 { + switch { + case norm != lapack.MaxRowSum && norm != lapack.MaxColumnSum && norm != lapack.Frobenius && norm != lapack.MaxAbs: + panic(badNorm) + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + minmn := min(m, n) + if minmn == 0 { + return 0 + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case norm == lapack.MaxColumnSum && len(work) < n: + panic(shortWork) + } + + switch norm { + default: + panic(badNorm) + case lapack.MaxAbs: + if diag == blas.Unit { + value := 1.0 + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + } + for i := 1; i < m; i++ { + for j := 0; j < min(i, n); j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + } + var value float64 + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + } + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + tmp := math.Abs(a[i*lda+j]) + if math.IsNaN(tmp) { + return tmp + } + if tmp > value { + value = tmp + } + } + } + return value + case lapack.MaxColumnSum: + if diag == blas.Unit { + for i := 0; i < minmn; i++ { + work[i] = 1 + } + for i := minmn; i < n; i++ { + work[i] = 0 + } + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } else { + for i := 1; i < m; i++ { + for j := 0; 
j < min(i, n); j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } + } else { + for i := 0; i < n; i++ { + work[i] = 0 + } + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + work[j] += math.Abs(a[i*lda+j]) + } + } + } + } + var max float64 + for _, v := range work[:n] { + if math.IsNaN(v) { + return math.NaN() + } + if v > max { + max = v + } + } + return max + case lapack.MaxRowSum: + var maxsum float64 + if diag == blas.Unit { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + var sum float64 + if i < minmn { + sum = 1 + } + for j := i + 1; j < n; j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return math.NaN() + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } else { + for i := 1; i < m; i++ { + var sum float64 + if i < minmn { + sum = 1 + } + for j := 0; j < min(i, n); j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return math.NaN() + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } + } else { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + var sum float64 + for j := i; j < n; j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return sum + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } else { + for i := 0; i < m; i++ { + var sum float64 + for j := 0; j <= min(i, n-1); j++ { + sum += math.Abs(a[i*lda+j]) + } + if math.IsNaN(sum) { + return sum + } + if sum > maxsum { + maxsum = sum + } + } + return maxsum + } + } + case lapack.Frobenius: + var nrm float64 + if diag == blas.Unit { + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + tmp := a[i*lda+j] + nrm += tmp * tmp + } + } + } else { + for i := 1; i < m; i++ { + for j := 0; j < min(i, n); j++ { + tmp := a[i*lda+j] + nrm += tmp * tmp + } + } + } + nrm += float64(minmn) + } else { + if uplo == blas.Upper { + for i := 0; i < 
m; i++ { + for j := i; j < n; j++ { + tmp := math.Abs(a[i*lda+j]) + nrm += tmp * tmp + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + tmp := math.Abs(a[i*lda+j]) + nrm += tmp * tmp + } + } + } + } + return math.Sqrt(nrm) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go new file mode 100644 index 0000000000..e5dcfb7522 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlanv2.go @@ -0,0 +1,132 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlanv2 computes the Schur factorization of a real 2×2 matrix: +// [ a b ] = [ cs -sn ] * [ aa bb ] * [ cs sn ] +// [ c d ] [ sn cs ] [ cc dd ] * [-sn cs ] +// If cc is zero, aa and dd are real eigenvalues of the matrix. Otherwise it +// holds that aa = dd and bb*cc < 0, and aa ± sqrt(bb*cc) are complex conjugate +// eigenvalues. The real and imaginary parts of the eigenvalues are returned in +// (rt1r,rt1i) and (rt2r,rt2i). +func (impl Implementation) Dlanv2(a, b, c, d float64) (aa, bb, cc, dd float64, rt1r, rt1i, rt2r, rt2i float64, cs, sn float64) { + switch { + case c == 0: // Matrix is already upper triangular. + aa = a + bb = b + cc = 0 + dd = d + cs = 1 + sn = 0 + case b == 0: // Matrix is lower triangular, swap rows and columns. + aa = d + bb = -c + cc = 0 + dd = a + cs = 0 + sn = 1 + case a == d && math.Signbit(b) != math.Signbit(c): // Matrix is already in the standard Schur form. + aa = a + bb = b + cc = c + dd = d + cs = 1 + sn = 0 + default: + temp := a - d + p := temp / 2 + bcmax := math.Max(math.Abs(b), math.Abs(c)) + bcmis := math.Min(math.Abs(b), math.Abs(c)) + if b*c < 0 { + bcmis *= -1 + } + scale := math.Max(math.Abs(p), bcmax) + z := p/scale*p + bcmax/scale*bcmis + eps := dlamchP + + if z >= 4*eps { + // Real eigenvalues. 
Compute aa and dd. + if p > 0 { + z = p + math.Sqrt(scale)*math.Sqrt(z) + } else { + z = p - math.Sqrt(scale)*math.Sqrt(z) + } + aa = d + z + dd = d - bcmax/z*bcmis + // Compute bb and the rotation matrix. + tau := impl.Dlapy2(c, z) + cs = z / tau + sn = c / tau + bb = b - c + cc = 0 + } else { + // Complex eigenvalues, or real (almost) equal eigenvalues. + // Make diagonal elements equal. + sigma := b + c + tau := impl.Dlapy2(sigma, temp) + cs = math.Sqrt((1 + math.Abs(sigma)/tau) / 2) + sn = -p / (tau * cs) + if sigma < 0 { + sn *= -1 + } + // Compute [ aa bb ] = [ a b ] [ cs -sn ] + // [ cc dd ] [ c d ] [ sn cs ] + aa = a*cs + b*sn + bb = -a*sn + b*cs + cc = c*cs + d*sn + dd = -c*sn + d*cs + // Compute [ a b ] = [ cs sn ] [ aa bb ] + // [ c d ] [-sn cs ] [ cc dd ] + a = aa*cs + cc*sn + b = bb*cs + dd*sn + c = -aa*sn + cc*cs + d = -bb*sn + dd*cs + + temp = (a + d) / 2 + aa = temp + bb = b + cc = c + dd = temp + + if cc != 0 { + if bb != 0 { + if math.Signbit(bb) == math.Signbit(cc) { + // Real eigenvalues, reduce to + // upper triangular form. + sab := math.Sqrt(math.Abs(bb)) + sac := math.Sqrt(math.Abs(cc)) + p = sab * sac + if cc < 0 { + p *= -1 + } + tau = 1 / math.Sqrt(math.Abs(bb+cc)) + aa = temp + p + bb = bb - cc + cc = 0 + dd = temp - p + cs1 := sab * tau + sn1 := sac * tau + cs, sn = cs*cs1-sn*sn1, cs*sn1+sn*cs1 + } + } else { + bb = -cc + cc = 0 + cs, sn = -sn, cs + } + } + } + } + + // Store eigenvalues in (rt1r,rt1i) and (rt2r,rt2i). + rt1r = aa + rt2r = dd + if cc != 0 { + rt1i = math.Sqrt(math.Abs(bb)) * math.Sqrt(math.Abs(cc)) + rt2i = -rt1i + } + return +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go new file mode 100644 index 0000000000..bf98c338eb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapll.go @@ -0,0 +1,55 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas/blas64" + +// Dlapll returns the smallest singular value of the n×2 matrix A = [ x y ]. +// The function first computes the QR factorization of A = Q*R, and then computes +// the SVD of the 2-by-2 upper triangular matrix r. +// +// The contents of x and y are overwritten during the call. +// +// Dlapll is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlapll(n int, x []float64, incX int, y []float64, incY int) float64 { + switch { + case n < 0: + panic(nLT0) + case incX <= 0: + panic(badIncX) + case incY <= 0: + panic(badIncY) + } + + // Quick return if possible. + if n == 0 { + return 0 + } + + switch { + case len(x) < 1+(n-1)*incX: + panic(shortX) + case len(y) < 1+(n-1)*incY: + panic(shortY) + } + + // Quick return if possible. + if n == 1 { + return 0 + } + + // Compute the QR factorization of the N-by-2 matrix [ X Y ]. + a00, tau := impl.Dlarfg(n, x[0], x[incX:], incX) + x[0] = 1 + + bi := blas64.Implementation() + c := -tau * bi.Ddot(n, x, incX, y, incY) + bi.Daxpy(n, c, x, incX, y, incY) + a11, _ := impl.Dlarfg(n-1, y[incY], y[2*incY:], incY) + + // Compute the SVD of 2-by-2 upper triangular matrix. + ssmin, _ := impl.Dlas2(a00, y[0], a11) + return ssmin +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go new file mode 100644 index 0000000000..55f1567f3a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapmt.go @@ -0,0 +1,89 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas/blas64" + +// Dlapmt rearranges the columns of the m×n matrix X as specified by the +// permutation k_0, k_1, ..., k_n-1 of the integers 0, ..., n-1. 
+// +// If forward is true a forward permutation is performed: +// +// X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1. +// +// otherwise a backward permutation is performed: +// +// X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1. +// +// k must have length n, otherwise Dlapmt will panic. k is zero-indexed. +func (impl Implementation) Dlapmt(forward bool, m, n int, x []float64, ldx int, k []int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case ldx < max(1, n): + panic(badLdX) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + switch { + case len(x) < (m-1)*ldx+n: + panic(shortX) + case len(k) != n: + panic(badLenK) + } + + // Quick return if possible. + if n == 1 { + return + } + + for i, v := range k { + v++ + k[i] = -v + } + + bi := blas64.Implementation() + + if forward { + for j, v := range k { + if v >= 0 { + continue + } + k[j] = -v + i := -v - 1 + for k[i] < 0 { + bi.Dswap(m, x[j:], ldx, x[i:], ldx) + + k[i] = -k[i] + j = i + i = k[i] - 1 + } + } + } else { + for i, v := range k { + if v >= 0 { + continue + } + k[i] = -v + j := -v - 1 + for j != i { + bi.Dswap(m, x[j:], ldx, x[i:], ldx) + + k[j] = -k[j] + j = k[j] - 1 + } + } + } + + for i := range k { + k[i]-- + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go new file mode 100644 index 0000000000..19f73ffabd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlapy2.go @@ -0,0 +1,14 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlapy2 is the LAPACK version of math.Hypot. +// +// Dlapy2 is an internal routine. It is exported for testing purposes. 
+func (Implementation) Dlapy2(x, y float64) float64 { + return math.Hypot(x, y) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go new file mode 100644 index 0000000000..d3a0def639 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqp2.go @@ -0,0 +1,127 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlaqp2 computes a QR factorization with column pivoting of the block A[offset:m, 0:n] +// of the m×n matrix A. The block A[0:offset, 0:n] is accordingly pivoted, but not factorized. +// +// On exit, the upper triangle of block A[offset:m, 0:n] is the triangular factor obtained. +// The elements in block A[offset:m, 0:n] below the diagonal, together with tau, represent +// the orthogonal matrix Q as a product of elementary reflectors. +// +// offset is number of rows of the matrix A that must be pivoted but not factorized. +// offset must not be negative otherwise Dlaqp2 will panic. +// +// On exit, jpvt holds the permutation that was applied; the jth column of A*P was the +// jpvt[j] column of A. jpvt must have length n, otherwise Dlaqp2 will panic. +// +// On exit tau holds the scalar factors of the elementary reflectors. It must have length +// at least min(m-offset, n) otherwise Dlaqp2 will panic. +// +// vn1 and vn2 hold the partial and complete column norms respectively. They must have length n, +// otherwise Dlaqp2 will panic. +// +// work must have length n, otherwise Dlaqp2 will panic. +// +// Dlaqp2 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlaqp2(m, n, offset int, a []float64, lda int, jpvt []int, tau, vn1, vn2, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case offset < 0: + panic(offsetLT0) + case offset > m: + panic(offsetGTM) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + mn := min(m-offset, n) + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(jpvt) != n: + panic(badLenJpvt) + case len(tau) < mn: + panic(shortTau) + case len(vn1) < n: + panic(shortVn1) + case len(vn2) < n: + panic(shortVn2) + case len(work) < n: + panic(shortWork) + } + + tol3z := math.Sqrt(dlamchE) + + bi := blas64.Implementation() + + // Compute factorization. + for i := 0; i < mn; i++ { + offpi := offset + i + + // Determine ith pivot column and swap if necessary. + p := i + bi.Idamax(n-i, vn1[i:], 1) + if p != i { + bi.Dswap(m, a[p:], lda, a[i:], lda) + jpvt[p], jpvt[i] = jpvt[i], jpvt[p] + vn1[p] = vn1[i] + vn2[p] = vn2[i] + } + + // Generate elementary reflector H_i. + if offpi < m-1 { + a[offpi*lda+i], tau[i] = impl.Dlarfg(m-offpi, a[offpi*lda+i], a[(offpi+1)*lda+i:], lda) + } else { + tau[i] = 0 + } + + if i < n-1 { + // Apply H_i^T to A[offset+i:m, i:n] from the left. + aii := a[offpi*lda+i] + a[offpi*lda+i] = 1 + impl.Dlarf(blas.Left, m-offpi, n-i-1, a[offpi*lda+i:], lda, tau[i], a[offpi*lda+i+1:], lda, work) + a[offpi*lda+i] = aii + } + + // Update partial column norms. + for j := i + 1; j < n; j++ { + if vn1[j] == 0 { + continue + } + + // The following marked lines follow from the + // analysis in Lapack Working Note 176. 
+ r := math.Abs(a[offpi*lda+j]) / vn1[j] // * + temp := math.Max(0, 1-r*r) // * + r = vn1[j] / vn2[j] // * + temp2 := temp * r * r // * + if temp2 < tol3z { + var v float64 + if offpi < m-1 { + v = bi.Dnrm2(m-offpi-1, a[(offpi+1)*lda+j:], lda) + } + vn1[j] = v + vn2[j] = v + } else { + vn1[j] *= math.Sqrt(temp) // * + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go new file mode 100644 index 0000000000..dd683b62ae --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqps.go @@ -0,0 +1,244 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlaqps computes a step of QR factorization with column pivoting +// of an m×n matrix A by using Blas-3. It tries to factorize nb +// columns from A starting from the row offset, and updates all +// of the matrix with Dgemm. +// +// In some cases, due to catastrophic cancellations, it cannot +// factorize nb columns. Hence, the actual number of factorized +// columns is returned in kb. +// +// Dlaqps computes a QR factorization with column pivoting of the +// block A[offset:m, 0:nb] of the m×n matrix A. The block +// A[0:offset, 0:n] is accordingly pivoted, but not factorized. +// +// On exit, the upper triangle of block A[offset:m, 0:kb] is the +// triangular factor obtained. The elements in block A[offset:m, 0:n] +// below the diagonal, together with tau, represent the orthogonal +// matrix Q as a product of elementary reflectors. +// +// offset is number of rows of the matrix A that must be pivoted but +// not factorized. offset must not be negative otherwise Dlaqps will panic. +// +// On exit, jpvt holds the permutation that was applied; the jth column +// of A*P was the jpvt[j] column of A. 
jpvt must have length n, +// otherwise Dlapqs will panic. +// +// On exit tau holds the scalar factors of the elementary reflectors. +// It must have length nb, otherwise Dlapqs will panic. +// +// vn1 and vn2 hold the partial and complete column norms respectively. +// They must have length n, otherwise Dlapqs will panic. +// +// auxv must have length nb, otherwise Dlaqps will panic. +// +// f and ldf represent an n×nb matrix F that is overwritten during the +// call. +// +// Dlaqps is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaqps(m, n, offset, nb int, a []float64, lda int, jpvt []int, tau, vn1, vn2, auxv, f []float64, ldf int) (kb int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case offset < 0: + panic(offsetLT0) + case offset > m: + panic(offsetGTM) + case nb < 0: + panic(nbLT0) + case nb > n: + panic(nbGTN) + case lda < max(1, n): + panic(badLdA) + case ldf < max(1, nb): + panic(badLdF) + } + + if m == 0 || n == 0 { + return 0 + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(jpvt) != n: + panic(badLenJpvt) + case len(vn1) < n: + panic(shortVn1) + case len(vn2) < n: + panic(shortVn2) + } + + if nb == 0 { + return 0 + } + + switch { + case len(tau) < nb: + panic(shortTau) + case len(auxv) < nb: + panic(shortAuxv) + case len(f) < (n-1)*ldf+nb: + panic(shortF) + } + + if offset == m { + return 0 + } + + lastrk := min(m, n+offset) + lsticc := -1 + tol3z := math.Sqrt(dlamchE) + + bi := blas64.Implementation() + + var k, rk int + for ; k < nb && lsticc == -1; k++ { + rk = offset + k + + // Determine kth pivot column and swap if necessary. + p := k + bi.Idamax(n-k, vn1[k:], 1) + if p != k { + bi.Dswap(m, a[p:], lda, a[k:], lda) + bi.Dswap(k, f[p*ldf:], 1, f[k*ldf:], 1) + jpvt[p], jpvt[k] = jpvt[k], jpvt[p] + vn1[p] = vn1[k] + vn2[p] = vn2[k] + } + + // Apply previous Householder reflectors to column K: + // + // A[rk:m, k] = A[rk:m, k] - A[rk:m, 0:k-1]*F[k, 0:k-1]^T. 
+ if k > 0 { + bi.Dgemv(blas.NoTrans, m-rk, k, -1, + a[rk*lda:], lda, + f[k*ldf:], 1, + 1, + a[rk*lda+k:], lda) + } + + // Generate elementary reflector H_k. + if rk < m-1 { + a[rk*lda+k], tau[k] = impl.Dlarfg(m-rk, a[rk*lda+k], a[(rk+1)*lda+k:], lda) + } else { + tau[k] = 0 + } + + akk := a[rk*lda+k] + a[rk*lda+k] = 1 + + // Compute kth column of F: + // + // Compute F[k+1:n, k] = tau[k]*A[rk:m, k+1:n]^T*A[rk:m, k]. + if k < n-1 { + bi.Dgemv(blas.Trans, m-rk, n-k-1, tau[k], + a[rk*lda+k+1:], lda, + a[rk*lda+k:], lda, + 0, + f[(k+1)*ldf+k:], ldf) + } + + // Padding F[0:k, k] with zeros. + for j := 0; j < k; j++ { + f[j*ldf+k] = 0 + } + + // Incremental updating of F: + // + // F[0:n, k] := F[0:n, k] - tau[k]*F[0:n, 0:k-1]*A[rk:m, 0:k-1]^T*A[rk:m,k]. + if k > 0 { + bi.Dgemv(blas.Trans, m-rk, k, -tau[k], + a[rk*lda:], lda, + a[rk*lda+k:], lda, + 0, + auxv, 1) + bi.Dgemv(blas.NoTrans, n, k, 1, + f, ldf, + auxv, 1, + 1, + f[k:], ldf) + } + + // Update the current row of A: + // + // A[rk, k+1:n] = A[rk, k+1:n] - A[rk, 0:k]*F[k+1:n, 0:k]^T. + if k < n-1 { + bi.Dgemv(blas.NoTrans, n-k-1, k+1, -1, + f[(k+1)*ldf:], ldf, + a[rk*lda:], 1, + 1, + a[rk*lda+k+1:], 1) + } + + // Update partial column norms. + if rk < lastrk-1 { + for j := k + 1; j < n; j++ { + if vn1[j] == 0 { + continue + } + + // The following marked lines follow from the + // analysis in Lapack Working Note 176. + r := math.Abs(a[rk*lda+j]) / vn1[j] // * + temp := math.Max(0, 1-r*r) // * + r = vn1[j] / vn2[j] // * + temp2 := temp * r * r // * + if temp2 < tol3z { + // vn2 is used here as a collection of + // indices into vn2 and also a collection + // of column norms. + vn2[j] = float64(lsticc) + lsticc = j + } else { + vn1[j] *= math.Sqrt(temp) // * + } + } + } + + a[rk*lda+k] = akk + } + kb = k + rk = offset + kb + + // Apply the block reflector to the rest of the matrix: + // + // A[offset+kb+1:m, kb+1:n] := A[offset+kb+1:m, kb+1:n] - A[offset+kb+1:m, 1:kb]*F[kb+1:n, 1:kb]^T. 
+ if kb < min(n, m-offset) { + bi.Dgemm(blas.NoTrans, blas.Trans, + m-rk, n-kb, kb, -1, + a[rk*lda:], lda, + f[kb*ldf:], ldf, + 1, + a[rk*lda+kb:], lda) + } + + // Recomputation of difficult columns. + for lsticc >= 0 { + itemp := int(vn2[lsticc]) + + // NOTE: The computation of vn1[lsticc] relies on the fact that + // Dnrm2 does not fail on vectors with norm below the value of + // sqrt(dlamchS) + v := bi.Dnrm2(m-rk, a[rk*lda+lsticc:], lda) + vn1[lsticc] = v + vn2[lsticc] = v + + lsticc = itemp + } + + return kb +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go new file mode 100644 index 0000000000..e9fbb6007e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr04.go @@ -0,0 +1,478 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" +) + +// Dlaqr04 computes the eigenvalues of a block of an n×n upper Hessenberg matrix +// H, and optionally the matrices T and Z from the Schur decomposition +// H = Z T Z^T +// where T is an upper quasi-triangular matrix (the Schur form), and Z is the +// orthogonal matrix of Schur vectors. +// +// wantt indicates whether the full Schur form T is required. If wantt is false, +// then only enough of H will be updated to preserve the eigenvalues. +// +// wantz indicates whether the n×n matrix of Schur vectors Z is required. If it +// is true, the orthogonal similarity transformation will be accumulated into +// Z[iloz:ihiz+1,ilo:ihi+1], otherwise Z will not be referenced. +// +// ilo and ihi determine the block of H on which Dlaqr04 operates. 
It must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo == 0 and ihi == -1, if n == 0, +// and the block must be isolated, that is, +// ilo == 0 or H[ilo,ilo-1] == 0, +// ihi == n-1 or H[ihi+1,ihi] == 0, +// otherwise Dlaqr04 will panic. +// +// wr and wi must have length ihi+1. +// +// iloz and ihiz specify the rows of Z to which transformations will be applied +// if wantz is true. It must hold that +// 0 <= iloz <= ilo, and ihi <= ihiz < n, +// otherwise Dlaqr04 will panic. +// +// work must have length at least lwork and lwork must be +// lwork >= 1, if n <= 11, +// lwork >= n, if n > 11, +// otherwise Dlaqr04 will panic. lwork as large as 6*n may be required for +// optimal performance. On return, work[0] will contain the optimal value of +// lwork. +// +// If lwork is -1, instead of performing Dlaqr04, the function only estimates the +// optimal workspace size and stores it into work[0]. Neither h nor z are +// accessed. +// +// recur is the non-negative recursion depth. For recur > 0, Dlaqr04 behaves +// as DLAQR0, for recur == 0 it behaves as DLAQR4. +// +// unconverged indicates whether Dlaqr04 computed all the eigenvalues of H[ilo:ihi+1,ilo:ihi+1]. +// +// If unconverged is zero and wantt is true, H will contain on return the upper +// quasi-triangular matrix T from the Schur decomposition. 2×2 diagonal blocks +// (corresponding to complex conjugate pairs of eigenvalues) will be returned in +// standard form, with H[i,i] == H[i+1,i+1] and H[i+1,i]*H[i,i+1] < 0. +// +// If unconverged is zero and if wantt is false, the contents of h on return is +// unspecified. +// +// If unconverged is zero, all the eigenvalues have been computed and their real +// and imaginary parts will be stored on return in wr[ilo:ihi+1] and +// wi[ilo:ihi+1], respectively. If two eigenvalues are computed as a complex +// conjugate pair, they are stored in consecutive elements of wr and wi, say the +// i-th and (i+1)th, with wi[i] > 0 and wi[i+1] < 0. 
If wantt is true, then the +// eigenvalues are stored in the same order as on the diagonal of the Schur form +// returned in H, with wr[i] = H[i,i] and, if H[i:i+2,i:i+2] is a 2×2 diagonal +// block, wi[i] = sqrt(-H[i+1,i]*H[i,i+1]) and wi[i+1] = -wi[i]. +// +// If unconverged is positive, some eigenvalues have not converged, and +// wr[unconverged:ihi+1] and wi[unconverged:ihi+1] will contain those +// eigenvalues which have been successfully computed. Failures are rare. +// +// If unconverged is positive and wantt is true, then on return +// (initial H)*U = U*(final H), (*) +// where U is an orthogonal matrix. The final H is upper Hessenberg and +// H[unconverged:ihi+1,unconverged:ihi+1] is upper quasi-triangular. +// +// If unconverged is positive and wantt is false, on return the remaining +// unconverged eigenvalues are the eigenvalues of the upper Hessenberg matrix +// H[ilo:unconverged,ilo:unconverged]. +// +// If unconverged is positive and wantz is true, then on return +// (final Z) = (initial Z)*U, +// where U is the orthogonal matrix in (*) regardless of the value of wantt. +// +// References: +// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part I: +// Maintaining Well-Focused Shifts and Level 3 Performance. SIAM J. Matrix +// Anal. Appl. 23(4) (2002), pp. 929—947 +// URL: http://dx.doi.org/10.1137/S0895479801384573 +// [2] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl. 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +// Dlaqr04 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlaqr04(wantt, wantz bool, n, ilo, ihi int, h []float64, ldh int, wr, wi []float64, iloz, ihiz int, z []float64, ldz int, work []float64, lwork int, recur int) (unconverged int) { + const ( + // Matrices of order ntiny or smaller must be processed by + // Dlahqr because of insufficient subdiagonal scratch space. + // This is a hard limit. + ntiny = 11 + // Exceptional deflation windows: try to cure rare slow + // convergence by varying the size of the deflation window after + // kexnw iterations. + kexnw = 5 + // Exceptional shifts: try to cure rare slow convergence with + // ad-hoc exceptional shifts every kexsh iterations. + kexsh = 6 + + // See https://github.com/gonum/lapack/pull/151#discussion_r68162802 + // and the surrounding discussion for an explanation where these + // constants come from. + // TODO(vladimir-ch): Similar constants for exceptional shifts + // are used also in dlahqr.go. The first constant is different + // there, it is equal to 3. Why? And does it matter? + wilk1 = 0.75 + wilk2 = -0.4375 + ) + + switch { + case n < 0: + panic(nLT0) + case ilo < 0 || max(0, n-1) < ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case ldh < max(1, n): + panic(badLdH) + case wantz && (iloz < 0 || ilo < iloz): + panic(badIloz) + case wantz && (ihiz < ihi || n <= ihiz): + panic(badIhiz) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + case lwork < 1 && lwork != -1: + panic(badLWork) + // TODO(vladimir-ch): Enable if and when we figure out what the minimum + // necessary lwork value is. Dlaqr04 says that the minimum is n which + // clashes with Dlaqr23's opinion about optimal work when nw <= 2 + // (independent of n). + // case lwork < n && n > ntiny && lwork != -1: + // panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + case recur < 0: + panic(recurLT0) + } + + // Quick return. 
+ if n == 0 { + work[0] = 1 + return 0 + } + + if lwork != -1 { + switch { + case len(h) < (n-1)*ldh+n: + panic(shortH) + case len(wr) != ihi+1: + panic(badLenWr) + case len(wi) != ihi+1: + panic(badLenWi) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + case ilo > 0 && h[ilo*ldh+ilo-1] != 0: + panic(notIsolated) + case ihi+1 < n && h[(ihi+1)*ldh+ihi] != 0: + panic(notIsolated) + } + } + + if n <= ntiny { + // Tiny matrices must use Dlahqr. + if lwork == -1 { + work[0] = 1 + return 0 + } + return impl.Dlahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz) + } + + // Use small bulge multi-shift QR with aggressive early deflation on + // larger-than-tiny matrices. + var jbcmpz string + if wantt { + jbcmpz = "S" + } else { + jbcmpz = "E" + } + if wantz { + jbcmpz += "V" + } else { + jbcmpz += "N" + } + + var fname string + if recur > 0 { + fname = "DLAQR0" + } else { + fname = "DLAQR4" + } + // nwr is the recommended deflation window size. n is greater than 11, + // so there is enough subdiagonal workspace for nwr >= 2 as required. + // (In fact, there is enough subdiagonal space for nwr >= 3.) + // TODO(vladimir-ch): If there is enough space for nwr >= 3, should we + // use it? + nwr := impl.Ilaenv(13, fname, jbcmpz, n, ilo, ihi, lwork) + nwr = max(2, nwr) + nwr = min(ihi-ilo+1, min((n-1)/3, nwr)) + + // nsr is the recommended number of simultaneous shifts. n is greater + // than 11, so there is enough subdiagonal workspace for nsr to be even + // and greater than or equal to two as required. + nsr := impl.Ilaenv(15, fname, jbcmpz, n, ilo, ihi, lwork) + nsr = min(nsr, min((n+6)/9, ihi-ilo)) + nsr = max(2, nsr&^1) + + // Workspace query call to Dlaqr23. + impl.Dlaqr23(wantt, wantz, n, ilo, ihi, nwr+1, h, ldh, iloz, ihiz, z, ldz, + wr, wi, h, ldh, n, h, ldh, n, h, ldh, work, -1, recur) + // Optimal workspace is max(Dlaqr5, Dlaqr23). + lwkopt := max(3*nsr/2, int(work[0])) + // Quick return in case of workspace query. 
+ if lwork == -1 { + work[0] = float64(lwkopt) + return 0 + } + + // Dlahqr/Dlaqr04 crossover point. + nmin := impl.Ilaenv(12, fname, jbcmpz, n, ilo, ihi, lwork) + nmin = max(ntiny, nmin) + + // Nibble determines when to skip a multi-shift QR sweep (Dlaqr5). + nibble := impl.Ilaenv(14, fname, jbcmpz, n, ilo, ihi, lwork) + nibble = max(0, nibble) + + // Computation mode of far-from-diagonal orthogonal updates in Dlaqr5. + kacc22 := impl.Ilaenv(16, fname, jbcmpz, n, ilo, ihi, lwork) + kacc22 = max(0, min(kacc22, 2)) + + // nwmax is the largest possible deflation window for which there is + // sufficient workspace. + nwmax := min((n-1)/3, lwork/2) + nw := nwmax // Start with maximum deflation window size. + + // nsmax is the largest number of simultaneous shifts for which there is + // sufficient workspace. + nsmax := min((n+6)/9, 2*lwork/3) &^ 1 + + ndfl := 1 // Number of iterations since last deflation. + ndec := 0 // Deflation window size decrement. + + // Main loop. + var ( + itmax = max(30, 2*kexsh) * max(10, (ihi-ilo+1)) + it = 0 + ) + for kbot := ihi; kbot >= ilo; { + if it == itmax { + unconverged = kbot + 1 + break + } + it++ + + // Locate active block. + ktop := ilo + for k := kbot; k >= ilo+1; k-- { + if h[k*ldh+k-1] == 0 { + ktop = k + break + } + } + + // Select deflation window size nw. + // + // Typical Case: + // If possible and advisable, nibble the entire active block. + // If not, use size min(nwr,nwmax) or min(nwr+1,nwmax) + // depending upon which has the smaller corresponding + // subdiagonal entry (a heuristic). + // + // Exceptional Case: + // If there have been no deflations in kexnw or more + // iterations, then vary the deflation window size. At first, + // because larger windows are, in general, more powerful than + // smaller ones, rapidly increase the window to the maximum + // possible. Then, gradually reduce the window size. 
+ nh := kbot - ktop + 1 + nwupbd := min(nh, nwmax) + if ndfl < kexnw { + nw = min(nwupbd, nwr) + } else { + nw = min(nwupbd, 2*nw) + } + if nw < nwmax { + if nw >= nh-1 { + nw = nh + } else { + kwtop := kbot - nw + 1 + if math.Abs(h[kwtop*ldh+kwtop-1]) > math.Abs(h[(kwtop-1)*ldh+kwtop-2]) { + nw++ + } + } + } + if ndfl < kexnw { + ndec = -1 + } else if ndec >= 0 || nw >= nwupbd { + ndec++ + if nw-ndec < 2 { + ndec = 0 + } + nw -= ndec + } + + // Split workspace under the subdiagonal of H into: + // - an nw×nw work array V in the lower left-hand corner, + // - an nw×nhv horizontal work array along the bottom edge (nhv + // must be at least nw but more is better), + // - an nve×nw vertical work array along the left-hand-edge + // (nhv can be any positive integer but more is better). + kv := n - nw + kt := nw + kwv := nw + 1 + nhv := n - kwv - kt + // Aggressive early deflation. + ls, ld := impl.Dlaqr23(wantt, wantz, n, ktop, kbot, nw, + h, ldh, iloz, ihiz, z, ldz, wr[:kbot+1], wi[:kbot+1], + h[kv*ldh:], ldh, nhv, h[kv*ldh+kt:], ldh, nhv, h[kwv*ldh:], ldh, work, lwork, recur) + + // Adjust kbot accounting for new deflations. + kbot -= ld + // ks points to the shifts. + ks := kbot - ls + 1 + + // Skip an expensive QR sweep if there is a (partly heuristic) + // reason to expect that many eigenvalues will deflate without + // it. Here, the QR sweep is skipped if many eigenvalues have + // just been deflated or if the remaining active block is small. + if ld > 0 && (100*ld > nw*nibble || kbot-ktop+1 <= min(nmin, nwmax)) { + // ld is positive, note progress. + ndfl = 1 + continue + } + + // ns is the nominal number of simultaneous shifts. This may be + // lowered (slightly) if Dlaqr23 did not provide that many + // shifts. + ns := min(min(nsmax, nsr), max(2, kbot-ktop)) &^ 1 + + // If there have been no deflations in a multiple of kexsh + // iterations, then try exceptional shifts. 
Otherwise use shifts + // provided by Dlaqr23 above or from the eigenvalues of a + // trailing principal submatrix. + if ndfl%kexsh == 0 { + ks = kbot - ns + 1 + for i := kbot; i > max(ks, ktop+1); i -= 2 { + ss := math.Abs(h[i*ldh+i-1]) + math.Abs(h[(i-1)*ldh+i-2]) + aa := wilk1*ss + h[i*ldh+i] + _, _, _, _, wr[i-1], wi[i-1], wr[i], wi[i], _, _ = + impl.Dlanv2(aa, ss, wilk2*ss, aa) + } + if ks == ktop { + wr[ks+1] = h[(ks+1)*ldh+ks+1] + wi[ks+1] = 0 + wr[ks] = wr[ks+1] + wi[ks] = wi[ks+1] + } + } else { + // If we got ns/2 or fewer shifts, use Dlahqr or recur + // into Dlaqr04 on a trailing principal submatrix to get + // more. Since ns <= nsmax <=(n+6)/9, there is enough + // space below the subdiagonal to fit an ns×ns scratch + // array. + if kbot-ks+1 <= ns/2 { + ks = kbot - ns + 1 + kt = n - ns + impl.Dlacpy(blas.All, ns, ns, h[ks*ldh+ks:], ldh, h[kt*ldh:], ldh) + if ns > nmin && recur > 0 { + ks += impl.Dlaqr04(false, false, ns, 1, ns-1, h[kt*ldh:], ldh, + wr[ks:ks+ns], wi[ks:ks+ns], 0, 0, nil, 0, work, lwork, recur-1) + } else { + ks += impl.Dlahqr(false, false, ns, 0, ns-1, h[kt*ldh:], ldh, + wr[ks:ks+ns], wi[ks:ks+ns], 0, 0, nil, 1) + } + // In case of a rare QR failure use eigenvalues + // of the trailing 2×2 principal submatrix. + if ks >= kbot { + aa := h[(kbot-1)*ldh+kbot-1] + bb := h[(kbot-1)*ldh+kbot] + cc := h[kbot*ldh+kbot-1] + dd := h[kbot*ldh+kbot] + _, _, _, _, wr[kbot-1], wi[kbot-1], wr[kbot], wi[kbot], _, _ = + impl.Dlanv2(aa, bb, cc, dd) + ks = kbot - 1 + } + } + + if kbot-ks+1 > ns { + // Sorting the shifts helps a little. Bubble + // sort keeps complex conjugate pairs together. 
+ sorted := false + for k := kbot; k > ks; k-- { + if sorted { + break + } + sorted = true + for i := ks; i < k; i++ { + if math.Abs(wr[i])+math.Abs(wi[i]) >= math.Abs(wr[i+1])+math.Abs(wi[i+1]) { + continue + } + sorted = false + wr[i], wr[i+1] = wr[i+1], wr[i] + wi[i], wi[i+1] = wi[i+1], wi[i] + } + } + } + + // Shuffle shifts into pairs of real shifts and pairs of + // complex conjugate shifts using the fact that complex + // conjugate shifts are already adjacent to one another. + // TODO(vladimir-ch): The shuffling here could probably + // be removed but I'm not sure right now and it's safer + // to leave it. + for i := kbot; i > ks+1; i -= 2 { + if wi[i] == -wi[i-1] { + continue + } + wr[i], wr[i-1], wr[i-2] = wr[i-1], wr[i-2], wr[i] + wi[i], wi[i-1], wi[i-2] = wi[i-1], wi[i-2], wi[i] + } + } + + // If there are only two shifts and both are real, then use only one. + if kbot-ks+1 == 2 && wi[kbot] == 0 { + if math.Abs(wr[kbot]-h[kbot*ldh+kbot]) < math.Abs(wr[kbot-1]-h[kbot*ldh+kbot]) { + wr[kbot-1] = wr[kbot] + } else { + wr[kbot] = wr[kbot-1] + } + } + + // Use up to ns of the smallest magnitude shifts. If there + // aren't ns shifts available, then use them all, possibly + // dropping one to make the number of shifts even. + ns = min(ns, kbot-ks+1) &^ 1 + ks = kbot - ns + 1 + + // Split workspace under the subdiagonal into: + // - a kdu×kdu work array U in the lower left-hand-corner, + // - a kdu×nhv horizontal work array WH along the bottom edge + // (nhv must be at least kdu but more is better), + // - an nhv×kdu vertical work array WV along the left-hand-edge + // (nhv must be at least kdu but more is better). + kdu := 3*ns - 3 + ku := n - kdu + kwh := kdu + kwv = kdu + 3 + nhv = n - kwv - kdu + // Small-bulge multi-shift QR sweep. 
+ impl.Dlaqr5(wantt, wantz, kacc22, n, ktop, kbot, ns, + wr[ks:ks+ns], wi[ks:ks+ns], h, ldh, iloz, ihiz, z, ldz, + work, 3, h[ku*ldh:], ldh, nhv, h[kwv*ldh:], ldh, nhv, h[ku*ldh+kwh:], ldh) + + // Note progress (or the lack of it). + if ld > 0 { + ndfl = 1 + } else { + ndfl++ + } + } + + work[0] = float64(lwkopt) + return unconverged +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go new file mode 100644 index 0000000000..e21373bd10 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr1.go @@ -0,0 +1,59 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlaqr1 sets v to a scalar multiple of the first column of the product +// (H - (sr1 + i*si1)*I)*(H - (sr2 + i*si2)*I) +// where H is a 2×2 or 3×3 matrix, I is the identity matrix of the same size, +// and i is the imaginary unit. Scaling is done to avoid overflows and most +// underflows. +// +// n is the order of H and must be either 2 or 3. It must hold that either sr1 = +// sr2 and si1 = -si2, or si1 = si2 = 0. The length of v must be equal to n. If +// any of these conditions is not met, Dlaqr1 will panic. +// +// Dlaqr1 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlaqr1(n int, h []float64, ldh int, sr1, si1, sr2, si2 float64, v []float64) { + switch { + case n != 2 && n != 3: + panic("lapack: n must be 2 or 3") + case ldh < n: + panic(badLdH) + case len(h) < (n-1)*ldh+n: + panic(shortH) + case !((sr1 == sr2 && si1 == -si2) || (si1 == 0 && si2 == 0)): + panic(badShifts) + case len(v) != n: + panic(shortV) + } + + if n == 2 { + s := math.Abs(h[0]-sr2) + math.Abs(si2) + math.Abs(h[ldh]) + if s == 0 { + v[0] = 0 + v[1] = 0 + } else { + h21s := h[ldh] / s + v[0] = h21s*h[1] + (h[0]-sr1)*((h[0]-sr2)/s) - si1*(si2/s) + v[1] = h21s * (h[0] + h[ldh+1] - sr1 - sr2) + } + return + } + + s := math.Abs(h[0]-sr2) + math.Abs(si2) + math.Abs(h[ldh]) + math.Abs(h[2*ldh]) + if s == 0 { + v[0] = 0 + v[1] = 0 + v[2] = 0 + } else { + h21s := h[ldh] / s + h31s := h[2*ldh] / s + v[0] = (h[0]-sr1)*((h[0]-sr2)/s) - si1*(si2/s) + h[1]*h21s + h[2]*h31s + v[1] = h21s*(h[0]+h[ldh+1]-sr1-sr2) + h[ldh+2]*h31s + v[2] = h31s*(h[0]+h[2*ldh+2]-sr1-sr2) + h21s*h[2*ldh+1] + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go new file mode 100644 index 0000000000..ff299a73aa --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr23.go @@ -0,0 +1,415 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlaqr23 performs the orthogonal similarity transformation of an n×n upper +// Hessenberg matrix to detect and deflate fully converged eigenvalues from a +// trailing principal submatrix using aggressive early deflation [1]. +// +// On return, H will be overwritten by a new Hessenberg matrix that is a +// perturbation of an orthogonal similarity transformation of H. 
It is hoped +// that on output H will have many zero subdiagonal entries. +// +// If wantt is true, the matrix H will be fully updated so that the +// quasi-triangular Schur factor can be computed. If wantt is false, then only +// enough of H will be updated to preserve the eigenvalues. +// +// If wantz is true, the orthogonal similarity transformation will be +// accumulated into Z[iloz:ihiz+1,ktop:kbot+1], otherwise Z is not referenced. +// +// ktop and kbot determine a block [ktop:kbot+1,ktop:kbot+1] along the diagonal +// of H. It must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo == 0 and ihi == -1, if n == 0, +// and the block must be isolated, that is, it must hold that +// ktop == 0 or H[ktop,ktop-1] == 0, +// kbot == n-1 or H[kbot+1,kbot] == 0, +// otherwise Dlaqr23 will panic. +// +// nw is the deflation window size. It must hold that +// 0 <= nw <= kbot-ktop+1, +// otherwise Dlaqr23 will panic. +// +// iloz and ihiz specify the rows of the n×n matrix Z to which transformations +// will be applied if wantz is true. It must hold that +// 0 <= iloz <= ktop, and kbot <= ihiz < n, +// otherwise Dlaqr23 will panic. +// +// sr and si must have length kbot+1, otherwise Dlaqr23 will panic. +// +// v and ldv represent an nw×nw work matrix. +// t and ldt represent an nw×nh work matrix, and nh must be at least nw. +// wv and ldwv represent an nv×nw work matrix. +// +// work must have length at least lwork and lwork must be at least max(1,2*nw), +// otherwise Dlaqr23 will panic. Larger values of lwork may result in greater +// efficiency. On return, work[0] will contain the optimal value of lwork. +// +// If lwork is -1, instead of performing Dlaqr23, the function only estimates the +// optimal workspace size and stores it into work[0]. Neither h nor z are +// accessed. +// +// recur is the non-negative recursion depth. For recur > 0, Dlaqr23 behaves +// as DLAQR3, for recur == 0 it behaves as DLAQR2. 
+// +// On return, ns and nd will contain respectively the number of unconverged +// (i.e., approximate) eigenvalues and converged eigenvalues that are stored in +// sr and si. +// +// On return, the real and imaginary parts of approximate eigenvalues that may +// be used for shifts will be stored respectively in sr[kbot-nd-ns+1:kbot-nd+1] +// and si[kbot-nd-ns+1:kbot-nd+1]. +// +// On return, the real and imaginary parts of converged eigenvalues will be +// stored respectively in sr[kbot-nd+1:kbot+1] and si[kbot-nd+1:kbot+1]. +// +// References: +// [1] K. Braman, R. Byers, R. Mathias. The Multishift QR Algorithm. Part II: +// Aggressive Early Deflation. SIAM J. Matrix Anal. Appl 23(4) (2002), pp. 948—973 +// URL: http://dx.doi.org/10.1137/S0895479801384585 +// +func (impl Implementation) Dlaqr23(wantt, wantz bool, n, ktop, kbot, nw int, h []float64, ldh int, iloz, ihiz int, z []float64, ldz int, sr, si []float64, v []float64, ldv int, nh int, t []float64, ldt int, nv int, wv []float64, ldwv int, work []float64, lwork int, recur int) (ns, nd int) { + switch { + case n < 0: + panic(nLT0) + case ktop < 0 || max(0, n-1) < ktop: + panic(badKtop) + case kbot < min(ktop, n-1) || n <= kbot: + panic(badKbot) + case nw < 0 || kbot-ktop+1+1 < nw: + panic(badNw) + case ldh < max(1, n): + panic(badLdH) + case wantz && (iloz < 0 || ktop < iloz): + panic(badIloz) + case wantz && (ihiz < kbot || n <= ihiz): + panic(badIhiz) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + case ldv < max(1, nw): + panic(badLdV) + case nh < nw: + panic(badNh) + case ldt < max(1, nh): + panic(badLdT) + case nv < 0: + panic(nvLT0) + case ldwv < max(1, nw): + panic(badLdWV) + case lwork < max(1, 2*nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + case recur < 0: + panic(recurLT0) + } + + // Quick return for zero window size. 
+ if nw == 0 { + work[0] = 1 + return 0, 0 + } + + // LAPACK code does not enforce the documented behavior + // nw <= kbot-ktop+1 + // but we do (we panic above). + jw := nw + lwkopt := max(1, 2*nw) + if jw > 2 { + // Workspace query call to Dgehrd. + impl.Dgehrd(jw, 0, jw-2, t, ldt, work, work, -1) + lwk1 := int(work[0]) + // Workspace query call to Dormhr. + impl.Dormhr(blas.Right, blas.NoTrans, jw, jw, 0, jw-2, t, ldt, work, v, ldv, work, -1) + lwk2 := int(work[0]) + if recur > 0 { + // Workspace query call to Dlaqr04. + impl.Dlaqr04(true, true, jw, 0, jw-1, t, ldt, sr, si, 0, jw-1, v, ldv, work, -1, recur-1) + lwk3 := int(work[0]) + // Optimal workspace. + lwkopt = max(jw+max(lwk1, lwk2), lwk3) + } else { + // Optimal workspace. + lwkopt = jw + max(lwk1, lwk2) + } + } + // Quick return in case of workspace query. + if lwork == -1 { + work[0] = float64(lwkopt) + return 0, 0 + } + + // Check input slices only if not doing workspace query. + switch { + case len(h) < (n-1)*ldh+n: + panic(shortH) + case len(v) < (nw-1)*ldv+nw: + panic(shortV) + case len(t) < (nw-1)*ldt+nh: + panic(shortT) + case len(wv) < (nv-1)*ldwv+nw: + panic(shortWV) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + case len(sr) != kbot+1: + panic(badLenSr) + case len(si) != kbot+1: + panic(badLenSi) + case ktop > 0 && h[ktop*ldh+ktop-1] != 0: + panic(notIsolated) + case kbot+1 < n && h[(kbot+1)*ldh+kbot] != 0: + panic(notIsolated) + } + + // Machine constants. + ulp := dlamchP + smlnum := float64(n) / ulp * dlamchS + + // Setup deflation window. + var s float64 + kwtop := kbot - jw + 1 + if kwtop != ktop { + s = h[kwtop*ldh+kwtop-1] + } + if kwtop == kbot { + // 1×1 deflation window. + sr[kwtop] = h[kwtop*ldh+kwtop] + si[kwtop] = 0 + ns = 1 + nd = 0 + if math.Abs(s) <= math.Max(smlnum, ulp*math.Abs(h[kwtop*ldh+kwtop])) { + ns = 0 + nd = 1 + if kwtop > ktop { + h[kwtop*ldh+kwtop-1] = 0 + } + } + work[0] = 1 + return ns, nd + } + + // Convert to spike-triangular form. 
In case of a rare QR failure, this + // routine continues to do aggressive early deflation using that part of + // the deflation window that converged using infqr here and there to + // keep track. + impl.Dlacpy(blas.Upper, jw, jw, h[kwtop*ldh+kwtop:], ldh, t, ldt) + bi := blas64.Implementation() + bi.Dcopy(jw-1, h[(kwtop+1)*ldh+kwtop:], ldh+1, t[ldt:], ldt+1) + impl.Dlaset(blas.All, jw, jw, 0, 1, v, ldv) + nmin := impl.Ilaenv(12, "DLAQR3", "SV", jw, 0, jw-1, lwork) + var infqr int + if recur > 0 && jw > nmin { + infqr = impl.Dlaqr04(true, true, jw, 0, jw-1, t, ldt, sr[kwtop:], si[kwtop:], 0, jw-1, v, ldv, work, lwork, recur-1) + } else { + infqr = impl.Dlahqr(true, true, jw, 0, jw-1, t, ldt, sr[kwtop:], si[kwtop:], 0, jw-1, v, ldv) + } + // Note that ilo == 0 which conveniently coincides with the success + // value of infqr, that is, infqr as an index always points to the first + // converged eigenvalue. + + // Dtrexc needs a clean margin near the diagonal. + for j := 0; j < jw-3; j++ { + t[(j+2)*ldt+j] = 0 + t[(j+3)*ldt+j] = 0 + } + if jw >= 3 { + t[(jw-1)*ldt+jw-3] = 0 + } + + ns = jw + ilst := infqr + // Deflation detection loop. + for ilst < ns { + bulge := false + if ns >= 2 { + bulge = t[(ns-1)*ldt+ns-2] != 0 + } + if !bulge { + // Real eigenvalue. + abst := math.Abs(t[(ns-1)*ldt+ns-1]) + if abst == 0 { + abst = math.Abs(s) + } + if math.Abs(s*v[ns-1]) <= math.Max(smlnum, ulp*abst) { + // Deflatable. + ns-- + } else { + // Undeflatable, move it up out of the way. + // Dtrexc can not fail in this case. + _, ilst, _ = impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, ns-1, ilst, work) + ilst++ + } + continue + } + // Complex conjugate pair. + abst := math.Abs(t[(ns-1)*ldt+ns-1]) + math.Sqrt(math.Abs(t[(ns-1)*ldt+ns-2]))*math.Sqrt(math.Abs(t[(ns-2)*ldt+ns-1])) + if abst == 0 { + abst = math.Abs(s) + } + if math.Max(math.Abs(s*v[ns-1]), math.Abs(s*v[ns-2])) <= math.Max(smlnum, ulp*abst) { + // Deflatable. 
+ ns -= 2 + } else { + // Undeflatable, move them up out of the way. + // Dtrexc does the right thing with ilst in case of a + // rare exchange failure. + _, ilst, _ = impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, ns-1, ilst, work) + ilst += 2 + } + } + + // Return to Hessenberg form. + if ns == 0 { + s = 0 + } + if ns < jw { + // Sorting diagonal blocks of T improves accuracy for graded + // matrices. Bubble sort deals well with exchange failures. + sorted := false + i := ns + for !sorted { + sorted = true + kend := i - 1 + i = infqr + var k int + if i == ns-1 || t[(i+1)*ldt+i] == 0 { + k = i + 1 + } else { + k = i + 2 + } + for k <= kend { + var evi float64 + if k == i+1 { + evi = math.Abs(t[i*ldt+i]) + } else { + evi = math.Abs(t[i*ldt+i]) + math.Sqrt(math.Abs(t[(i+1)*ldt+i]))*math.Sqrt(math.Abs(t[i*ldt+i+1])) + } + + var evk float64 + if k == kend || t[(k+1)*ldt+k] == 0 { + evk = math.Abs(t[k*ldt+k]) + } else { + evk = math.Abs(t[k*ldt+k]) + math.Sqrt(math.Abs(t[(k+1)*ldt+k]))*math.Sqrt(math.Abs(t[k*ldt+k+1])) + } + + if evi >= evk { + i = k + } else { + sorted = false + _, ilst, ok := impl.Dtrexc(lapack.UpdateSchur, jw, t, ldt, v, ldv, i, k, work) + if ok { + i = ilst + } else { + i = k + } + } + if i == kend || t[(i+1)*ldt+i] == 0 { + k = i + 1 + } else { + k = i + 2 + } + } + } + } + + // Restore shift/eigenvalue array from T. + for i := jw - 1; i >= infqr; { + if i == infqr || t[i*ldt+i-1] == 0 { + sr[kwtop+i] = t[i*ldt+i] + si[kwtop+i] = 0 + i-- + continue + } + aa := t[(i-1)*ldt+i-1] + bb := t[(i-1)*ldt+i] + cc := t[i*ldt+i-1] + dd := t[i*ldt+i] + _, _, _, _, sr[kwtop+i-1], si[kwtop+i-1], sr[kwtop+i], si[kwtop+i], _, _ = impl.Dlanv2(aa, bb, cc, dd) + i -= 2 + } + + if ns < jw || s == 0 { + if ns > 1 && s != 0 { + // Reflect spike back into lower triangle. 
+ bi.Dcopy(ns, v[:ns], 1, work[:ns], 1) + _, tau := impl.Dlarfg(ns, work[0], work[1:ns], 1) + work[0] = 1 + impl.Dlaset(blas.Lower, jw-2, jw-2, 0, 0, t[2*ldt:], ldt) + impl.Dlarf(blas.Left, ns, jw, work[:ns], 1, tau, t, ldt, work[jw:]) + impl.Dlarf(blas.Right, ns, ns, work[:ns], 1, tau, t, ldt, work[jw:]) + impl.Dlarf(blas.Right, jw, ns, work[:ns], 1, tau, v, ldv, work[jw:]) + impl.Dgehrd(jw, 0, ns-1, t, ldt, work[:jw-1], work[jw:], lwork-jw) + } + + // Copy updated reduced window into place. + if kwtop > 0 { + h[kwtop*ldh+kwtop-1] = s * v[0] + } + impl.Dlacpy(blas.Upper, jw, jw, t, ldt, h[kwtop*ldh+kwtop:], ldh) + bi.Dcopy(jw-1, t[ldt:], ldt+1, h[(kwtop+1)*ldh+kwtop:], ldh+1) + + // Accumulate orthogonal matrix in order to update H and Z, if + // requested. + if ns > 1 && s != 0 { + // work[:ns-1] contains the elementary reflectors stored + // by a call to Dgehrd above. + impl.Dormhr(blas.Right, blas.NoTrans, jw, ns, 0, ns-1, + t, ldt, work[:ns-1], v, ldv, work[jw:], lwork-jw) + } + + // Update vertical slab in H. + var ltop int + if !wantt { + ltop = ktop + } + for krow := ltop; krow < kwtop; krow += nv { + kln := min(nv, kwtop-krow) + bi.Dgemm(blas.NoTrans, blas.NoTrans, kln, jw, jw, + 1, h[krow*ldh+kwtop:], ldh, v, ldv, + 0, wv, ldwv) + impl.Dlacpy(blas.All, kln, jw, wv, ldwv, h[krow*ldh+kwtop:], ldh) + } + + // Update horizontal slab in H. + if wantt { + for kcol := kbot + 1; kcol < n; kcol += nh { + kln := min(nh, n-kcol) + bi.Dgemm(blas.Trans, blas.NoTrans, jw, kln, jw, + 1, v, ldv, h[kwtop*ldh+kcol:], ldh, + 0, t, ldt) + impl.Dlacpy(blas.All, jw, kln, t, ldt, h[kwtop*ldh+kcol:], ldh) + } + } + + // Update vertical slab in Z. + if wantz { + for krow := iloz; krow <= ihiz; krow += nv { + kln := min(nv, ihiz-krow+1) + bi.Dgemm(blas.NoTrans, blas.NoTrans, kln, jw, jw, + 1, z[krow*ldz+kwtop:], ldz, v, ldv, + 0, wv, ldwv) + impl.Dlacpy(blas.All, kln, jw, wv, ldwv, z[krow*ldz+kwtop:], ldz) + } + } + } + + // The number of deflations. 
+ nd = jw - ns + // Shifts are converged eigenvalues that could not be deflated. + // Subtracting infqr from the spike length takes care of the case of a + // rare QR failure while calculating eigenvalues of the deflation + // window. + ns -= infqr + work[0] = float64(lwkopt) + return ns, nd +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go new file mode 100644 index 0000000000..c198f229a2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaqr5.go @@ -0,0 +1,644 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlaqr5 performs a single small-bulge multi-shift QR sweep on an isolated +// block of a Hessenberg matrix. +// +// wantt and wantz determine whether the quasi-triangular Schur factor and the +// orthogonal Schur factor, respectively, will be computed. +// +// kacc22 specifies the computation mode of far-from-diagonal orthogonal +// updates. Permitted values are: +// 0: Dlaqr5 will not accumulate reflections and will not use matrix-matrix +// multiply to update far-from-diagonal matrix entries. +// 1: Dlaqr5 will accumulate reflections and use matrix-matrix multiply to +// update far-from-diagonal matrix entries. +// 2: Dlaqr5 will accumulate reflections, use matrix-matrix multiply to update +// far-from-diagonal matrix entries, and take advantage of 2×2 block +// structure during matrix multiplies. +// For other values of kacc2 Dlaqr5 will panic. +// +// n is the order of the Hessenberg matrix H. +// +// ktop and kbot are indices of the first and last row and column of an isolated +// diagonal block upon which the QR sweep will be applied. 
It must hold that +// ktop == 0, or 0 < ktop <= n-1 and H[ktop, ktop-1] == 0, and +// kbot == n-1, or 0 <= kbot < n-1 and H[kbot+1, kbot] == 0, +// otherwise Dlaqr5 will panic. +// +// nshfts is the number of simultaneous shifts. It must be positive and even, +// otherwise Dlaqr5 will panic. +// +// sr and si contain the real and imaginary parts, respectively, of the shifts +// of origin that define the multi-shift QR sweep. On return both slices may be +// reordered by Dlaqr5. Their length must be equal to nshfts, otherwise Dlaqr5 +// will panic. +// +// h and ldh represent the Hessenberg matrix H of size n×n. On return +// multi-shift QR sweep with shifts sr+i*si has been applied to the isolated +// diagonal block in rows and columns ktop through kbot, inclusive. +// +// iloz and ihiz specify the rows of Z to which transformations will be applied +// if wantz is true. It must hold that 0 <= iloz <= ihiz < n, otherwise Dlaqr5 +// will panic. +// +// z and ldz represent the matrix Z of size n×n. If wantz is true, the QR sweep +// orthogonal similarity transformation is accumulated into +// z[iloz:ihiz,iloz:ihiz] from the right, otherwise z not referenced. +// +// v and ldv represent an auxiliary matrix V of size (nshfts/2)×3. Note that V +// is transposed with respect to the reference netlib implementation. +// +// u and ldu represent an auxiliary matrix of size (3*nshfts-3)×(3*nshfts-3). +// +// wh and ldwh represent an auxiliary matrix of size (3*nshfts-3)×nh. +// +// wv and ldwv represent an auxiliary matrix of size nv×(3*nshfts-3). +// +// Dlaqr5 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlaqr5(wantt, wantz bool, kacc22 int, n, ktop, kbot, nshfts int, sr, si []float64, h []float64, ldh int, iloz, ihiz int, z []float64, ldz int, v []float64, ldv int, u []float64, ldu int, nv int, wv []float64, ldwv int, nh int, wh []float64, ldwh int) { + switch { + case kacc22 != 0 && kacc22 != 1 && kacc22 != 2: + panic(badKacc22) + case n < 0: + panic(nLT0) + case ktop < 0 || n <= ktop: + panic(badKtop) + case kbot < 0 || n <= kbot: + panic(badKbot) + + case nshfts < 0: + panic(nshftsLT0) + case nshfts&0x1 != 0: + panic(nshftsOdd) + case len(sr) != nshfts: + panic(badLenSr) + case len(si) != nshfts: + panic(badLenSi) + + case ldh < max(1, n): + panic(badLdH) + case len(h) < (n-1)*ldh+n: + panic(shortH) + + case wantz && ihiz >= n: + panic(badIhiz) + case wantz && iloz < 0 || ihiz < iloz: + panic(badIloz) + case ldz < 1, wantz && ldz < n: + panic(badLdZ) + case wantz && len(z) < (n-1)*ldz+n: + panic(shortZ) + + case ldv < 3: + // V is transposed w.r.t. reference lapack. + panic(badLdV) + case len(v) < (nshfts/2-1)*ldv+3: + panic(shortV) + + case ldu < max(1, 3*nshfts-3): + panic(badLdU) + case len(u) < (3*nshfts-3-1)*ldu+3*nshfts-3: + panic(shortU) + + case nv < 0: + panic(nvLT0) + case ldwv < max(1, 3*nshfts-3): + panic(badLdWV) + case len(wv) < (nv-1)*ldwv+3*nshfts-3: + panic(shortWV) + + case nh < 0: + panic(nhLT0) + case ldwh < max(1, nh): + panic(badLdWH) + case len(wh) < (3*nshfts-3-1)*ldwh+nh: + panic(shortWH) + + case ktop > 0 && h[ktop*ldh+ktop-1] != 0: + panic(notIsolated) + case kbot < n-1 && h[(kbot+1)*ldh+kbot] != 0: + panic(notIsolated) + } + + // If there are no shifts, then there is nothing to do. + if nshfts < 2 { + return + } + // If the active block is empty or 1×1, then there is nothing to do. + if ktop >= kbot { + return + } + + // Shuffle shifts into pairs of real shifts and pairs of complex + // conjugate shifts assuming complex conjugate shifts are already + // adjacent to one another. 
+ for i := 0; i < nshfts-2; i += 2 { + if si[i] == -si[i+1] { + continue + } + sr[i], sr[i+1], sr[i+2] = sr[i+1], sr[i+2], sr[i] + si[i], si[i+1], si[i+2] = si[i+1], si[i+2], si[i] + } + + // Note: lapack says that nshfts must be even but allows it to be odd + // anyway. We panic above if nshfts is not even, so reducing it by one + // is unnecessary. The only caller Dlaqr04 uses only even nshfts. + // + // The original comment and code from lapack-3.6.0/SRC/dlaqr5.f:341: + // * ==== NSHFTS is supposed to be even, but if it is odd, + // * . then simply reduce it by one. The shuffle above + // * . ensures that the dropped shift is real and that + // * . the remaining shifts are paired. ==== + // * + // NS = NSHFTS - MOD( NSHFTS, 2 ) + ns := nshfts + + safmin := dlamchS + ulp := dlamchP + smlnum := safmin * float64(n) / ulp + + // Use accumulated reflections to update far-from-diagonal entries? + accum := kacc22 == 1 || kacc22 == 2 + // If so, exploit the 2×2 block structure? + blk22 := ns > 2 && kacc22 == 2 + + // Clear trash. + if ktop+2 <= kbot { + h[(ktop+2)*ldh+ktop] = 0 + } + + // nbmps = number of 2-shift bulges in the chain. + nbmps := ns / 2 + + // kdu = width of slab. + kdu := 6*nbmps - 3 + + // Create and chase chains of nbmps bulges. + for incol := 3*(1-nbmps) + ktop - 1; incol <= kbot-2; incol += 3*nbmps - 2 { + ndcol := incol + kdu + if accum { + impl.Dlaset(blas.All, kdu, kdu, 0, 1, u, ldu) + } + + // Near-the-diagonal bulge chase. The following loop performs + // the near-the-diagonal part of a small bulge multi-shift QR + // sweep. Each 6*nbmps-2 column diagonal chunk extends from + // column incol to column ndcol (including both column incol and + // column ndcol). The following loop chases a 3*nbmps column + // long chain of nbmps bulges 3*nbmps-2 columns to the right. 
+ // (incol may be less than ktop and ndcol may be greater than + // kbot indicating phantom columns from which to chase bulges + // before they are actually introduced or to which to chase + // bulges beyond column kbot.) + for krcol := incol; krcol <= min(incol+3*nbmps-3, kbot-2); krcol++ { + // Bulges number mtop to mbot are active double implicit + // shift bulges. There may or may not also be small 2×2 + // bulge, if there is room. The inactive bulges (if any) + // must wait until the active bulges have moved down the + // diagonal to make room. The phantom matrix paradigm + // described above helps keep track. + + mtop := max(0, ((ktop-1)-krcol+2)/3) + mbot := min(nbmps, (kbot-krcol)/3) - 1 + m22 := mbot + 1 + bmp22 := (mbot < nbmps-1) && (krcol+3*m22 == kbot-2) + + // Generate reflections to chase the chain right one + // column. (The minimum value of k is ktop-1.) + for m := mtop; m <= mbot; m++ { + k := krcol + 3*m + if k == ktop-1 { + impl.Dlaqr1(3, h[ktop*ldh+ktop:], ldh, + sr[2*m], si[2*m], sr[2*m+1], si[2*m+1], + v[m*ldv:m*ldv+3]) + alpha := v[m*ldv] + _, v[m*ldv] = impl.Dlarfg(3, alpha, v[m*ldv+1:m*ldv+3], 1) + continue + } + beta := h[(k+1)*ldh+k] + v[m*ldv+1] = h[(k+2)*ldh+k] + v[m*ldv+2] = h[(k+3)*ldh+k] + beta, v[m*ldv] = impl.Dlarfg(3, beta, v[m*ldv+1:m*ldv+3], 1) + + // A bulge may collapse because of vigilant deflation or + // destructive underflow. In the underflow case, try the + // two-small-subdiagonals trick to try to reinflate the + // bulge. + if h[(k+3)*ldh+k] != 0 || h[(k+3)*ldh+k+1] != 0 || h[(k+3)*ldh+k+2] == 0 { + // Typical case: not collapsed (yet). + h[(k+1)*ldh+k] = beta + h[(k+2)*ldh+k] = 0 + h[(k+3)*ldh+k] = 0 + continue + } + + // Atypical case: collapsed. Attempt to reintroduce + // ignoring H[k+1,k] and H[k+2,k]. If the fill + // resulting from the new reflector is too large, + // then abandon it. Otherwise, use the new one. 
+ var vt [3]float64 + impl.Dlaqr1(3, h[(k+1)*ldh+k+1:], ldh, sr[2*m], + si[2*m], sr[2*m+1], si[2*m+1], vt[:]) + alpha := vt[0] + _, vt[0] = impl.Dlarfg(3, alpha, vt[1:3], 1) + refsum := vt[0] * (h[(k+1)*ldh+k] + vt[1]*h[(k+2)*ldh+k]) + + dsum := math.Abs(h[k*ldh+k]) + math.Abs(h[(k+1)*ldh+k+1]) + math.Abs(h[(k+2)*ldh+k+2]) + if math.Abs(h[(k+2)*ldh+k]-refsum*vt[1])+math.Abs(refsum*vt[2]) > ulp*dsum { + // Starting a new bulge here would create + // non-negligible fill. Use the old one with + // trepidation. + h[(k+1)*ldh+k] = beta + h[(k+2)*ldh+k] = 0 + h[(k+3)*ldh+k] = 0 + continue + } else { + // Starting a new bulge here would create + // only negligible fill. Replace the old + // reflector with the new one. + h[(k+1)*ldh+k] -= refsum + h[(k+2)*ldh+k] = 0 + h[(k+3)*ldh+k] = 0 + v[m*ldv] = vt[0] + v[m*ldv+1] = vt[1] + v[m*ldv+2] = vt[2] + } + } + + // Generate a 2×2 reflection, if needed. + if bmp22 { + k := krcol + 3*m22 + if k == ktop-1 { + impl.Dlaqr1(2, h[(k+1)*ldh+k+1:], ldh, + sr[2*m22], si[2*m22], sr[2*m22+1], si[2*m22+1], + v[m22*ldv:m22*ldv+2]) + beta := v[m22*ldv] + _, v[m22*ldv] = impl.Dlarfg(2, beta, v[m22*ldv+1:m22*ldv+2], 1) + } else { + beta := h[(k+1)*ldh+k] + v[m22*ldv+1] = h[(k+2)*ldh+k] + beta, v[m22*ldv] = impl.Dlarfg(2, beta, v[m22*ldv+1:m22*ldv+2], 1) + h[(k+1)*ldh+k] = beta + h[(k+2)*ldh+k] = 0 + } + } + + // Multiply H by reflections from the left. 
+ var jbot int + switch { + case accum: + jbot = min(ndcol, kbot) + case wantt: + jbot = n - 1 + default: + jbot = kbot + } + for j := max(ktop, krcol); j <= jbot; j++ { + mend := min(mbot+1, (j-krcol+2)/3) - 1 + for m := mtop; m <= mend; m++ { + k := krcol + 3*m + refsum := v[m*ldv] * (h[(k+1)*ldh+j] + + v[m*ldv+1]*h[(k+2)*ldh+j] + v[m*ldv+2]*h[(k+3)*ldh+j]) + h[(k+1)*ldh+j] -= refsum + h[(k+2)*ldh+j] -= refsum * v[m*ldv+1] + h[(k+3)*ldh+j] -= refsum * v[m*ldv+2] + } + } + if bmp22 { + k := krcol + 3*m22 + for j := max(k+1, ktop); j <= jbot; j++ { + refsum := v[m22*ldv] * (h[(k+1)*ldh+j] + v[m22*ldv+1]*h[(k+2)*ldh+j]) + h[(k+1)*ldh+j] -= refsum + h[(k+2)*ldh+j] -= refsum * v[m22*ldv+1] + } + } + + // Multiply H by reflections from the right. Delay filling in the last row + // until the vigilant deflation check is complete. + var jtop int + switch { + case accum: + jtop = max(ktop, incol) + case wantt: + jtop = 0 + default: + jtop = ktop + } + for m := mtop; m <= mbot; m++ { + if v[m*ldv] == 0 { + continue + } + k := krcol + 3*m + for j := jtop; j <= min(kbot, k+3); j++ { + refsum := v[m*ldv] * (h[j*ldh+k+1] + + v[m*ldv+1]*h[j*ldh+k+2] + v[m*ldv+2]*h[j*ldh+k+3]) + h[j*ldh+k+1] -= refsum + h[j*ldh+k+2] -= refsum * v[m*ldv+1] + h[j*ldh+k+3] -= refsum * v[m*ldv+2] + } + if accum { + // Accumulate U. (If necessary, update Z later with an + // efficient matrix-matrix multiply.) + kms := k - incol + for j := max(0, ktop-incol-1); j < kdu; j++ { + refsum := v[m*ldv] * (u[j*ldu+kms] + + v[m*ldv+1]*u[j*ldu+kms+1] + v[m*ldv+2]*u[j*ldu+kms+2]) + u[j*ldu+kms] -= refsum + u[j*ldu+kms+1] -= refsum * v[m*ldv+1] + u[j*ldu+kms+2] -= refsum * v[m*ldv+2] + } + } else if wantz { + // U is not accumulated, so update Z now by multiplying by + // reflections from the right. 
+ for j := iloz; j <= ihiz; j++ { + refsum := v[m*ldv] * (z[j*ldz+k+1] + + v[m*ldv+1]*z[j*ldz+k+2] + v[m*ldv+2]*z[j*ldz+k+3]) + z[j*ldz+k+1] -= refsum + z[j*ldz+k+2] -= refsum * v[m*ldv+1] + z[j*ldz+k+3] -= refsum * v[m*ldv+2] + } + } + } + + // Special case: 2×2 reflection (if needed). + if bmp22 && v[m22*ldv] != 0 { + k := krcol + 3*m22 + for j := jtop; j <= min(kbot, k+3); j++ { + refsum := v[m22*ldv] * (h[j*ldh+k+1] + v[m22*ldv+1]*h[j*ldh+k+2]) + h[j*ldh+k+1] -= refsum + h[j*ldh+k+2] -= refsum * v[m22*ldv+1] + } + if accum { + kms := k - incol + for j := max(0, ktop-incol-1); j < kdu; j++ { + refsum := v[m22*ldv] * (u[j*ldu+kms] + v[m22*ldv+1]*u[j*ldu+kms+1]) + u[j*ldu+kms] -= refsum + u[j*ldu+kms+1] -= refsum * v[m22*ldv+1] + } + } else if wantz { + for j := iloz; j <= ihiz; j++ { + refsum := v[m22*ldv] * (z[j*ldz+k+1] + v[m22*ldv+1]*z[j*ldz+k+2]) + z[j*ldz+k+1] -= refsum + z[j*ldz+k+2] -= refsum * v[m22*ldv+1] + } + } + } + + // Vigilant deflation check. + mstart := mtop + if krcol+3*mstart < ktop { + mstart++ + } + mend := mbot + if bmp22 { + mend++ + } + if krcol == kbot-2 { + mend++ + } + for m := mstart; m <= mend; m++ { + k := min(kbot-1, krcol+3*m) + + // The following convergence test requires that the tradition + // small-compared-to-nearby-diagonals criterion and the Ahues & + // Tisseur (LAWN 122, 1997) criteria both be satisfied. The latter + // improves accuracy in some examples. Falling back on an alternate + // convergence criterion when tst1 or tst2 is zero (as done here) is + // traditional but probably unnecessary. 
+ + if h[(k+1)*ldh+k] == 0 { + continue + } + tst1 := math.Abs(h[k*ldh+k]) + math.Abs(h[(k+1)*ldh+k+1]) + if tst1 == 0 { + if k >= ktop+1 { + tst1 += math.Abs(h[k*ldh+k-1]) + } + if k >= ktop+2 { + tst1 += math.Abs(h[k*ldh+k-2]) + } + if k >= ktop+3 { + tst1 += math.Abs(h[k*ldh+k-3]) + } + if k <= kbot-2 { + tst1 += math.Abs(h[(k+2)*ldh+k+1]) + } + if k <= kbot-3 { + tst1 += math.Abs(h[(k+3)*ldh+k+1]) + } + if k <= kbot-4 { + tst1 += math.Abs(h[(k+4)*ldh+k+1]) + } + } + if math.Abs(h[(k+1)*ldh+k]) <= math.Max(smlnum, ulp*tst1) { + h12 := math.Max(math.Abs(h[(k+1)*ldh+k]), math.Abs(h[k*ldh+k+1])) + h21 := math.Min(math.Abs(h[(k+1)*ldh+k]), math.Abs(h[k*ldh+k+1])) + h11 := math.Max(math.Abs(h[(k+1)*ldh+k+1]), math.Abs(h[k*ldh+k]-h[(k+1)*ldh+k+1])) + h22 := math.Min(math.Abs(h[(k+1)*ldh+k+1]), math.Abs(h[k*ldh+k]-h[(k+1)*ldh+k+1])) + scl := h11 + h12 + tst2 := h22 * (h11 / scl) + if tst2 == 0 || h21*(h12/scl) <= math.Max(smlnum, ulp*tst2) { + h[(k+1)*ldh+k] = 0 + } + } + } + + // Fill in the last row of each bulge. + mend = min(nbmps, (kbot-krcol-1)/3) - 1 + for m := mtop; m <= mend; m++ { + k := krcol + 3*m + refsum := v[m*ldv] * v[m*ldv+2] * h[(k+4)*ldh+k+3] + h[(k+4)*ldh+k+1] = -refsum + h[(k+4)*ldh+k+2] = -refsum * v[m*ldv+1] + h[(k+4)*ldh+k+3] -= refsum * v[m*ldv+2] + } + } + + // Use U (if accumulated) to update far-from-diagonal entries in H. + // If required, use U to update Z as well. + if !accum { + continue + } + var jtop, jbot int + if wantt { + jtop = 0 + jbot = n - 1 + } else { + jtop = ktop + jbot = kbot + } + bi := blas64.Implementation() + if !blk22 || incol < ktop || kbot < ndcol || ns <= 2 { + // Updates not exploiting the 2×2 block structure of U. k0 and nu keep track + // of the location and size of U in the special cases of introducing bulges + // and chasing bulges off the bottom. In these special cases and in case the + // number of shifts is ns = 2, there is no 2×2 block structure to exploit. 
+ + k0 := max(0, ktop-incol-1) + nu := kdu - max(0, ndcol-kbot) - k0 + + // Horizontal multiply. + for jcol := min(ndcol, kbot) + 1; jcol <= jbot; jcol += nh { + jlen := min(nh, jbot-jcol+1) + bi.Dgemm(blas.Trans, blas.NoTrans, nu, jlen, nu, + 1, u[k0*ldu+k0:], ldu, + h[(incol+k0+1)*ldh+jcol:], ldh, + 0, wh, ldwh) + impl.Dlacpy(blas.All, nu, jlen, wh, ldwh, h[(incol+k0+1)*ldh+jcol:], ldh) + } + + // Vertical multiply. + for jrow := jtop; jrow <= max(ktop, incol)-1; jrow += nv { + jlen := min(nv, max(ktop, incol)-jrow) + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, nu, nu, + 1, h[jrow*ldh+incol+k0+1:], ldh, + u[k0*ldu+k0:], ldu, + 0, wv, ldwv) + impl.Dlacpy(blas.All, jlen, nu, wv, ldwv, h[jrow*ldh+incol+k0+1:], ldh) + } + + // Z multiply (also vertical). + if wantz { + for jrow := iloz; jrow <= ihiz; jrow += nv { + jlen := min(nv, ihiz-jrow+1) + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, nu, nu, + 1, z[jrow*ldz+incol+k0+1:], ldz, + u[k0*ldu+k0:], ldu, + 0, wv, ldwv) + impl.Dlacpy(blas.All, jlen, nu, wv, ldwv, z[jrow*ldz+incol+k0+1:], ldz) + } + } + + continue + } + + // Updates exploiting U's 2×2 block structure. + + // i2, i4, j2, j4 are the last rows and columns of the blocks. + i2 := (kdu + 1) / 2 + i4 := kdu + j2 := i4 - i2 + j4 := kdu + + // kzs and knz deal with the band of zeros along the diagonal of one of the + // triangular blocks. + kzs := (j4 - j2) - (ns + 1) + knz := ns + 1 + + // Horizontal multiply. + for jcol := min(ndcol, kbot) + 1; jcol <= jbot; jcol += nh { + jlen := min(nh, jbot-jcol+1) + + // Copy bottom of H to top+kzs of scratch (the first kzs + // rows get multiplied by zero). + impl.Dlacpy(blas.All, knz, jlen, h[(incol+1+j2)*ldh+jcol:], ldh, wh[kzs*ldwh:], ldwh) + + // Multiply by U21^T. + impl.Dlaset(blas.All, kzs, jlen, 0, 0, wh, ldwh) + bi.Dtrmm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, knz, jlen, + 1, u[j2*ldu+kzs:], ldu, wh[kzs*ldwh:], ldwh) + + // Multiply top of H by U11^T. 
+ bi.Dgemm(blas.Trans, blas.NoTrans, i2, jlen, j2, + 1, u, ldu, h[(incol+1)*ldh+jcol:], ldh, + 1, wh, ldwh) + + // Copy top of H to bottom of WH. + impl.Dlacpy(blas.All, j2, jlen, h[(incol+1)*ldh+jcol:], ldh, wh[i2*ldwh:], ldwh) + + // Multiply by U21^T. + bi.Dtrmm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, j2, jlen, + 1, u[i2:], ldu, wh[i2*ldwh:], ldwh) + + // Multiply by U22. + bi.Dgemm(blas.Trans, blas.NoTrans, i4-i2, jlen, j4-j2, + 1, u[j2*ldu+i2:], ldu, h[(incol+1+j2)*ldh+jcol:], ldh, + 1, wh[i2*ldwh:], ldwh) + + // Copy it back. + impl.Dlacpy(blas.All, kdu, jlen, wh, ldwh, h[(incol+1)*ldh+jcol:], ldh) + } + + // Vertical multiply. + for jrow := jtop; jrow <= max(incol, ktop)-1; jrow += nv { + jlen := min(nv, max(incol, ktop)-jrow) + + // Copy right of H to scratch (the first kzs columns get multiplied + // by zero). + impl.Dlacpy(blas.All, jlen, knz, h[jrow*ldh+incol+1+j2:], ldh, wv[kzs:], ldwv) + + // Multiply by U21. + impl.Dlaset(blas.All, jlen, kzs, 0, 0, wv, ldwv) + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, jlen, knz, + 1, u[j2*ldu+kzs:], ldu, wv[kzs:], ldwv) + + // Multiply by U11. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i2, j2, + 1, h[jrow*ldh+incol+1:], ldh, u, ldu, + 1, wv, ldwv) + + // Copy left of H to right of scratch. + impl.Dlacpy(blas.All, jlen, j2, h[jrow*ldh+incol+1:], ldh, wv[i2:], ldwv) + + // Multiply by U21. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.NonUnit, jlen, i4-i2, + 1, u[i2:], ldu, wv[i2:], ldwv) + + // Multiply by U22. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i4-i2, j4-j2, + 1, h[jrow*ldh+incol+1+j2:], ldh, u[j2*ldu+i2:], ldu, + 1, wv[i2:], ldwv) + + // Copy it back. + impl.Dlacpy(blas.All, jlen, kdu, wv, ldwv, h[jrow*ldh+incol+1:], ldh) + } + + if !wantz { + continue + } + // Multiply Z (also vertical). + for jrow := iloz; jrow <= ihiz; jrow += nv { + jlen := min(nv, ihiz-jrow+1) + + // Copy right of Z to left of scratch (first kzs columns get + // multiplied by zero). 
+ impl.Dlacpy(blas.All, jlen, knz, z[jrow*ldz+incol+1+j2:], ldz, wv[kzs:], ldwv) + + // Multiply by U12. + impl.Dlaset(blas.All, jlen, kzs, 0, 0, wv, ldwv) + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.NonUnit, jlen, knz, + 1, u[j2*ldu+kzs:], ldu, wv[kzs:], ldwv) + + // Multiply by U11. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i2, j2, + 1, z[jrow*ldz+incol+1:], ldz, u, ldu, + 1, wv, ldwv) + + // Copy left of Z to right of scratch. + impl.Dlacpy(blas.All, jlen, j2, z[jrow*ldz+incol+1:], ldz, wv[i2:], ldwv) + + // Multiply by U21. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.NonUnit, jlen, i4-i2, + 1, u[i2:], ldu, wv[i2:], ldwv) + + // Multiply by U22. + bi.Dgemm(blas.NoTrans, blas.NoTrans, jlen, i4-i2, j4-j2, + 1, z[jrow*ldz+incol+1+j2:], ldz, u[j2*ldu+i2:], ldu, + 1, wv[i2:], ldwv) + + // Copy the result back to Z. + impl.Dlacpy(blas.All, jlen, kdu, wv, ldwv, z[jrow*ldz+incol+1:], ldz) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go new file mode 100644 index 0000000000..9fc97a3285 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarf.go @@ -0,0 +1,101 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlarf applies an elementary reflector to a general rectangular matrix c. +// This computes +// c = h * c if side == Left +// c = c * h if side == right +// where +// h = 1 - tau * v * v^T +// and c is an m * n matrix. +// +// work is temporary storage of length at least m if side == Left and at least +// n if side == Right. This function will panic if this length requirement is not met. +// +// Dlarf is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlarf(side blas.Side, m, n int, v []float64, incv int, tau float64, c []float64, ldc int, work []float64) { + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case incv == 0: + panic(zeroIncV) + case ldc < max(1, n): + panic(badLdC) + } + + if m == 0 || n == 0 { + return + } + + applyleft := side == blas.Left + lenV := n + if applyleft { + lenV = m + } + + switch { + case len(v) < 1+(lenV-1)*abs(incv): + panic(shortV) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case (applyleft && len(work) < n) || (!applyleft && len(work) < m): + panic(shortWork) + } + + lastv := 0 // last non-zero element of v + lastc := 0 // last non-zero row/column of c + if tau != 0 { + var i int + if applyleft { + lastv = m - 1 + } else { + lastv = n - 1 + } + if incv > 0 { + i = lastv * incv + } + + // Look for the last non-zero row in v. + for lastv >= 0 && v[i] == 0 { + lastv-- + i -= incv + } + if applyleft { + // Scan for the last non-zero column in C[0:lastv, :] + lastc = impl.Iladlc(lastv+1, n, c, ldc) + } else { + // Scan for the last non-zero row in C[:, 0:lastv] + lastc = impl.Iladlr(m, lastv+1, c, ldc) + } + } + if lastv == -1 || lastc == -1 { + return + } + // Sometimes 1-indexing is nicer ... + bi := blas64.Implementation() + if applyleft { + // Form H * C + // w[0:lastc+1] = c[1:lastv+1, 1:lastc+1]^T * v[1:lastv+1,1] + bi.Dgemv(blas.Trans, lastv+1, lastc+1, 1, c, ldc, v, incv, 0, work, 1) + // c[0: lastv, 0: lastc] = c[...] - w[0:lastv, 1] * v[1:lastc, 1]^T + bi.Dger(lastv+1, lastc+1, -tau, v, incv, work, 1, c, ldc) + return + } + // Form C*H + // w[0:lastc+1,1] := c[0:lastc+1,0:lastv+1] * v[0:lastv+1,1] + bi.Dgemv(blas.NoTrans, lastc+1, lastv+1, 1, c, ldc, v, incv, 0, work, 1) + // c[0:lastc+1,0:lastv+1] = c[...] 
- w[0:lastc+1,0] * v[0:lastv+1,0]^T + bi.Dger(lastc+1, lastv+1, -tau, work, 1, v, incv, c, ldc) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go new file mode 100644 index 0000000000..4dd8e063ac --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfb.go @@ -0,0 +1,449 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlarfb applies a block reflector to a matrix. +// +// In the call to Dlarfb, the mxn c is multiplied by the implicitly defined matrix h as follows: +// c = h * c if side == Left and trans == NoTrans +// c = c * h if side == Right and trans == NoTrans +// c = h^T * c if side == Left and trans == Trans +// c = c * h^T if side == Right and trans == Trans +// h is a product of elementary reflectors. direct sets the direction of multiplication +// h = h_1 * h_2 * ... * h_k if direct == Forward +// h = h_k * h_k-1 * ... * h_1 if direct == Backward +// The combination of direct and store defines the orientation of the elementary +// reflectors. In all cases the ones on the diagonal are implicitly represented. 
+// +// If direct == lapack.Forward and store == lapack.ColumnWise +// V = [ 1 ] +// [v1 1 ] +// [v1 v2 1] +// [v1 v2 v3] +// [v1 v2 v3] +// If direct == lapack.Forward and store == lapack.RowWise +// V = [ 1 v1 v1 v1 v1] +// [ 1 v2 v2 v2] +// [ 1 v3 v3] +// If direct == lapack.Backward and store == lapack.ColumnWise +// V = [v1 v2 v3] +// [v1 v2 v3] +// [ 1 v2 v3] +// [ 1 v3] +// [ 1] +// If direct == lapack.Backward and store == lapack.RowWise +// V = [v1 v1 1 ] +// [v2 v2 v2 1 ] +// [v3 v3 v3 v3 1] +// An elementary reflector can be explicitly constructed by extracting the +// corresponding elements of v, placing a 1 where the diagonal would be, and +// placing zeros in the remaining elements. +// +// t is a k×k matrix containing the block reflector, and this function will panic +// if t is not of sufficient size. See Dlarft for more information. +// +// work is a temporary storage matrix with stride ldwork. +// work must be of size at least n×k side == Left and m×k if side == Right, and +// this function will panic if this size is not met. +// +// Dlarfb is an internal routine. It is exported for testing purposes. 
+func (Implementation) Dlarfb(side blas.Side, trans blas.Transpose, direct lapack.Direct, store lapack.StoreV, m, n, k int, v []float64, ldv int, t []float64, ldt int, c []float64, ldc int, work []float64, ldwork int) { + nv := m + if side == blas.Right { + nv = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case trans != blas.Trans && trans != blas.NoTrans: + panic(badTrans) + case direct != lapack.Forward && direct != lapack.Backward: + panic(badDirect) + case store != lapack.ColumnWise && store != lapack.RowWise: + panic(badStoreV) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case store == lapack.ColumnWise && ldv < max(1, k): + panic(badLdV) + case store == lapack.RowWise && ldv < max(1, nv): + panic(badLdV) + case ldt < max(1, k): + panic(badLdT) + case ldc < max(1, n): + panic(badLdC) + case ldwork < max(1, k): + panic(badLdWork) + } + + if m == 0 || n == 0 { + return + } + + nw := n + if side == blas.Right { + nw = m + } + switch { + case store == lapack.ColumnWise && len(v) < (nv-1)*ldv+k: + panic(shortV) + case store == lapack.RowWise && len(v) < (k-1)*ldv+nv: + panic(shortV) + case len(t) < (k-1)*ldt+k: + panic(shortT) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case len(work) < (nw-1)*ldwork+k: + panic(shortWork) + } + + bi := blas64.Implementation() + + transt := blas.Trans + if trans == blas.Trans { + transt = blas.NoTrans + } + // TODO(btracey): This follows the original Lapack code where the + // elements are copied into the columns of the working array. The + // loops should go in the other direction so the data is written + // into the rows of work so the copy is not strided. A bigger change + // would be to replace work with work^T, but benchmarks would be + // needed to see if the change is merited. + if store == lapack.ColumnWise { + if direct == lapack.Forward { + // V1 is the first k rows of C. V2 is the remaining rows. 
+ if side == blas.Left { + // W = C^T V = C1^T V1 + C2^T V2 (stored in work). + + // W = C1. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) + } + // W = W * V1. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, + n, k, 1, + v, ldv, + work, ldwork) + if m > k { + // W = W + C2^T V2. + bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, + 1, c[k*ldc:], ldc, v[k*ldv:], ldv, + 1, work, ldwork) + } + // W = W * T^T or W * T. + bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V * W^T. + if m > k { + // C2 -= V2 * W^T. + bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, + -1, v[k*ldv:], ldv, work, ldwork, + 1, c[k*ldc:], ldc) + } + // W *= V1^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, + 1, v, ldv, + work, ldwork) + // C1 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[j*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C = C * H or C * H^T, where C = (C1 C2). + + // W = C1. + for i := 0; i < k; i++ { + bi.Dcopy(m, c[i:], ldc, work[i:], ldwork) + } + // W *= V1. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, + 1, c[k:], ldc, v[k*ldv:], ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, + -1, work, ldwork, v[k*ldv:], ldv, + 1, c[k:], ldc) + } + // C -= W * V^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + // C -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+j] -= work[i*ldwork+j] + } + } + return + } + // V = (V1) + // = (V2) (last k rows) + // Where V2 is unit upper triangular. 
+ if side == blas.Left { + // Form H * C or + // W = C^T V. + + // W = C2^T. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) + } + // W *= V2. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, + 1, v[(m-k)*ldv:], ldv, + work, ldwork) + if m > k { + // W += C1^T * V1. + bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V * W^T. + if m > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, + -1, v, ldv, work, ldwork, + 1, c, ldc) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, + 1, v[(m-k)*ldv:], ldv, + work, ldwork) + // C2 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[(m-k+j)*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C * H or C * H^T where C = (C1 C2). + // W = C * V. + + // W = C2. + for j := 0; j < k; j++ { + bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) + } + + // W = W * V2. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, + 1, v[(n-k)*ldv:], ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + // C -= W * V^T. + if n > k { + // C1 -= W * V1^T. + bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, + -1, work, ldwork, v, ldv, + 1, c, ldc) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, + 1, v[(n-k)*ldv:], ldv, + work, ldwork) + // C2 -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+n-k+j] -= work[i*ldwork+j] + } + } + return + } + // Store = Rowwise. + if direct == lapack.Forward { + // V = (V1 V2) where v1 is unit upper triangular. 
+ if side == blas.Left { + // Form H * C or H^T * C where C = (C1; C2). + // W = C^T * V^T. + + // W = C1^T. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) + } + // W *= V1^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, + 1, v, ldv, + work, ldwork) + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, + 1, c[k*ldc:], ldc, v[k:], ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V^T * W^T. + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, + -1, v[k:], ldv, work, ldwork, + 1, c[k*ldc:], ldc) + } + // W *= V1. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, + 1, v, ldv, + work, ldwork) + // C1 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[j*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C * H or C * H^T where C = (C1 C2). + // W = C * V^T. + + // W = C1. + for j := 0; j < k; j++ { + bi.Dcopy(m, c[j:], ldc, work[j:], ldwork) + } + // W *= V1^T. + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, + 1, c[k:], ldc, v[k:], ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + // C -= W * V. + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, + -1, work, ldwork, v[k:], ldv, + 1, c[k:], ldc) + } + // W *= V1. + bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, + 1, v, ldv, + work, ldwork) + // C1 -= W. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+j] -= work[i*ldwork+j] + } + } + return + } + // V = (V1 V2) where V2 is the last k columns and is lower unit triangular. + if side == blas.Left { + // Form H * C or H^T C where C = (C1 ; C2). 
+ // W = C^T * V^T. + + // W = C2^T. + for j := 0; j < k; j++ { + bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, + 1, v[m-k:], ldv, + work, ldwork) + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, + 1, t, ldt, + work, ldwork) + // C -= V^T * W^T. + if m > k { + bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, + -1, v, ldv, work, ldwork, + 1, c, ldc) + } + // W *= V2. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, k, + 1, v[m-k:], ldv, + work, ldwork) + // C2 -= W^T. + // TODO(btracey): This should use blas.Axpy. + for i := 0; i < n; i++ { + for j := 0; j < k; j++ { + c[(m-k+j)*ldc+i] -= work[i*ldwork+j] + } + } + return + } + // Form C * H or C * H^T where C = (C1 C2). + // W = C * V^T. + // W = C2. + for j := 0; j < k; j++ { + bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) + } + // W *= V2^T. + bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, + 1, v[n-k:], ldv, + work, ldwork) + if n > k { + bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, + 1, c, ldc, v, ldv, + 1, work, ldwork) + } + // W *= T or T^T. + bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, + 1, t, ldt, + work, ldwork) + // C -= W * V. + if n > k { + bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, + -1, work, ldwork, v, ldv, + 1, c, ldc) + } + // W *= V2. + bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, + 1, v[n-k:], ldv, + work, ldwork) + // C1 -= W. + // TODO(btracey): This should use blas.Axpy. 
+ for i := 0; i < m; i++ { + for j := 0; j < k; j++ { + c[i*ldc+n-k+j] -= work[i*ldwork+j] + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go new file mode 100644 index 0000000000..e037fdd6bd --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfg.go @@ -0,0 +1,71 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlarfg generates an elementary reflector for a Householder matrix. It creates +// a real elementary reflector of order n such that +// H * (alpha) = (beta) +// ( x) ( 0) +// H^T * H = I +// H is represented in the form +// H = 1 - tau * (1; v) * (1 v^T) +// where tau is a real scalar. +// +// On entry, x contains the vector x, on exit it contains v. +// +// Dlarfg is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlarfg(n int, alpha float64, x []float64, incX int) (beta, tau float64) { + switch { + case n < 0: + panic(nLT0) + case incX <= 0: + panic(badIncX) + } + + if n <= 1 { + return alpha, 0 + } + + if len(x) < 1+(n-2)*abs(incX) { + panic(shortX) + } + + bi := blas64.Implementation() + + xnorm := bi.Dnrm2(n-1, x, incX) + if xnorm == 0 { + return alpha, 0 + } + beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) + safmin := dlamchS / dlamchE + knt := 0 + if math.Abs(beta) < safmin { + // xnorm and beta may be inaccurate, scale x and recompute. 
+ rsafmn := 1 / safmin + for { + knt++ + bi.Dscal(n-1, rsafmn, x, incX) + beta *= rsafmn + alpha *= rsafmn + if math.Abs(beta) >= safmin { + break + } + } + xnorm = bi.Dnrm2(n-1, x, incX) + beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) + } + tau = (beta - alpha) / beta + bi.Dscal(n-1, 1/(alpha-beta), x, incX) + for j := 0; j < knt; j++ { + beta *= safmin + } + return beta, tau +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go new file mode 100644 index 0000000000..8f03eb8b3b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarft.go @@ -0,0 +1,166 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlarft forms the triangular factor T of a block reflector H, storing the answer +// in t. +// H = I - V * T * V^T if store == lapack.ColumnWise +// H = I - V^T * T * V if store == lapack.RowWise +// H is defined by a product of the elementary reflectors where +// H = H_0 * H_1 * ... * H_{k-1} if direct == lapack.Forward +// H = H_{k-1} * ... * H_1 * H_0 if direct == lapack.Backward +// +// t is a k×k triangular matrix. t is upper triangular if direct = lapack.Forward +// and lower triangular otherwise. This function will panic if t is not of +// sufficient size. +// +// store describes the storage of the elementary reflectors in v. See +// Dlarfb for a description of layout. +// +// tau contains the scalar factors of the elementary reflectors H_i. +// +// Dlarft is an internal routine. It is exported for testing purposes. 
+func (Implementation) Dlarft(direct lapack.Direct, store lapack.StoreV, n, k int, v []float64, ldv int, tau []float64, t []float64, ldt int) { + mv, nv := n, k + if store == lapack.RowWise { + mv, nv = k, n + } + switch { + case direct != lapack.Forward && direct != lapack.Backward: + panic(badDirect) + case store != lapack.RowWise && store != lapack.ColumnWise: + panic(badStoreV) + case n < 0: + panic(nLT0) + case k < 1: + panic(kLT1) + case ldv < max(1, nv): + panic(badLdV) + case len(tau) < k: + panic(shortTau) + case ldt < max(1, k): + panic(shortT) + } + + if n == 0 { + return + } + + switch { + case len(v) < (mv-1)*ldv+nv: + panic(shortV) + case len(t) < (k-1)*ldt+k: + panic(shortT) + } + + bi := blas64.Implementation() + + // TODO(btracey): There are a number of minor obvious loop optimizations here. + // TODO(btracey): It may be possible to rearrange some of the code so that + // index of 1 is more common in the Dgemv. + if direct == lapack.Forward { + prevlastv := n - 1 + for i := 0; i < k; i++ { + prevlastv = max(i, prevlastv) + if tau[i] == 0 { + for j := 0; j <= i; j++ { + t[j*ldt+i] = 0 + } + continue + } + var lastv int + if store == lapack.ColumnWise { + // skip trailing zeros + for lastv = n - 1; lastv >= i+1; lastv-- { + if v[lastv*ldv+i] != 0 { + break + } + } + for j := 0; j < i; j++ { + t[j*ldt+i] = -tau[i] * v[i*ldv+j] + } + j := min(lastv, prevlastv) + bi.Dgemv(blas.Trans, j-i, i, + -tau[i], v[(i+1)*ldv:], ldv, v[(i+1)*ldv+i:], ldv, + 1, t[i:], ldt) + } else { + for lastv = n - 1; lastv >= i+1; lastv-- { + if v[i*ldv+lastv] != 0 { + break + } + } + for j := 0; j < i; j++ { + t[j*ldt+i] = -tau[i] * v[j*ldv+i] + } + j := min(lastv, prevlastv) + bi.Dgemv(blas.NoTrans, i, j-i, + -tau[i], v[i+1:], ldv, v[i*ldv+i+1:], 1, + 1, t[i:], ldt) + } + bi.Dtrmv(blas.Upper, blas.NoTrans, blas.NonUnit, i, t, ldt, t[i:], ldt) + t[i*ldt+i] = tau[i] + if i > 1 { + prevlastv = max(prevlastv, lastv) + } else { + prevlastv = lastv + } + } + return + } + prevlastv 
:= 0 + for i := k - 1; i >= 0; i-- { + if tau[i] == 0 { + for j := i; j < k; j++ { + t[j*ldt+i] = 0 + } + continue + } + var lastv int + if i < k-1 { + if store == lapack.ColumnWise { + for lastv = 0; lastv < i; lastv++ { + if v[lastv*ldv+i] != 0 { + break + } + } + for j := i + 1; j < k; j++ { + t[j*ldt+i] = -tau[i] * v[(n-k+i)*ldv+j] + } + j := max(lastv, prevlastv) + bi.Dgemv(blas.Trans, n-k+i-j, k-i-1, + -tau[i], v[j*ldv+i+1:], ldv, v[j*ldv+i:], ldv, + 1, t[(i+1)*ldt+i:], ldt) + } else { + for lastv = 0; lastv < i; lastv++ { + if v[i*ldv+lastv] != 0 { + break + } + } + for j := i + 1; j < k; j++ { + t[j*ldt+i] = -tau[i] * v[j*ldv+n-k+i] + } + j := max(lastv, prevlastv) + bi.Dgemv(blas.NoTrans, k-i-1, n-k+i-j, + -tau[i], v[(i+1)*ldv+j:], ldv, v[i*ldv+j:], 1, + 1, t[(i+1)*ldt+i:], ldt) + } + bi.Dtrmv(blas.Lower, blas.NoTrans, blas.NonUnit, k-i-1, + t[(i+1)*ldt+i+1:], ldt, + t[(i+1)*ldt+i:], ldt) + if i > 0 { + prevlastv = min(prevlastv, lastv) + } else { + prevlastv = lastv + } + } + t[i*ldt+i] = tau[i] + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go new file mode 100644 index 0000000000..d7928c8cf4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlarfx.go @@ -0,0 +1,550 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dlarfx applies an elementary reflector H to a real m×n matrix C, from either +// the left or the right, with loop unrolling when the reflector has order less +// than 11. +// +// H is represented in the form +// H = I - tau * v * v^T, +// where tau is a real scalar and v is a real vector. If tau = 0, then H is +// taken to be the identity matrix. +// +// v must have length equal to m if side == blas.Left, and equal to n if side == +// blas.Right, otherwise Dlarfx will panic. 
+// +// c and ldc represent the m×n matrix C. On return, C is overwritten by the +// matrix H * C if side == blas.Left, or C * H if side == blas.Right. +// +// work must have length at least n if side == blas.Left, and at least m if side +// == blas.Right, otherwise Dlarfx will panic. work is not referenced if H has +// order < 11. +// +// Dlarfx is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlarfx(side blas.Side, m, n int, v []float64, tau float64, c []float64, ldc int, work []float64) { + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + nh := m + lwork := n + if side == blas.Right { + nh = n + lwork = m + } + switch { + case len(v) < nh: + panic(shortV) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case nh > 10 && len(work) < lwork: + panic(shortWork) + } + + if tau == 0 { + return + } + + if side == blas.Left { + // Form H * C, where H has order m. + switch m { + default: // Code for general m. + impl.Dlarf(side, m, n, v, 1, tau, c, ldc, work) + return + + case 0: // No-op for zero size matrix. + return + + case 1: // Special code for 1×1 Householder matrix. + t0 := 1 - tau*v[0]*v[0] + for j := 0; j < n; j++ { + c[j] *= t0 + } + return + + case 2: // Special code for 2×2 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + } + return + + case 3: // Special code for 3×3 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + } + return + + case 4: // Special code for 4×4 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + } + return + + case 5: // Special code for 5×5 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + } + return + + case 6: // Special code for 6×6 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + } + return + + case 7: // Special code for 7×7 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + } + return + + case 8: // Special code for 8×8 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + c[7*ldc+j] -= sum * t7 + } + return + + case 9: // Special code for 9×9 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + v8*c[8*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + c[7*ldc+j] -= sum * t7 + c[8*ldc+j] -= sum * t8 + } + return + + case 10: // Special code for 10×10 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + v9 := v[9] + t9 := tau * v9 + for j := 0; j < n; j++ { + sum := v0*c[j] + v1*c[ldc+j] + v2*c[2*ldc+j] + v3*c[3*ldc+j] + v4*c[4*ldc+j] + + v5*c[5*ldc+j] + v6*c[6*ldc+j] + v7*c[7*ldc+j] + v8*c[8*ldc+j] + v9*c[9*ldc+j] + c[j] -= sum * t0 + c[ldc+j] -= sum * t1 + c[2*ldc+j] -= sum * t2 + c[3*ldc+j] -= sum * t3 + c[4*ldc+j] -= sum * t4 + c[5*ldc+j] -= sum * t5 + c[6*ldc+j] -= sum * t6 + c[7*ldc+j] -= sum * t7 + c[8*ldc+j] -= sum * t8 + c[9*ldc+j] -= sum * t9 + } + return + } + } + + // Form C * H, where H has order n. + switch n { + default: // Code for general n. + impl.Dlarf(side, m, n, v, 1, tau, c, ldc, work) + return + + case 0: // No-op for zero size matrix. + return + + case 1: // Special code for 1×1 Householder matrix. + t0 := 1 - tau*v[0]*v[0] + for j := 0; j < m; j++ { + c[j*ldc] *= t0 + } + return + + case 2: // Special code for 2×2 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + } + return + + case 3: // Special code for 3×3 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + } + return + + case 4: // Special code for 4×4 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + } + return + + case 5: // Special code for 5×5 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + } + return + + case 6: // Special code for 6×6 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + v5*cs[5] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + } + return + + case 7: // Special code for 7×7 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + } + return + + case 8: // Special code for 8×8 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + v7*cs[7] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + cs[7] -= sum * t7 + } + return + + case 9: // Special code for 9×9 Householder matrix. + v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + v7*cs[7] + v8*cs[8] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + cs[7] -= sum * t7 + cs[8] -= sum * t8 + } + return + + case 10: // Special code for 10×10 Householder matrix. 
+ v0 := v[0] + t0 := tau * v0 + v1 := v[1] + t1 := tau * v1 + v2 := v[2] + t2 := tau * v2 + v3 := v[3] + t3 := tau * v3 + v4 := v[4] + t4 := tau * v4 + v5 := v[5] + t5 := tau * v5 + v6 := v[6] + t6 := tau * v6 + v7 := v[7] + t7 := tau * v7 + v8 := v[8] + t8 := tau * v8 + v9 := v[9] + t9 := tau * v9 + for j := 0; j < m; j++ { + cs := c[j*ldc:] + sum := v0*cs[0] + v1*cs[1] + v2*cs[2] + v3*cs[3] + v4*cs[4] + + v5*cs[5] + v6*cs[6] + v7*cs[7] + v8*cs[8] + v9*cs[9] + cs[0] -= sum * t0 + cs[1] -= sum * t1 + cs[2] -= sum * t2 + cs[3] -= sum * t3 + cs[4] -= sum * t4 + cs[5] -= sum * t5 + cs[6] -= sum * t6 + cs[7] -= sum * t7 + cs[8] -= sum * t8 + cs[9] -= sum * t9 + } + return + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go new file mode 100644 index 0000000000..ad64546137 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlartg.go @@ -0,0 +1,80 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlartg generates a plane rotation so that +// [ cs sn] * [f] = [r] +// [-sn cs] [g] = [0] +// This is a more accurate version of BLAS drotg, with the other differences that +// if g = 0, then cs = 1 and sn = 0, and if f = 0 and g != 0, then cs = 0 and sn = 1. +// If abs(f) > abs(g), cs will be positive. +// +// Dlartg is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlartg(f, g float64) (cs, sn, r float64) { + safmn2 := math.Pow(dlamchB, math.Trunc(math.Log(dlamchS/dlamchE)/math.Log(dlamchB)/2)) + safmx2 := 1 / safmn2 + if g == 0 { + cs = 1 + sn = 0 + r = f + return cs, sn, r + } + if f == 0 { + cs = 0 + sn = 1 + r = g + return cs, sn, r + } + f1 := f + g1 := g + scale := math.Max(math.Abs(f1), math.Abs(g1)) + if scale >= safmx2 { + var count int + for { + count++ + f1 *= safmn2 + g1 *= safmn2 + scale = math.Max(math.Abs(f1), math.Abs(g1)) + if scale < safmx2 { + break + } + } + r = math.Sqrt(f1*f1 + g1*g1) + cs = f1 / r + sn = g1 / r + for i := 0; i < count; i++ { + r *= safmx2 + } + } else if scale <= safmn2 { + var count int + for { + count++ + f1 *= safmx2 + g1 *= safmx2 + scale = math.Max(math.Abs(f1), math.Abs(g1)) + if scale >= safmn2 { + break + } + } + r = math.Sqrt(f1*f1 + g1*g1) + cs = f1 / r + sn = g1 / r + for i := 0; i < count; i++ { + r *= safmn2 + } + } else { + r = math.Sqrt(f1*f1 + g1*g1) + cs = f1 / r + sn = g1 / r + } + if math.Abs(f) > math.Abs(g) && cs < 0 { + cs *= -1 + sn *= -1 + r *= -1 + } + return cs, sn, r +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go new file mode 100644 index 0000000000..9922b4aa77 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlas2.go @@ -0,0 +1,43 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlas2 computes the singular values of the 2×2 matrix defined by +// [F G] +// [0 H] +// The smaller and larger singular values are returned in that order. +// +// Dlas2 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlas2(f, g, h float64) (ssmin, ssmax float64) { + fa := math.Abs(f) + ga := math.Abs(g) + ha := math.Abs(h) + fhmin := math.Min(fa, ha) + fhmax := math.Max(fa, ha) + if fhmin == 0 { + if fhmax == 0 { + return 0, ga + } + v := math.Min(fhmax, ga) / math.Max(fhmax, ga) + return 0, math.Max(fhmax, ga) * math.Sqrt(1+v*v) + } + if ga < fhmax { + as := 1 + fhmin/fhmax + at := (fhmax - fhmin) / fhmax + au := (ga / fhmax) * (ga / fhmax) + c := 2 / (math.Sqrt(as*as+au) + math.Sqrt(at*at+au)) + return fhmin * c, fhmax / c + } + au := fhmax / ga + if au == 0 { + return fhmin * fhmax / ga, ga + } + as := 1 + fhmin/fhmax + at := (fhmax - fhmin) / fhmax + c := 1 / (math.Sqrt(1+(as*au)*(as*au)) + math.Sqrt(1+(at*au)*(at*au))) + return 2 * (fhmin * c) * au, ga / (c + c) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go new file mode 100644 index 0000000000..61c4eb79cb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlascl.go @@ -0,0 +1,111 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlascl multiplies an m×n matrix by the scalar cto/cfrom. +// +// cfrom must not be zero, and cto and cfrom must not be NaN, otherwise Dlascl +// will panic. +// +// Dlascl is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlascl(kind lapack.MatrixType, kl, ku int, cfrom, cto float64, m, n int, a []float64, lda int) { + switch kind { + default: + panic(badMatrixType) + case 'H', 'B', 'Q', 'Z': // See dlascl.f. 
+ panic("not implemented") + case lapack.General, lapack.UpperTri, lapack.LowerTri: + if lda < max(1, n) { + panic(badLdA) + } + } + switch { + case cfrom == 0: + panic(zeroCFrom) + case math.IsNaN(cfrom): + panic(nanCFrom) + case math.IsNaN(cto): + panic(nanCTo) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + } + + if n == 0 || m == 0 { + return + } + + switch kind { + case lapack.General, lapack.UpperTri, lapack.LowerTri: + if len(a) < (m-1)*lda+n { + panic(shortA) + } + } + + smlnum := dlamchS + bignum := 1 / smlnum + cfromc := cfrom + ctoc := cto + cfrom1 := cfromc * smlnum + for { + var done bool + var mul, ctol float64 + if cfrom1 == cfromc { + // cfromc is inf. + mul = ctoc / cfromc + done = true + ctol = ctoc + } else { + ctol = ctoc / bignum + if ctol == ctoc { + // ctoc is either 0 or inf. + mul = ctoc + done = true + cfromc = 1 + } else if math.Abs(cfrom1) > math.Abs(ctoc) && ctoc != 0 { + mul = smlnum + done = false + cfromc = cfrom1 + } else if math.Abs(ctol) > math.Abs(cfromc) { + mul = bignum + done = false + ctoc = ctol + } else { + mul = ctoc / cfromc + done = true + } + } + switch kind { + case lapack.General: + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = a[i*lda+j] * mul + } + } + case lapack.UpperTri: + for i := 0; i < m; i++ { + for j := i; j < n; j++ { + a[i*lda+j] = a[i*lda+j] * mul + } + } + case lapack.LowerTri: + for i := 0; i < m; i++ { + for j := 0; j <= min(i, n-1); j++ { + a[i*lda+j] = a[i*lda+j] * mul + } + } + } + if done { + break + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go new file mode 100644 index 0000000000..b4e63916fb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaset.go @@ -0,0 +1,57 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dlaset sets the off-diagonal elements of A to alpha, and the diagonal +// elements to beta. If uplo == blas.Upper, only the elements in the upper +// triangular part are set. If uplo == blas.Lower, only the elements in the +// lower triangular part are set. If uplo is otherwise, all of the elements of A +// are set. +// +// Dlaset is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaset(uplo blas.Uplo, m, n int, alpha, beta float64, a []float64, lda int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + minmn := min(m, n) + if minmn == 0 { + return + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + + if uplo == blas.Upper { + for i := 0; i < m; i++ { + for j := i + 1; j < n; j++ { + a[i*lda+j] = alpha + } + } + } else if uplo == blas.Lower { + for i := 0; i < m; i++ { + for j := 0; j < min(i+1, n); j++ { + a[i*lda+j] = alpha + } + } + } else { + for i := 0; i < m; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = alpha + } + } + } + for i := 0; i < minmn; i++ { + a[i*lda+i] = beta + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go new file mode 100644 index 0000000000..1f1d1dc42e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq1.go @@ -0,0 +1,100 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dlasq1 computes the singular values of an n×n bidiagonal matrix with diagonal +// d and off-diagonal e. On exit, d contains the singular values in decreasing +// order, and e is overwritten. 
d must have length at least n, e must have +// length at least n-1, and the input work must have length at least 4*n. Dlasq1 +// will panic if these conditions are not met. +// +// Dlasq1 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq1(n int, d, e, work []float64) (info int) { + if n < 0 { + panic(nLT0) + } + + if n == 0 { + return info + } + + switch { + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + case len(work) < 4*n: + panic(shortWork) + } + + if n == 1 { + d[0] = math.Abs(d[0]) + return info + } + + if n == 2 { + d[1], d[0] = impl.Dlas2(d[0], e[0], d[1]) + return info + } + + // Estimate the largest singular value. + var sigmx float64 + for i := 0; i < n-1; i++ { + d[i] = math.Abs(d[i]) + sigmx = math.Max(sigmx, math.Abs(e[i])) + } + d[n-1] = math.Abs(d[n-1]) + // Early return if sigmx is zero (matrix is already diagonal). + if sigmx == 0 { + impl.Dlasrt(lapack.SortDecreasing, n, d) + return info + } + + for i := 0; i < n; i++ { + sigmx = math.Max(sigmx, d[i]) + } + + // Copy D and E into WORK (in the Z format) and scale (squaring the + // input data makes scaling by a power of the radix pointless). + + eps := dlamchP + safmin := dlamchS + scale := math.Sqrt(eps / safmin) + bi := blas64.Implementation() + bi.Dcopy(n, d, 1, work, 2) + bi.Dcopy(n-1, e, 1, work[1:], 2) + impl.Dlascl(lapack.General, 0, 0, sigmx, scale, 2*n-1, 1, work, 1) + + // Compute the q's and e's. + for i := 0; i < 2*n-1; i++ { + work[i] *= work[i] + } + work[2*n-1] = 0 + + info = impl.Dlasq2(n, work) + if info == 0 { + for i := 0; i < n; i++ { + d[i] = math.Sqrt(work[i]) + } + impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, d, 1) + } else if info == 2 { + // Maximum number of iterations exceeded. Move data from work + // into D and E so the calling subroutine can try to finish. 
+ for i := 0; i < n; i++ { + d[i] = math.Sqrt(work[2*i]) + e[i] = math.Sqrt(work[2*i+1]) + } + impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, d, 1) + impl.Dlascl(lapack.General, 0, 0, scale, sigmx, n, 1, e, 1) + } + return info +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go new file mode 100644 index 0000000000..fd24a5509a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq2.go @@ -0,0 +1,369 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dlasq2 computes all the eigenvalues of the symmetric positive +// definite tridiagonal matrix associated with the qd array Z. Eigevalues +// are computed to high relative accuracy avoiding denormalization, underflow +// and overflow. +// +// To see the relation of Z to the tridiagonal matrix, let L be a +// unit lower bidiagonal matrix with sub-diagonals Z(2,4,6,,..) and +// let U be an upper bidiagonal matrix with 1's above and diagonal +// Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the +// symmetric tridiagonal to which it is similar. +// +// info returns a status error. The return codes mean as follows: +// 0: The algorithm completed successfully. +// 1: A split was marked by a positive value in e. +// 2: Current block of Z not diagonalized after 100*n iterations (in inner +// while loop). On exit Z holds a qd array with the same eigenvalues as +// the given Z. +// 3: Termination criterion of outer while loop not met (program created more +// than N unreduced blocks). +// +// z must have length at least 4*n, and must not contain any negative elements. +// Dlasq2 will panic otherwise. +// +// Dlasq2 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlasq2(n int, z []float64) (info int) { + if n < 0 { + panic(nLT0) + } + + if n == 0 { + return info + } + + if len(z) < 4*n { + panic(shortZ) + } + + if n == 1 { + if z[0] < 0 { + panic(negZ) + } + return info + } + + const cbias = 1.5 + + eps := dlamchP + safmin := dlamchS + tol := eps * 100 + tol2 := tol * tol + if n == 2 { + if z[1] < 0 || z[2] < 0 { + panic(negZ) + } else if z[2] > z[0] { + z[0], z[2] = z[2], z[0] + } + z[4] = z[0] + z[1] + z[2] + if z[1] > z[2]*tol2 { + t := 0.5 * (z[0] - z[2] + z[1]) + s := z[2] * (z[1] / t) + if s <= t { + s = z[2] * (z[1] / (t * (1 + math.Sqrt(1+s/t)))) + } else { + s = z[2] * (z[1] / (t + math.Sqrt(t)*math.Sqrt(t+s))) + } + t = z[0] + s + z[1] + z[2] *= z[0] / t + z[0] = t + } + z[1] = z[2] + z[5] = z[1] + z[0] + return info + } + // Check for negative data and compute sums of q's and e's. + z[2*n-1] = 0 + emin := z[1] + var d, e, qmax float64 + var i1, n1 int + for k := 0; k < 2*(n-1); k += 2 { + if z[k] < 0 || z[k+1] < 0 { + panic(negZ) + } + d += z[k] + e += z[k+1] + qmax = math.Max(qmax, z[k]) + emin = math.Min(emin, z[k+1]) + } + if z[2*(n-1)] < 0 { + panic(negZ) + } + d += z[2*(n-1)] + // Check for diagonality. + if e == 0 { + for k := 1; k < n; k++ { + z[k] = z[2*k] + } + impl.Dlasrt(lapack.SortDecreasing, n, z) + z[2*(n-1)] = d + return info + } + trace := d + e + // Check for zero data. + if trace == 0 { + z[2*(n-1)] = 0 + return info + } + // Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). + for k := 2 * n; k >= 2; k -= 2 { + z[2*k-1] = 0 + z[2*k-2] = z[k-1] + z[2*k-3] = 0 + z[2*k-4] = z[k-2] + } + i0 := 0 + n0 := n - 1 + + // Reverse the qd-array, if warranted. 
+ // z[4*i0-3] --> z[4*(i0+1)-3-1] --> z[4*i0] + if cbias*z[4*i0] < z[4*n0] { + ipn4Out := 4 * (i0 + n0 + 2) + for i4loop := 4 * (i0 + 1); i4loop <= 2*(i0+n0+1); i4loop += 4 { + i4 := i4loop - 1 + ipn4 := ipn4Out - 1 + z[i4-3], z[ipn4-i4-4] = z[ipn4-i4-4], z[i4-3] + z[i4-1], z[ipn4-i4-6] = z[ipn4-i4-6], z[i4-1] + } + } + + // Initial split checking via dqd and Li's test. + pp := 0 + for k := 0; k < 2; k++ { + d = z[4*n0+pp] + for i4loop := 4*n0 + pp; i4loop >= 4*(i0+1)+pp; i4loop -= 4 { + i4 := i4loop - 1 + if z[i4-1] <= tol2*d { + z[i4-1] = math.Copysign(0, -1) + d = z[i4-3] + } else { + d = z[i4-3] * (d / (d + z[i4-1])) + } + } + // dqd maps Z to ZZ plus Li's test. + emin = z[4*(i0+1)+pp] + d = z[4*i0+pp] + for i4loop := 4*(i0+1) + pp; i4loop <= 4*n0+pp; i4loop += 4 { + i4 := i4loop - 1 + z[i4-2*pp-2] = d + z[i4-1] + if z[i4-1] <= tol2*d { + z[i4-1] = math.Copysign(0, -1) + z[i4-2*pp-2] = d + z[i4-2*pp] = 0 + d = z[i4+1] + } else if safmin*z[i4+1] < z[i4-2*pp-2] && safmin*z[i4-2*pp-2] < z[i4+1] { + tmp := z[i4+1] / z[i4-2*pp-2] + z[i4-2*pp] = z[i4-1] * tmp + d *= tmp + } else { + z[i4-2*pp] = z[i4+1] * (z[i4-1] / z[i4-2*pp-2]) + d = z[i4+1] * (d / z[i4-2*pp-2]) + } + emin = math.Min(emin, z[i4-2*pp]) + } + z[4*(n0+1)-pp-3] = d + + // Now find qmax. + qmax = z[4*(i0+1)-pp-3] + for i4loop := 4*(i0+1) - pp + 2; i4loop <= 4*(n0+1)+pp-2; i4loop += 4 { + i4 := i4loop - 1 + qmax = math.Max(qmax, z[i4]) + } + // Prepare for the next iteration on K. + pp = 1 - pp + } + + // Initialise variables to pass to DLASQ3. + var ttype int + var dmin1, dmin2, dn, dn1, dn2, g, tau float64 + var tempq float64 + iter := 2 + var nFail int + nDiv := 2 * (n0 - i0) + var i4 int +outer: + for iwhila := 1; iwhila <= n+1; iwhila++ { + // Test for completion. + if n0 < 0 { + // Move q's to the front. + for k := 1; k < n; k++ { + z[k] = z[4*k] + } + // Sort and compute sum of eigenvalues. 
+ impl.Dlasrt(lapack.SortDecreasing, n, z) + e = 0 + for k := n - 1; k >= 0; k-- { + e += z[k] + } + // Store trace, sum(eigenvalues) and information on performance. + z[2*n] = trace + z[2*n+1] = e + z[2*n+2] = float64(iter) + z[2*n+3] = float64(nDiv) / float64(n*n) + z[2*n+4] = 100 * float64(nFail) / float64(iter) + return info + } + + // While array unfinished do + // e[n0] holds the value of sigma when submatrix in i0:n0 + // splits from the rest of the array, but is negated. + var desig float64 + var sigma float64 + if n0 != n-1 { + sigma = -z[4*(n0+1)-2] + } + if sigma < 0 { + info = 1 + return info + } + // Find last unreduced submatrix's top index i0, find qmax and + // emin. Find Gershgorin-type bound if Q's much greater than E's. + var emax float64 + if n0 > i0 { + emin = math.Abs(z[4*(n0+1)-6]) + } else { + emin = 0 + } + qmin := z[4*(n0+1)-4] + qmax = qmin + zSmall := false + for i4loop := 4 * (n0 + 1); i4loop >= 8; i4loop -= 4 { + i4 = i4loop - 1 + if z[i4-5] <= 0 { + zSmall = true + break + } + if qmin >= 4*emax { + qmin = math.Min(qmin, z[i4-3]) + emax = math.Max(emax, z[i4-5]) + } + qmax = math.Max(qmax, z[i4-7]+z[i4-5]) + emin = math.Min(emin, z[i4-5]) + } + if !zSmall { + i4 = 3 + } + i0 = (i4+1)/4 - 1 + pp = 0 + if n0-i0 > 1 { + dee := z[4*i0] + deemin := dee + kmin := i0 + for i4loop := 4*(i0+1) + 1; i4loop <= 4*(n0+1)-3; i4loop += 4 { + i4 := i4loop - 1 + dee = z[i4] * (dee / (dee + z[i4-2])) + if dee <= deemin { + deemin = dee + kmin = (i4+4)/4 - 1 + } + } + if (kmin-i0)*2 < n0-kmin && deemin <= 0.5*z[4*n0] { + ipn4Out := 4 * (i0 + n0 + 2) + pp = 2 + for i4loop := 4 * (i0 + 1); i4loop <= 2*(i0+n0+1); i4loop += 4 { + i4 := i4loop - 1 + ipn4 := ipn4Out - 1 + z[i4-3], z[ipn4-i4-4] = z[ipn4-i4-4], z[i4-3] + z[i4-2], z[ipn4-i4-3] = z[ipn4-i4-3], z[i4-2] + z[i4-1], z[ipn4-i4-6] = z[ipn4-i4-6], z[i4-1] + z[i4], z[ipn4-i4-5] = z[ipn4-i4-5], z[i4] + } + } + } + // Put -(initial shift) into DMIN. 
+ dmin := -math.Max(0, qmin-2*math.Sqrt(qmin)*math.Sqrt(emax)) + + // Now i0:n0 is unreduced. + // PP = 0 for ping, PP = 1 for pong. + // PP = 2 indicates that flipping was applied to the Z array and + // and that the tests for deflation upon entry in Dlasq3 + // should not be performed. + nbig := 100 * (n0 - i0 + 1) + for iwhilb := 0; iwhilb < nbig; iwhilb++ { + if i0 > n0 { + continue outer + } + + // While submatrix unfinished take a good dqds step. + i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau = + impl.Dlasq3(i0, n0, z, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau) + + pp = 1 - pp + // When emin is very small check for splits. + if pp == 0 && n0-i0 >= 3 { + if z[4*(n0+1)-1] <= tol2*qmax || z[4*(n0+1)-2] <= tol2*sigma { + splt := i0 - 1 + qmax = z[4*i0] + emin = z[4*(i0+1)-2] + oldemn := z[4*(i0+1)-1] + for i4loop := 4 * (i0 + 1); i4loop <= 4*(n0-2); i4loop += 4 { + i4 := i4loop - 1 + if z[i4] <= tol2*z[i4-3] || z[i4-1] <= tol2*sigma { + z[i4-1] = -sigma + splt = i4 / 4 + qmax = 0 + emin = z[i4+3] + oldemn = z[i4+4] + } else { + qmax = math.Max(qmax, z[i4+1]) + emin = math.Min(emin, z[i4-1]) + oldemn = math.Min(oldemn, z[i4]) + } + } + z[4*(n0+1)-2] = emin + z[4*(n0+1)-1] = oldemn + i0 = splt + 1 + } + } + } + // Maximum number of iterations exceeded, restore the shift + // sigma and place the new d's and e's in a qd array. + // This might need to be done for several blocks. + info = 2 + i1 = i0 + for { + tempq = z[4*i0] + z[4*i0] += sigma + for k := i0 + 1; k <= n0; k++ { + tempe := z[4*(k+1)-6] + z[4*(k+1)-6] *= tempq / z[4*(k+1)-8] + tempq = z[4*k] + z[4*k] += sigma + tempe - z[4*(k+1)-6] + } + // Prepare to do this on the previous block if there is one. + if i1 <= 0 { + break + } + n1 = i1 - 1 + for i1 >= 1 && z[4*(i1+1)-6] >= 0 { + i1 -= 1 + } + sigma = -z[4*(n1+1)-2] + } + for k := 0; k < n; k++ { + z[2*k] = z[4*k] + // Only the block 1..N0 is unfinished. 
The rest of the e's + // must be essentially zero, although sometimes other data + // has been stored in them. + if k < n0 { + z[2*(k+1)-1] = z[4*(k+1)-1] + } else { + z[2*(k+1)] = 0 + } + } + return info + } + info = 3 + return info +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go new file mode 100644 index 0000000000..a05e94ef17 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq3.go @@ -0,0 +1,172 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq3 checks for deflation, computes a shift (tau) and calls dqds. +// In case of failure it changes shifts, and tries again until output +// is positive. +// +// Dlasq3 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq3(i0, n0 int, z []float64, pp int, dmin, sigma, desig, qmax float64, nFail, iter, nDiv int, ttype int, dmin1, dmin2, dn, dn1, dn2, g, tau float64) ( + i0Out, n0Out, ppOut int, dminOut, sigmaOut, desigOut, qmaxOut float64, nFailOut, iterOut, nDivOut, ttypeOut int, dmin1Out, dmin2Out, dnOut, dn1Out, dn2Out, gOut, tauOut float64) { + switch { + case i0 < 0: + panic(i0LT0) + case n0 < 0: + panic(n0LT0) + case len(z) < 4*n0: + panic(shortZ) + case pp != 0 && pp != 1 && pp != 2: + panic(badPp) + } + + const cbias = 1.5 + + n0in := n0 + eps := dlamchP + tol := eps * 100 + tol2 := tol * tol + var nn int + var t float64 + for { + if n0 < i0 { + return i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau + } + if n0 == i0 { + z[4*(n0+1)-4] = z[4*(n0+1)+pp-4] + sigma + n0-- + continue + } + nn = 4*(n0+1) + pp - 1 + if n0 != i0+1 { + // Check whether e[n0-1] is negligible, 1 eigenvalue. 
+ if z[nn-5] > tol2*(sigma+z[nn-3]) && z[nn-2*pp-4] > tol2*z[nn-7] { + // Check whether e[n0-2] is negligible, 2 eigenvalues. + if z[nn-9] > tol2*sigma && z[nn-2*pp-8] > tol2*z[nn-11] { + break + } + } else { + z[4*(n0+1)-4] = z[4*(n0+1)+pp-4] + sigma + n0-- + continue + } + } + if z[nn-3] > z[nn-7] { + z[nn-3], z[nn-7] = z[nn-7], z[nn-3] + } + t = 0.5 * (z[nn-7] - z[nn-3] + z[nn-5]) + if z[nn-5] > z[nn-3]*tol2 && t != 0 { + s := z[nn-3] * (z[nn-5] / t) + if s <= t { + s = z[nn-3] * (z[nn-5] / (t * (1 + math.Sqrt(1+s/t)))) + } else { + s = z[nn-3] * (z[nn-5] / (t + math.Sqrt(t)*math.Sqrt(t+s))) + } + t = z[nn-7] + (s + z[nn-5]) + z[nn-3] *= z[nn-7] / t + z[nn-7] = t + } + z[4*(n0+1)-8] = z[nn-7] + sigma + z[4*(n0+1)-4] = z[nn-3] + sigma + n0 -= 2 + } + if pp == 2 { + pp = 0 + } + + // Reverse the qd-array, if warranted. + if dmin <= 0 || n0 < n0in { + if cbias*z[4*(i0+1)+pp-4] < z[4*(n0+1)+pp-4] { + ipn4Out := 4 * (i0 + n0 + 2) + for j4loop := 4 * (i0 + 1); j4loop <= 2*((i0+1)+(n0+1)-1); j4loop += 4 { + ipn4 := ipn4Out - 1 + j4 := j4loop - 1 + + z[j4-3], z[ipn4-j4-4] = z[ipn4-j4-4], z[j4-3] + z[j4-2], z[ipn4-j4-3] = z[ipn4-j4-3], z[j4-2] + z[j4-1], z[ipn4-j4-6] = z[ipn4-j4-6], z[j4-1] + z[j4], z[ipn4-j4-5] = z[ipn4-j4-5], z[j4] + } + if n0-i0 <= 4 { + z[4*(n0+1)+pp-2] = z[4*(i0+1)+pp-2] + z[4*(n0+1)-pp-1] = z[4*(i0+1)-pp-1] + } + dmin2 = math.Min(dmin2, z[4*(i0+1)-pp-2]) + z[4*(n0+1)+pp-2] = math.Min(math.Min(z[4*(n0+1)+pp-2], z[4*(i0+1)+pp-2]), z[4*(i0+1)+pp+2]) + z[4*(n0+1)-pp-1] = math.Min(math.Min(z[4*(n0+1)-pp-1], z[4*(i0+1)-pp-1]), z[4*(i0+1)-pp+3]) + qmax = math.Max(math.Max(qmax, z[4*(i0+1)+pp-4]), z[4*(i0+1)+pp]) + dmin = math.Copysign(0, -1) // Fortran code has -zero, but -0 in go is 0 + } + } + + // Choose a shift. + tau, ttype, g = impl.Dlasq4(i0, n0, z, pp, n0in, dmin, dmin1, dmin2, dn, dn1, dn2, tau, ttype, g) + + // Call dqds until dmin > 0. 
+loop: + for { + i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dn1, dn2 = impl.Dlasq5(i0, n0, z, pp, tau, sigma) + + nDiv += n0 - i0 + 2 + iter++ + switch { + case dmin >= 0 && dmin1 >= 0: + // Success. + goto done + + case dmin < 0 && dmin1 > 0 && z[4*n0-pp-1] < tol*(sigma+dn1) && math.Abs(dn) < tol*sigma: + // Convergence hidden by negative dn. + z[4*n0-pp+1] = 0 + dmin = 0 + goto done + + case dmin < 0: + // Tau too big. Select new Tau and try again. + nFail++ + if ttype < -22 { + // Failed twice. Play it safe. + tau = 0 + } else if dmin1 > 0 { + // Late failure. Gives excellent shift. + tau = (tau + dmin) * (1 - 2*eps) + ttype -= 11 + } else { + // Early failure. Divide by 4. + tau = tau / 4 + ttype -= 12 + } + + case math.IsNaN(dmin): + if tau == 0 { + break loop + } + tau = 0 + + default: + // Possible underflow. Play it safe. + break loop + } + } + + // Risk of underflow. + dmin, dmin1, dmin2, dn, dn1, dn2 = impl.Dlasq6(i0, n0, z, pp) + nDiv += n0 - i0 + 2 + iter++ + tau = 0 + +done: + if tau < sigma { + desig += tau + t = sigma + desig + desig -= t - sigma + } else { + t = sigma + tau + desig += sigma - (t - tau) + } + sigma = t + return i0, n0, pp, dmin, sigma, desig, qmax, nFail, iter, nDiv, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go new file mode 100644 index 0000000000..f6dbb31b98 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq4.go @@ -0,0 +1,249 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq4 computes an approximation to the smallest eigenvalue using values of d +// from the previous transform. +// i0, n0, and n0in are zero-indexed. +// +// Dlasq4 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlasq4(i0, n0 int, z []float64, pp int, n0in int, dmin, dmin1, dmin2, dn, dn1, dn2, tau float64, ttype int, g float64) (tauOut float64, ttypeOut int, gOut float64) { + switch { + case i0 < 0: + panic(i0LT0) + case n0 < 0: + panic(n0LT0) + case len(z) < 4*n0: + panic(shortZ) + case pp != 0 && pp != 1: + panic(badPp) + } + + const ( + cnst1 = 0.563 + cnst2 = 1.01 + cnst3 = 1.05 + + cnstthird = 0.333 // TODO(btracey): Fix? + ) + // A negative dmin forces the shift to take that absolute value + // ttype records the type of shift. + if dmin <= 0 { + tau = -dmin + ttype = -1 + return tau, ttype, g + } + nn := 4*(n0+1) + pp - 1 // -1 for zero indexing + s := math.NaN() // Poison s so that failure to take a path below is obvious + if n0in == n0 { + // No eigenvalues deflated. + if dmin == dn || dmin == dn1 { + b1 := math.Sqrt(z[nn-3]) * math.Sqrt(z[nn-5]) + b2 := math.Sqrt(z[nn-7]) * math.Sqrt(z[nn-9]) + a2 := z[nn-7] + z[nn-5] + if dmin == dn && dmin1 == dn1 { + gap2 := dmin2 - a2 - dmin2/4 + var gap1 float64 + if gap2 > 0 && gap2 > b2 { + gap1 = a2 - dn - (b2/gap2)*b2 + } else { + gap1 = a2 - dn - (b1 + b2) + } + if gap1 > 0 && gap1 > b1 { + s = math.Max(dn-(b1/gap1)*b1, 0.5*dmin) + ttype = -2 + } else { + s = 0 + if dn > b1 { + s = dn - b1 + } + if a2 > b1+b2 { + s = math.Min(s, a2-(b1+b2)) + } + s = math.Max(s, cnstthird*dmin) + ttype = -3 + } + } else { + ttype = -4 + s = dmin / 4 + var gam float64 + var np int + if dmin == dn { + gam = dn + a2 = 0 + if z[nn-5] > z[nn-7] { + return tau, ttype, g + } + b2 = z[nn-5] / z[nn-7] + np = nn - 9 + } else { + np = nn - 2*pp + gam = dn1 + if z[np-4] > z[np-2] { + return tau, ttype, g + } + a2 = z[np-4] / z[np-2] + if z[nn-9] > z[nn-11] { + return tau, ttype, g + } + b2 = z[nn-9] / z[nn-11] + np = nn - 13 + } + // Approximate contribution to norm squared from i < nn-1. 
+ a2 += b2 + for i4loop := np + 1; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + if b2 == 0 { + break + } + b1 = b2 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b2 *= z[i4] / z[i4-2] + a2 += b2 + if 100*math.Max(b2, b1) < a2 || cnst1 < a2 { + break + } + } + a2 *= cnst3 + // Rayleigh quotient residual bound. + if a2 < cnst1 { + s = gam * (1 - math.Sqrt(a2)) / (1 + a2) + } + } + } else if dmin == dn2 { + ttype = -5 + s = dmin / 4 + // Compute contribution to norm squared from i > nn-2. + np := nn - 2*pp + b1 := z[np-2] + b2 := z[np-6] + gam := dn2 + if z[np-8] > b2 || z[np-4] > b1 { + return tau, ttype, g + } + a2 := (z[np-8] / b2) * (1 + z[np-4]/b1) + // Approximate contribution to norm squared from i < nn-2. + if n0-i0 > 2 { + b2 = z[nn-13] / z[nn-15] + a2 += b2 + for i4loop := (nn + 1) - 17; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + if b2 == 0 { + break + } + b1 = b2 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b2 *= z[i4] / z[i4-2] + a2 += b2 + if 100*math.Max(b2, b1) < a2 || cnst1 < a2 { + break + } + } + a2 *= cnst3 + } + if a2 < cnst1 { + s = gam * (1 - math.Sqrt(a2)) / (1 + a2) + } + } else { + // Case 6, no information to guide us. + if ttype == -6 { + g += cnstthird * (1 - g) + } else if ttype == -18 { + g = cnstthird / 4 + } else { + g = 1.0 / 4 + } + s = g * dmin + ttype = -6 + } + } else if n0in == (n0 + 1) { + // One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. 
+ if dmin1 == dn1 && dmin2 == dn2 { + ttype = -7 + s = cnstthird * dmin1 + if z[nn-5] > z[nn-7] { + return tau, ttype, g + } + b1 := z[nn-5] / z[nn-7] + b2 := b1 + if b2 != 0 { + for i4loop := 4*(n0+1) - 9 + pp; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + a2 := b1 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b1 *= z[i4] / z[i4-2] + b2 += b1 + if 100*math.Max(b1, a2) < b2 { + break + } + } + } + b2 = math.Sqrt(cnst3 * b2) + a2 := dmin1 / (1 + b2*b2) + gap2 := 0.5*dmin2 - a2 + if gap2 > 0 && gap2 > b2*a2 { + s = math.Max(s, a2*(1-cnst2*a2*(b2/gap2)*b2)) + } else { + s = math.Max(s, a2*(1-cnst2*b2)) + ttype = -8 + } + } else { + s = dmin1 / 4 + if dmin1 == dn1 { + s = 0.5 * dmin1 + } + ttype = -9 + } + } else if n0in == (n0 + 2) { + // Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN. + if dmin2 == dn2 && 2*z[nn-5] < z[nn-7] { + ttype = -10 + s = cnstthird * dmin2 + if z[nn-5] > z[nn-7] { + return tau, ttype, g + } + b1 := z[nn-5] / z[nn-7] + b2 := b1 + if b2 != 0 { + for i4loop := 4*(n0+1) - 9 + pp; i4loop >= 4*(i0+1)-1+pp; i4loop -= 4 { + i4 := i4loop - 1 + if z[i4] > z[i4-2] { + return tau, ttype, g + } + b1 *= z[i4] / z[i4-2] + b2 += b1 + if 100*b1 < b2 { + break + } + } + } + b2 = math.Sqrt(cnst3 * b2) + a2 := dmin2 / (1 + b2*b2) + gap2 := z[nn-7] + z[nn-9] - math.Sqrt(z[nn-11])*math.Sqrt(z[nn-9]) - a2 + if gap2 > 0 && gap2 > b2*a2 { + s = math.Max(s, a2*(1-cnst2*a2*(b2/gap2)*b2)) + } else { + s = math.Max(s, a2*(1-cnst2*b2)) + } + } else { + s = dmin2 / 4 + ttype = -11 + } + } else if n0in > n0+2 { + // Case 12, more than two eigenvalues deflated. No information. + s = 0 + ttype = -12 + } + tau = s + return tau, ttype, g +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go new file mode 100644 index 0000000000..d3826d9186 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq5.go @@ -0,0 +1,140 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq5 computes one dqds transform in ping-pong form. +// i0 and n0 are zero-indexed. +// +// Dlasq5 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq5(i0, n0 int, z []float64, pp int, tau, sigma float64) (i0Out, n0Out, ppOut int, tauOut, sigmaOut, dmin, dmin1, dmin2, dn, dnm1, dnm2 float64) { + // The lapack function has inputs for ieee and eps, but Go requires ieee so + // these are unnecessary. + + switch { + case i0 < 0: + panic(i0LT0) + case n0 < 0: + panic(n0LT0) + case len(z) < 4*n0: + panic(shortZ) + case pp != 0 && pp != 1: + panic(badPp) + } + + if n0-i0-1 <= 0 { + return i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dnm1, dnm2 + } + + eps := dlamchP + dthresh := eps * (sigma + tau) + if tau < dthresh*0.5 { + tau = 0 + } + var j4 int + var emin float64 + if tau != 0 { + j4 = 4*i0 + pp + emin = z[j4+4] + d := z[j4] - tau + dmin = d + // In the reference there are code paths that actually return this value. + // dmin1 = -z[j4] + if pp == 0 { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-2] = d + z[j4-1] + tmp := z[j4+1] / z[j4-2] + d = d*tmp - tau + dmin = math.Min(dmin, d) + z[j4] = z[j4-1] * tmp + emin = math.Min(z[j4], emin) + } + } else { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-3] = d + z[j4] + tmp := z[j4+2] / z[j4-3] + d = d*tmp - tau + dmin = math.Min(dmin, d) + z[j4-1] = z[j4] * tmp + emin = math.Min(z[j4-1], emin) + } + } + // Unroll the last two steps. 
+ dnm2 = d + dmin2 = dmin + j4 = 4*((n0+1)-2) - pp - 1 + j4p2 := j4 + 2*pp - 1 + z[j4-2] = dnm2 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dnm1 = z[j4p2+2]*(dnm2/z[j4-2]) - tau + dmin = math.Min(dmin, dnm1) + + dmin1 = dmin + j4 += 4 + j4p2 = j4 + 2*pp - 1 + z[j4-2] = dnm1 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dn = z[j4p2+2]*(dnm1/z[j4-2]) - tau + dmin = math.Min(dmin, dn) + } else { + // This is the version that sets d's to zero if they are small enough. + j4 = 4*(i0+1) + pp - 4 + emin = z[j4+4] + d := z[j4] - tau + dmin = d + // In the reference there are code paths that actually return this value. + // dmin1 = -z[j4] + if pp == 0 { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-2] = d + z[j4-1] + tmp := z[j4+1] / z[j4-2] + d = d*tmp - tau + if d < dthresh { + d = 0 + } + dmin = math.Min(dmin, d) + z[j4] = z[j4-1] * tmp + emin = math.Min(z[j4], emin) + } + } else { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-3] = d + z[j4] + tmp := z[j4+2] / z[j4-3] + d = d*tmp - tau + if d < dthresh { + d = 0 + } + dmin = math.Min(dmin, d) + z[j4-1] = z[j4] * tmp + emin = math.Min(z[j4-1], emin) + } + } + // Unroll the last two steps. 
+ dnm2 = d + dmin2 = dmin + j4 = 4*((n0+1)-2) - pp - 1 + j4p2 := j4 + 2*pp - 1 + z[j4-2] = dnm2 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dnm1 = z[j4p2+2]*(dnm2/z[j4-2]) - tau + dmin = math.Min(dmin, dnm1) + + dmin1 = dmin + j4 += 4 + j4p2 = j4 + 2*pp - 1 + z[j4-2] = dnm1 + z[j4p2] + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dn = z[j4p2+2]*(dnm1/z[j4-2]) - tau + dmin = math.Min(dmin, dn) + } + z[j4+2] = dn + z[4*(n0+1)-pp-1] = emin + return i0, n0, pp, tau, sigma, dmin, dmin1, dmin2, dn, dnm1, dnm2 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go new file mode 100644 index 0000000000..54bf587562 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasq6.go @@ -0,0 +1,118 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasq6 computes one dqd transform in ping-pong form with protection against +// overflow and underflow. z has length at least 4*(n0+1) and holds the qd array. +// i0 is the zero-based first index. +// n0 is the zero-based last index. +// +// Dlasq6 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasq6(i0, n0 int, z []float64, pp int) (dmin, dmin1, dmin2, dn, dnm1, dnm2 float64) { + switch { + case i0 < 0: + panic(i0LT0) + case n0 < 0: + panic(n0LT0) + case len(z) < 4*n0: + panic(shortZ) + case pp != 0 && pp != 1: + panic(badPp) + } + + if n0-i0-1 <= 0 { + return dmin, dmin1, dmin2, dn, dnm1, dnm2 + } + + safmin := dlamchS + j4 := 4*(i0+1) + pp - 4 // -4 rather than -3 for zero indexing + emin := z[j4+4] + d := z[j4] + dmin = d + if pp == 0 { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 // Translate back to zero-indexed. 
+ z[j4-2] = d + z[j4-1] + if z[j4-2] == 0 { + z[j4] = 0 + d = z[j4+1] + dmin = d + emin = 0 + } else if safmin*z[j4+1] < z[j4-2] && safmin*z[j4-2] < z[j4+1] { + tmp := z[j4+1] / z[j4-2] + z[j4] = z[j4-1] * tmp + d *= tmp + } else { + z[j4] = z[j4+1] * (z[j4-1] / z[j4-2]) + d = z[j4+1] * (d / z[j4-2]) + } + dmin = math.Min(dmin, d) + emin = math.Min(emin, z[j4]) + } + } else { + for j4loop := 4 * (i0 + 1); j4loop <= 4*((n0+1)-3); j4loop += 4 { + j4 := j4loop - 1 + z[j4-3] = d + z[j4] + if z[j4-3] == 0 { + z[j4-1] = 0 + d = z[j4+2] + dmin = d + emin = 0 + } else if safmin*z[j4+2] < z[j4-3] && safmin*z[j4-3] < z[j4+2] { + tmp := z[j4+2] / z[j4-3] + z[j4-1] = z[j4] * tmp + d *= tmp + } else { + z[j4-1] = z[j4+2] * (z[j4] / z[j4-3]) + d = z[j4+2] * (d / z[j4-3]) + } + dmin = math.Min(dmin, d) + emin = math.Min(emin, z[j4-1]) + } + } + // Unroll last two steps. + dnm2 = d + dmin2 = dmin + j4 = 4*(n0-1) - pp - 1 + j4p2 := j4 + 2*pp - 1 + z[j4-2] = dnm2 + z[j4p2] + if z[j4-2] == 0 { + z[j4] = 0 + dnm1 = z[j4p2+2] + dmin = dnm1 + emin = 0 + } else if safmin*z[j4p2+2] < z[j4-2] && safmin*z[j4-2] < z[j4p2+2] { + tmp := z[j4p2+2] / z[j4-2] + z[j4] = z[j4p2] * tmp + dnm1 = dnm2 * tmp + } else { + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dnm1 = z[j4p2+2] * (dnm2 / z[j4-2]) + } + dmin = math.Min(dmin, dnm1) + dmin1 = dmin + j4 += 4 + j4p2 = j4 + 2*pp - 1 + z[j4-2] = dnm1 + z[j4p2] + if z[j4-2] == 0 { + z[j4] = 0 + dn = z[j4p2+2] + dmin = dn + emin = 0 + } else if safmin*z[j4p2+2] < z[j4-2] && safmin*z[j4-2] < z[j4p2+2] { + tmp := z[j4p2+2] / z[j4-2] + z[j4] = z[j4p2] * tmp + dn = dnm1 * tmp + } else { + z[j4] = z[j4p2+2] * (z[j4p2] / z[j4-2]) + dn = z[j4p2+2] * (dnm1 / z[j4-2]) + } + dmin = math.Min(dmin, dn) + z[j4+2] = dn + z[4*(n0+1)-pp-1] = emin + return dmin, dmin1, dmin2, dn, dnm1, dnm2 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go new file mode 100644 index 0000000000..a7dbe002d5 --- /dev/null +++ 
b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasr.go @@ -0,0 +1,279 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dlasr applies a sequence of plane rotations to the m×n matrix A. This series +// of plane rotations is implicitly represented by a matrix P. P is multiplied +// by a depending on the value of side -- A = P * A if side == lapack.Left, +// A = A * P^T if side == lapack.Right. +// +// The exact value of P depends on the value of pivot, but in all cases P is +// implicitly represented by a series of 2×2 rotation matrices. The entries of +// rotation matrix k are defined by s[k] and c[k] +// R(k) = [ c[k] s[k]] +// [-s[k] s[k]] +// If direct == lapack.Forward, the rotation matrices are applied as +// P = P(z-1) * ... * P(2) * P(1), while if direct == lapack.Backward they are +// applied as P = P(1) * P(2) * ... * P(n). +// +// pivot defines the mapping of the elements in R(k) to P(k). +// If pivot == lapack.Variable, the rotation is performed for the (k, k+1) plane. +// P(k) = [1 ] +// [ ... ] +// [ 1 ] +// [ c[k] s[k] ] +// [ -s[k] c[k] ] +// [ 1 ] +// [ ... ] +// [ 1] +// if pivot == lapack.Top, the rotation is performed for the (1, k+1) plane, +// P(k) = [c[k] s[k] ] +// [ 1 ] +// [ ... ] +// [ 1 ] +// [-s[k] c[k] ] +// [ 1 ] +// [ ... ] +// [ 1] +// and if pivot == lapack.Bottom, the rotation is performed for the (k, z) plane. +// P(k) = [1 ] +// [ ... ] +// [ 1 ] +// [ c[k] s[k]] +// [ 1 ] +// [ ... ] +// [ 1 ] +// [ -s[k] c[k]] +// s and c have length m - 1 if side == blas.Left, and n - 1 if side == blas.Right. +// +// Dlasr is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlasr(side blas.Side, pivot lapack.Pivot, direct lapack.Direct, m, n int, c, s, a []float64, lda int) { + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case pivot != lapack.Variable && pivot != lapack.Top && pivot != lapack.Bottom: + panic(badPivot) + case direct != lapack.Forward && direct != lapack.Backward: + panic(badDirect) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if m == 0 || n == 0 { + return + } + + if side == blas.Left { + if len(c) < m-1 { + panic(shortC) + } + if len(s) < m-1 { + panic(shortS) + } + } else { + if len(c) < n-1 { + panic(shortC) + } + if len(s) < n-1 { + panic(shortS) + } + } + if len(a) < (m-1)*lda+n { + panic(shortA) + } + + if side == blas.Left { + if pivot == lapack.Variable { + if direct == lapack.Forward { + for j := 0; j < m-1; j++ { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp2 := a[j*lda+i] + tmp := a[(j+1)*lda+i] + a[(j+1)*lda+i] = ctmp*tmp - stmp*tmp2 + a[j*lda+i] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + for j := m - 2; j >= 0; j-- { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp2 := a[j*lda+i] + tmp := a[(j+1)*lda+i] + a[(j+1)*lda+i] = ctmp*tmp - stmp*tmp2 + a[j*lda+i] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } else if pivot == lapack.Top { + if direct == lapack.Forward { + for j := 1; j < m; j++ { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp := a[j*lda+i] + tmp2 := a[i] + a[j*lda+i] = ctmp*tmp - stmp*tmp2 + a[i] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + for j := m - 1; j >= 1; j-- { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp := a[j*lda+i] + tmp2 := a[i] + a[j*lda+i] = 
ctmp*tmp - stmp*tmp2 + a[i] = stmp*tmp + ctmp*tmp2 + } + } + } + } + } + return + } + if direct == lapack.Forward { + for j := 0; j < m-1; j++ { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp := a[j*lda+i] + tmp2 := a[(m-1)*lda+i] + a[j*lda+i] = stmp*tmp2 + ctmp*tmp + a[(m-1)*lda+i] = ctmp*tmp2 - stmp*tmp + } + } + } + return + } + for j := m - 2; j >= 0; j-- { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < n; i++ { + tmp := a[j*lda+i] + tmp2 := a[(m-1)*lda+i] + a[j*lda+i] = stmp*tmp2 + ctmp*tmp + a[(m-1)*lda+i] = ctmp*tmp2 - stmp*tmp + } + } + } + return + } + if pivot == lapack.Variable { + if direct == lapack.Forward { + for j := 0; j < n-1; j++ { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j+1] + tmp2 := a[i*lda+j] + a[i*lda+j+1] = ctmp*tmp - stmp*tmp2 + a[i*lda+j] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + for j := n - 2; j >= 0; j-- { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j+1] + tmp2 := a[i*lda+j] + a[i*lda+j+1] = ctmp*tmp - stmp*tmp2 + a[i*lda+j] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } else if pivot == lapack.Top { + if direct == lapack.Forward { + for j := 1; j < n; j++ { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda] + a[i*lda+j] = ctmp*tmp - stmp*tmp2 + a[i*lda] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + for j := n - 1; j >= 1; j-- { + ctmp := c[j-1] + stmp := s[j-1] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda] + a[i*lda+j] = ctmp*tmp - stmp*tmp2 + a[i*lda] = stmp*tmp + ctmp*tmp2 + } + } + } + return + } + if direct == lapack.Forward { + for j := 0; j < n-1; j++ { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda+n-1] + 
a[i*lda+j] = stmp*tmp2 + ctmp*tmp + a[i*lda+n-1] = ctmp*tmp2 - stmp*tmp + } + + } + } + return + } + for j := n - 2; j >= 0; j-- { + ctmp := c[j] + stmp := s[j] + if ctmp != 1 || stmp != 0 { + for i := 0; i < m; i++ { + tmp := a[i*lda+j] + tmp2 := a[i*lda+n-1] + a[i*lda+j] = stmp*tmp2 + ctmp*tmp + a[i*lda+n-1] = ctmp*tmp2 - stmp*tmp + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go new file mode 100644 index 0000000000..be472805bf --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasrt.go @@ -0,0 +1,36 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "sort" + + "gonum.org/v1/gonum/lapack" +) + +// Dlasrt sorts the numbers in the input slice d. If s == lapack.SortIncreasing, +// the elements are sorted in increasing order. If s == lapack.SortDecreasing, +// the elements are sorted in decreasing order. For other values of s Dlasrt +// will panic. +// +// Dlasrt is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasrt(s lapack.Sort, n int, d []float64) { + switch { + case n < 0: + panic(nLT0) + case len(d) < n: + panic(shortD) + } + + d = d[:n] + switch s { + default: + panic(badSort) + case lapack.SortIncreasing: + sort.Float64s(d) + case lapack.SortDecreasing: + sort.Sort(sort.Reverse(sort.Float64Slice(d))) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go new file mode 100644 index 0000000000..9c2dc7729f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlassq.go @@ -0,0 +1,41 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import "math" + +// Dlassq updates a sum of squares in scaled form. The input parameters scale and +// sumsq represent the current scale and total sum of squares. These values are +// updated with the information in the first n elements of the vector specified +// by x and incX. +// +// Dlassq is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlassq(n int, x []float64, incx int, scale float64, sumsq float64) (scl, smsq float64) { + switch { + case n < 0: + panic(nLT0) + case incx <= 0: + panic(badIncX) + case len(x) < 1+(n-1)*incx: + panic(shortX) + } + + if n == 0 { + return scale, sumsq + } + + for ix := 0; ix <= (n-1)*incx; ix += incx { + absxi := math.Abs(x[ix]) + if absxi > 0 || math.IsNaN(absxi) { + if scale < absxi { + sumsq = 1 + sumsq*(scale/absxi)*(scale/absxi) + scale = absxi + } else { + sumsq += (absxi / scale) * (absxi / scale) + } + } + } + return scale, sumsq +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go new file mode 100644 index 0000000000..204af19316 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasv2.go @@ -0,0 +1,115 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Dlasv2 computes the singular value decomposition of a 2×2 matrix. +// [ csl snl] [f g] [csr -snr] = [ssmax 0] +// [-snl csl] [0 h] [snr csr] = [ 0 ssmin] +// ssmax is the larger absolute singular value, and ssmin is the smaller absolute +// singular value. [cls, snl] and [csr, snr] are the left and right singular vectors. +// +// Dlasv2 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dlasv2(f, g, h float64) (ssmin, ssmax, snr, csr, snl, csl float64) { + ft := f + fa := math.Abs(ft) + ht := h + ha := math.Abs(h) + // pmax points to the largest element of the matrix in terms of absolute value. + // 1 if F, 2 if G, 3 if H. + pmax := 1 + swap := ha > fa + if swap { + pmax = 3 + ft, ht = ht, ft + fa, ha = ha, fa + } + gt := g + ga := math.Abs(gt) + var clt, crt, slt, srt float64 + if ga == 0 { + ssmin = ha + ssmax = fa + clt = 1 + crt = 1 + slt = 0 + srt = 0 + } else { + gasmall := true + if ga > fa { + pmax = 2 + if (fa / ga) < dlamchE { + gasmall = false + ssmax = ga + if ha > 1 { + ssmin = fa / (ga / ha) + } else { + ssmin = (fa / ga) * ha + } + clt = 1 + slt = ht / gt + srt = 1 + crt = ft / gt + } + } + if gasmall { + d := fa - ha + l := d / fa + if d == fa { // deal with inf + l = 1 + } + m := gt / ft + t := 2 - l + s := math.Hypot(t, m) + var r float64 + if l == 0 { + r = math.Abs(m) + } else { + r = math.Hypot(l, m) + } + a := 0.5 * (s + r) + ssmin = ha / a + ssmax = fa * a + if m == 0 { + if l == 0 { + t = math.Copysign(2, ft) * math.Copysign(1, gt) + } else { + t = gt/math.Copysign(d, ft) + m/t + } + } else { + t = (m/(s+t) + m/(r+l)) * (1 + a) + } + l = math.Hypot(t, 2) + crt = 2 / l + srt = t / l + clt = (crt + srt*m) / a + slt = (ht / ft) * srt / a + } + } + if swap { + csl = srt + snl = crt + csr = slt + snr = clt + } else { + csl = clt + snl = slt + csr = crt + snr = srt + } + var tsign float64 + switch pmax { + case 1: + tsign = math.Copysign(1, csr) * math.Copysign(1, csl) * math.Copysign(1, f) + case 2: + tsign = math.Copysign(1, snr) * math.Copysign(1, csl) * math.Copysign(1, g) + case 3: + tsign = math.Copysign(1, snr) * math.Copysign(1, snl) * math.Copysign(1, h) + } + ssmax = math.Copysign(ssmax, tsign) + ssmin = math.Copysign(ssmin, tsign*math.Copysign(1, f)*math.Copysign(1, h)) + return ssmin, ssmax, snr, csr, snl, csl +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go 
b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go new file mode 100644 index 0000000000..b207d1218c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlaswp.go @@ -0,0 +1,52 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas/blas64" + +// Dlaswp swaps the rows k1 to k2 of a rectangular matrix A according to the +// indices in ipiv so that row k is swapped with ipiv[k]. +// +// n is the number of columns of A and incX is the increment for ipiv. If incX +// is 1, the swaps are applied from k1 to k2. If incX is -1, the swaps are +// applied in reverse order from k2 to k1. For other values of incX Dlaswp will +// panic. ipiv must have length k2+1, otherwise Dlaswp will panic. +// +// The indices k1, k2, and the elements of ipiv are zero-based. +// +// Dlaswp is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlaswp(n int, a []float64, lda int, k1, k2 int, ipiv []int, incX int) { + switch { + case n < 0: + panic(nLT0) + case k2 < 0: + panic(badK2) + case k1 < 0 || k2 < k1: + panic(badK1) + case lda < max(1, n): + panic(badLdA) + case len(a) < (k2-1)*lda+n: + panic(shortA) + case len(ipiv) != k2+1: + panic(badLenIpiv) + case incX != 1 && incX != -1: + panic(absIncNotOne) + } + + if n == 0 { + return + } + + bi := blas64.Implementation() + if incX == 1 { + for k := k1; k <= k2; k++ { + bi.Dswap(n, a[k*lda:], 1, a[ipiv[k]*lda:], 1) + } + return + } + for k := k2; k >= k1; k-- { + bi.Dswap(n, a[k*lda:], 1, a[ipiv[k]*lda:], 1) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go new file mode 100644 index 0000000000..abfe60e58e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlasy2.go @@ -0,0 +1,290 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlasy2 solves the Sylvester matrix equation where the matrices are of order 1 +// or 2. It computes the unknown n1×n2 matrix X so that +// TL*X + sgn*X*TR = scale*B, if tranl == false and tranr == false, +// TL^T*X + sgn*X*TR = scale*B, if tranl == true and tranr == false, +// TL*X + sgn*X*TR^T = scale*B, if tranl == false and tranr == true, +// TL^T*X + sgn*X*TR^T = scale*B, if tranl == true and tranr == true, +// where TL is n1×n1, TR is n2×n2, B is n1×n2, and 1 <= n1,n2 <= 2. +// +// isgn must be 1 or -1, and n1 and n2 must be 0, 1, or 2, but these conditions +// are not checked. +// +// Dlasy2 returns three values, a scale factor that is chosen less than or equal +// to 1 to prevent the solution overflowing, the infinity norm of the solution, +// and an indicator of success. If ok is false, TL and TR have eigenvalues that +// are too close, so TL or TR is perturbed to get a non-singular equation. +// +// Dlasy2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlasy2(tranl, tranr bool, isgn, n1, n2 int, tl []float64, ldtl int, tr []float64, ldtr int, b []float64, ldb int, x []float64, ldx int) (scale, xnorm float64, ok bool) { + // TODO(vladimir-ch): Add input validation checks conditionally skipped + // using the build tag mechanism. + + ok = true + // Quick return if possible. + if n1 == 0 || n2 == 0 { + return scale, xnorm, ok + } + + // Set constants to control overflow. + eps := dlamchP + smlnum := dlamchS / eps + sgn := float64(isgn) + + if n1 == 1 && n2 == 1 { + // 1×1 case: TL11*X + sgn*X*TR11 = B11. 
+ tau1 := tl[0] + sgn*tr[0] + bet := math.Abs(tau1) + if bet <= smlnum { + tau1 = smlnum + bet = smlnum + ok = false + } + scale = 1 + gam := math.Abs(b[0]) + if smlnum*gam > bet { + scale = 1 / gam + } + x[0] = b[0] * scale / tau1 + xnorm = math.Abs(x[0]) + return scale, xnorm, ok + } + + if n1+n2 == 3 { + // 1×2 or 2×1 case. + var ( + smin float64 + tmp [4]float64 // tmp is used as a 2×2 row-major matrix. + btmp [2]float64 + ) + if n1 == 1 && n2 == 2 { + // 1×2 case: TL11*[X11 X12] + sgn*[X11 X12]*op[TR11 TR12] = [B11 B12]. + // [TR21 TR22] + smin = math.Abs(tl[0]) + smin = math.Max(smin, math.Max(math.Abs(tr[0]), math.Abs(tr[1]))) + smin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1]))) + smin = math.Max(eps*smin, smlnum) + tmp[0] = tl[0] + sgn*tr[0] + tmp[3] = tl[0] + sgn*tr[ldtr+1] + if tranr { + tmp[1] = sgn * tr[1] + tmp[2] = sgn * tr[ldtr] + } else { + tmp[1] = sgn * tr[ldtr] + tmp[2] = sgn * tr[1] + } + btmp[0] = b[0] + btmp[1] = b[1] + } else { + // 2×1 case: op[TL11 TL12]*[X11] + sgn*[X11]*TR11 = [B11]. + // [TL21 TL22]*[X21] [X21] [B21] + smin = math.Abs(tr[0]) + smin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1]))) + smin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1]))) + smin = math.Max(eps*smin, smlnum) + tmp[0] = tl[0] + sgn*tr[0] + tmp[3] = tl[ldtl+1] + sgn*tr[0] + if tranl { + tmp[1] = tl[ldtl] + tmp[2] = tl[1] + } else { + tmp[1] = tl[1] + tmp[2] = tl[ldtl] + } + btmp[0] = b[0] + btmp[1] = b[ldb] + } + + // Solve 2×2 system using complete pivoting. + // Set pivots less than smin to smin. + + bi := blas64.Implementation() + ipiv := bi.Idamax(len(tmp), tmp[:], 1) + // Compute the upper triangular matrix [u11 u12]. + // [ 0 u22] + u11 := tmp[ipiv] + if math.Abs(u11) <= smin { + ok = false + u11 = smin + } + locu12 := [4]int{1, 0, 3, 2} // Index in tmp of the element on the same row as the pivot. 
+ u12 := tmp[locu12[ipiv]] + locl21 := [4]int{2, 3, 0, 1} // Index in tmp of the element on the same column as the pivot. + l21 := tmp[locl21[ipiv]] / u11 + locu22 := [4]int{3, 2, 1, 0} // Index in tmp of the remaining element. + u22 := tmp[locu22[ipiv]] - l21*u12 + if math.Abs(u22) <= smin { + ok = false + u22 = smin + } + if ipiv&0x2 != 0 { // true for ipiv equal to 2 and 3. + // The pivot was in the second row, swap the elements of + // the right-hand side. + btmp[0], btmp[1] = btmp[1], btmp[0]-l21*btmp[1] + } else { + btmp[1] -= l21 * btmp[0] + } + scale = 1 + if 2*smlnum*math.Abs(btmp[1]) > math.Abs(u22) || 2*smlnum*math.Abs(btmp[0]) > math.Abs(u11) { + scale = 0.5 / math.Max(math.Abs(btmp[0]), math.Abs(btmp[1])) + btmp[0] *= scale + btmp[1] *= scale + } + // Solve the system [u11 u12] [x21] = [ btmp[0] ]. + // [ 0 u22] [x22] [ btmp[1] ] + x22 := btmp[1] / u22 + x21 := btmp[0]/u11 - (u12/u11)*x22 + if ipiv&0x1 != 0 { // true for ipiv equal to 1 and 3. + // The pivot was in the second column, swap the elements + // of the solution. + x21, x22 = x22, x21 + } + x[0] = x21 + if n1 == 1 { + x[1] = x22 + xnorm = math.Abs(x[0]) + math.Abs(x[1]) + } else { + x[ldx] = x22 + xnorm = math.Max(math.Abs(x[0]), math.Abs(x[ldx])) + } + return scale, xnorm, ok + } + + // 2×2 case: op[TL11 TL12]*[X11 X12] + SGN*[X11 X12]*op[TR11 TR12] = [B11 B12]. + // [TL21 TL22] [X21 X22] [X21 X22] [TR21 TR22] [B21 B22] + // + // Solve equivalent 4×4 system using complete pivoting. + // Set pivots less than smin to smin. 
+ + smin := math.Max(math.Abs(tr[0]), math.Abs(tr[1])) + smin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1]))) + smin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1]))) + smin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1]))) + smin = math.Max(eps*smin, smlnum) + + var t [4][4]float64 + t[0][0] = tl[0] + sgn*tr[0] + t[1][1] = tl[0] + sgn*tr[ldtr+1] + t[2][2] = tl[ldtl+1] + sgn*tr[0] + t[3][3] = tl[ldtl+1] + sgn*tr[ldtr+1] + if tranl { + t[0][2] = tl[ldtl] + t[1][3] = tl[ldtl] + t[2][0] = tl[1] + t[3][1] = tl[1] + } else { + t[0][2] = tl[1] + t[1][3] = tl[1] + t[2][0] = tl[ldtl] + t[3][1] = tl[ldtl] + } + if tranr { + t[0][1] = sgn * tr[1] + t[1][0] = sgn * tr[ldtr] + t[2][3] = sgn * tr[1] + t[3][2] = sgn * tr[ldtr] + } else { + t[0][1] = sgn * tr[ldtr] + t[1][0] = sgn * tr[1] + t[2][3] = sgn * tr[ldtr] + t[3][2] = sgn * tr[1] + } + + var btmp [4]float64 + btmp[0] = b[0] + btmp[1] = b[1] + btmp[2] = b[ldb] + btmp[3] = b[ldb+1] + + // Perform elimination. + var jpiv [4]int // jpiv records any column swaps for pivoting. + for i := 0; i < 3; i++ { + var ( + xmax float64 + ipsv, jpsv int + ) + for ip := i; ip < 4; ip++ { + for jp := i; jp < 4; jp++ { + if math.Abs(t[ip][jp]) >= xmax { + xmax = math.Abs(t[ip][jp]) + ipsv = ip + jpsv = jp + } + } + } + if ipsv != i { + // The pivot is not in the top row of the unprocessed + // block, swap rows ipsv and i of t and btmp. + t[ipsv], t[i] = t[i], t[ipsv] + btmp[ipsv], btmp[i] = btmp[i], btmp[ipsv] + } + if jpsv != i { + // The pivot is not in the left column of the + // unprocessed block, swap columns jpsv and i of t. 
+ for k := 0; k < 4; k++ { + t[k][jpsv], t[k][i] = t[k][i], t[k][jpsv] + } + } + jpiv[i] = jpsv + if math.Abs(t[i][i]) < smin { + ok = false + t[i][i] = smin + } + for k := i + 1; k < 4; k++ { + t[k][i] /= t[i][i] + btmp[k] -= t[k][i] * btmp[i] + for j := i + 1; j < 4; j++ { + t[k][j] -= t[k][i] * t[i][j] + } + } + } + if math.Abs(t[3][3]) < smin { + ok = false + t[3][3] = smin + } + scale = 1 + if 8*smlnum*math.Abs(btmp[0]) > math.Abs(t[0][0]) || + 8*smlnum*math.Abs(btmp[1]) > math.Abs(t[1][1]) || + 8*smlnum*math.Abs(btmp[2]) > math.Abs(t[2][2]) || + 8*smlnum*math.Abs(btmp[3]) > math.Abs(t[3][3]) { + + maxbtmp := math.Max(math.Abs(btmp[0]), math.Abs(btmp[1])) + maxbtmp = math.Max(maxbtmp, math.Max(math.Abs(btmp[2]), math.Abs(btmp[3]))) + scale = 1 / 8 / maxbtmp + btmp[0] *= scale + btmp[1] *= scale + btmp[2] *= scale + btmp[3] *= scale + } + // Compute the solution of the upper triangular system t * tmp = btmp. + var tmp [4]float64 + for i := 3; i >= 0; i-- { + temp := 1 / t[i][i] + tmp[i] = btmp[i] * temp + for j := i + 1; j < 4; j++ { + tmp[i] -= temp * t[i][j] * tmp[j] + } + } + for i := 2; i >= 0; i-- { + if jpiv[i] != i { + tmp[i], tmp[jpiv[i]] = tmp[jpiv[i]], tmp[i] + } + } + x[0] = tmp[0] + x[1] = tmp[1] + x[ldx] = tmp[2] + x[ldx+1] = tmp[3] + xnorm = math.Max(math.Abs(tmp[0])+math.Abs(tmp[1]), math.Abs(tmp[2])+math.Abs(tmp[3])) + return scale, xnorm, ok +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go new file mode 100644 index 0000000000..018efc98cc --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrd.go @@ -0,0 +1,165 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlatrd reduces nb rows and columns of a real n×n symmetric matrix A to symmetric +// tridiagonal form. It computes the orthonormal similarity transformation +// Q^T * A * Q +// and returns the matrices V and W to apply to the unreduced part of A. If +// uplo == blas.Upper, the upper triangle is supplied and the last nb rows are +// reduced. If uplo == blas.Lower, the lower triangle is supplied and the first +// nb rows are reduced. +// +// a contains the symmetric matrix on entry with active triangular half specified +// by uplo. On exit, the nb columns have been reduced to tridiagonal form. The +// diagonal contains the diagonal of the reduced matrix, the off-diagonal is +// set to 1, and the remaining elements contain the data to construct Q. +// +// If uplo == blas.Upper, with n = 5 and nb = 2 on exit a is +// [ a a a v4 v5] +// [ a a v4 v5] +// [ a 1 v5] +// [ d 1] +// [ d] +// +// If uplo == blas.Lower, with n = 5 and nb = 2, on exit a is +// [ d ] +// [ 1 d ] +// [v1 1 a ] +// [v1 v2 a a ] +// [v1 v2 a a a] +// +// e contains the superdiagonal elements of the reduced matrix. If uplo == blas.Upper, +// e[n-nb:n-1] contains the last nb columns of the reduced matrix, while if +// uplo == blas.Lower, e[:nb] contains the first nb columns of the reduced matrix. +// e must have length at least n-1, and Dlatrd will panic otherwise. +// +// tau contains the scalar factors of the elementary reflectors needed to construct Q. +// The reflectors are stored in tau[n-nb:n-1] if uplo == blas.Upper, and in +// tau[:nb] if uplo == blas.Lower. tau must have length n-1, and Dlatrd will panic +// otherwise. +// +// w is an n×nb matrix. On exit it contains the data to update the unreduced part +// of A. +// +// The matrix Q is represented as a product of elementary reflectors. 
Each reflector +// H has the form +// I - tau * v * v^T +// If uplo == blas.Upper, +// Q = H_{n-1} * H_{n-2} * ... * H_{n-nb} +// where v[:i-1] is stored in A[:i-1,i], v[i-1] = 1, and v[i:n] = 0. +// +// If uplo == blas.Lower, +// Q = H_0 * H_1 * ... * H_{nb-1} +// where v[:i+1] = 0, v[i+1] = 1, and v[i+2:n] is stored in A[i+2:n,i]. +// +// The vectors v form the n×nb matrix V which is used with W to apply a +// symmetric rank-2 update to the unreduced part of A +// A = A - V * W^T - W * V^T +// +// Dlatrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlatrd(uplo blas.Uplo, n, nb int, a []float64, lda int, e, tau, w []float64, ldw int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case nb < 0: + panic(nbLT0) + case nb > n: + panic(nbGTN) + case lda < max(1, n): + panic(badLdA) + case ldw < max(1, nb): + panic(badLdW) + } + + if n == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(w) < (n-1)*ldw+nb: + panic(shortW) + case len(e) < n-1: + panic(shortE) + case len(tau) < n-1: + panic(shortTau) + } + + bi := blas64.Implementation() + + if uplo == blas.Upper { + for i := n - 1; i >= n-nb; i-- { + iw := i - n + nb + if i < n-1 { + // Update A(0:i, i). + bi.Dgemv(blas.NoTrans, i+1, n-i-1, -1, a[i+1:], lda, + w[i*ldw+iw+1:], 1, 1, a[i:], lda) + bi.Dgemv(blas.NoTrans, i+1, n-i-1, -1, w[iw+1:], ldw, + a[i*lda+i+1:], 1, 1, a[i:], lda) + } + if i > 0 { + // Generate elementary reflector H_i to annihilate A(0:i-2,i). + e[i-1], tau[i-1] = impl.Dlarfg(i, a[(i-1)*lda+i], a[i:], lda) + a[(i-1)*lda+i] = 1 + + // Compute W(0:i-1, i). 
+ bi.Dsymv(blas.Upper, i, 1, a, lda, a[i:], lda, 0, w[iw:], ldw) + if i < n-1 { + bi.Dgemv(blas.Trans, i, n-i-1, 1, w[iw+1:], ldw, + a[i:], lda, 0, w[(i+1)*ldw+iw:], ldw) + bi.Dgemv(blas.NoTrans, i, n-i-1, -1, a[i+1:], lda, + w[(i+1)*ldw+iw:], ldw, 1, w[iw:], ldw) + bi.Dgemv(blas.Trans, i, n-i-1, 1, a[i+1:], lda, + a[i:], lda, 0, w[(i+1)*ldw+iw:], ldw) + bi.Dgemv(blas.NoTrans, i, n-i-1, -1, w[iw+1:], ldw, + w[(i+1)*ldw+iw:], ldw, 1, w[iw:], ldw) + } + bi.Dscal(i, tau[i-1], w[iw:], ldw) + alpha := -0.5 * tau[i-1] * bi.Ddot(i, w[iw:], ldw, a[i:], lda) + bi.Daxpy(i, alpha, a[i:], lda, w[iw:], ldw) + } + } + } else { + // Reduce first nb columns of lower triangle. + for i := 0; i < nb; i++ { + // Update A(i:n, i) + bi.Dgemv(blas.NoTrans, n-i, i, -1, a[i*lda:], lda, + w[i*ldw:], 1, 1, a[i*lda+i:], lda) + bi.Dgemv(blas.NoTrans, n-i, i, -1, w[i*ldw:], ldw, + a[i*lda:], 1, 1, a[i*lda+i:], lda) + if i < n-1 { + // Generate elementary reflector H_i to annihilate A(i+2:n,i). + e[i], tau[i] = impl.Dlarfg(n-i-1, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) + a[(i+1)*lda+i] = 1 + + // Compute W(i+1:n,i). 
+ bi.Dsymv(blas.Lower, n-i-1, 1, a[(i+1)*lda+i+1:], lda, + a[(i+1)*lda+i:], lda, 0, w[(i+1)*ldw+i:], ldw) + bi.Dgemv(blas.Trans, n-i-1, i, 1, w[(i+1)*ldw:], ldw, + a[(i+1)*lda+i:], lda, 0, w[i:], ldw) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, a[(i+1)*lda:], lda, + w[i:], ldw, 1, w[(i+1)*ldw+i:], ldw) + bi.Dgemv(blas.Trans, n-i-1, i, 1, a[(i+1)*lda:], lda, + a[(i+1)*lda+i:], lda, 0, w[i:], ldw) + bi.Dgemv(blas.NoTrans, n-i-1, i, -1, w[(i+1)*ldw:], ldw, + w[i:], ldw, 1, w[(i+1)*ldw+i:], ldw) + bi.Dscal(n-i-1, tau[i], w[(i+1)*ldw+i:], ldw) + alpha := -0.5 * tau[i] * bi.Ddot(n-i-1, w[(i+1)*ldw+i:], ldw, + a[(i+1)*lda+i:], lda) + bi.Daxpy(n-i-1, alpha, a[(i+1)*lda+i:], lda, + w[(i+1)*ldw+i:], ldw) + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go new file mode 100644 index 0000000000..dc445c6fe1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlatrs.go @@ -0,0 +1,359 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlatrs solves a triangular system of equations scaled to prevent overflow. It +// solves +// A * x = scale * b if trans == blas.NoTrans +// A^T * x = scale * b if trans == blas.Trans +// where the scale s is set for numeric stability. +// +// A is an n×n triangular matrix. On entry, the slice x contains the values of +// b, and on exit it contains the solution vector x. +// +// If normin == true, cnorm is an input and cnorm[j] contains the norm of the off-diagonal +// part of the j^th column of A. If trans == blas.NoTrans, cnorm[j] must be greater +// than or equal to the infinity norm, and greater than or equal to the one-norm +// otherwise. 
If normin == false, then cnorm is treated as an output, and is set +// to contain the 1-norm of the off-diagonal part of the j^th column of A. +// +// Dlatrs is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dlatrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, normin bool, n int, a []float64, lda int, x []float64, cnorm []float64) (scale float64) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTrans) + case diag != blas.Unit && diag != blas.NonUnit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return 0 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(x) < n: + panic(shortX) + case len(cnorm) < n: + panic(shortCNorm) + } + + upper := uplo == blas.Upper + nonUnit := diag == blas.NonUnit + + smlnum := dlamchS / dlamchP + bignum := 1 / smlnum + scale = 1 + + bi := blas64.Implementation() + + if !normin { + if upper { + cnorm[0] = 0 + for j := 1; j < n; j++ { + cnorm[j] = bi.Dasum(j, a[j:], lda) + } + } else { + for j := 0; j < n-1; j++ { + cnorm[j] = bi.Dasum(n-j-1, a[(j+1)*lda+j:], lda) + } + cnorm[n-1] = 0 + } + } + // Scale the column norms by tscal if the maximum element in cnorm is greater than bignum. + imax := bi.Idamax(n, cnorm, 1) + tmax := cnorm[imax] + var tscal float64 + if tmax <= bignum { + tscal = 1 + } else { + tscal = 1 / (smlnum * tmax) + bi.Dscal(n, tscal, cnorm, 1) + } + + // Compute a bound on the computed solution vector to see if bi.Dtrsv can be used. + j := bi.Idamax(n, x, 1) + xmax := math.Abs(x[j]) + xbnd := xmax + var grow float64 + var jfirst, jlast, jinc int + if trans == blas.NoTrans { + if upper { + jfirst = n - 1 + jlast = -1 + jinc = -1 + } else { + jfirst = 0 + jlast = n + jinc = 1 + } + // Compute the growth in A * x = b. 
+ if tscal != 1 { + grow = 0 + goto Solve + } + if nonUnit { + grow = 1 / math.Max(xbnd, smlnum) + xbnd = grow + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + tjj := math.Abs(a[j*lda+j]) + xbnd = math.Min(xbnd, math.Min(1, tjj)*grow) + if tjj+cnorm[j] >= smlnum { + grow *= tjj / (tjj + cnorm[j]) + } else { + grow = 0 + } + } + grow = xbnd + } else { + grow = math.Min(1, 1/math.Max(xbnd, smlnum)) + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + grow *= 1 / (1 + cnorm[j]) + } + } + } else { + if upper { + jfirst = 0 + jlast = n + jinc = 1 + } else { + jfirst = n - 1 + jlast = -1 + jinc = -1 + } + if tscal != 1 { + grow = 0 + goto Solve + } + if nonUnit { + grow = 1 / (math.Max(xbnd, smlnum)) + xbnd = grow + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + xj := 1 + cnorm[j] + grow = math.Min(grow, xbnd/xj) + tjj := math.Abs(a[j*lda+j]) + if xj > tjj { + xbnd *= tjj / xj + } + } + grow = math.Min(grow, xbnd) + } else { + grow = math.Min(1, 1/math.Max(xbnd, smlnum)) + for j := jfirst; j != jlast; j += jinc { + if grow <= smlnum { + goto Solve + } + xj := 1 + cnorm[j] + grow /= xj + } + } + } + +Solve: + if grow*tscal > smlnum { + // Use the Level 2 BLAS solve if the reciprocal of the bound on + // elements of X is not too small. + bi.Dtrsv(uplo, trans, diag, n, a, lda, x, 1) + if tscal != 1 { + bi.Dscal(n, 1/tscal, cnorm, 1) + } + return scale + } + + // Use a Level 1 BLAS solve, scaling intermediate results. 
+ if xmax > bignum { + scale = bignum / xmax + bi.Dscal(n, scale, x, 1) + xmax = bignum + } + if trans == blas.NoTrans { + for j := jfirst; j != jlast; j += jinc { + xj := math.Abs(x[j]) + var tjj, tjjs float64 + if nonUnit { + tjjs = a[j*lda+j] * tscal + } else { + tjjs = tscal + if tscal == 1 { + goto Skip1 + } + } + tjj = math.Abs(tjjs) + if tjj > smlnum { + if tjj < 1 { + if xj > tjj*bignum { + rec := 1 / xj + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + } + x[j] /= tjjs + xj = math.Abs(x[j]) + } else if tjj > 0 { + if xj > tjj*bignum { + rec := (tjj * bignum) / xj + if cnorm[j] > 1 { + rec /= cnorm[j] + } + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + x[j] /= tjjs + xj = math.Abs(x[j]) + } else { + for i := 0; i < n; i++ { + x[i] = 0 + } + x[j] = 1 + xj = 1 + scale = 0 + xmax = 0 + } + Skip1: + if xj > 1 { + rec := 1 / xj + if cnorm[j] > (bignum-xmax)*rec { + rec *= 0.5 + bi.Dscal(n, rec, x, 1) + scale *= rec + } + } else if xj*cnorm[j] > bignum-xmax { + bi.Dscal(n, 0.5, x, 1) + scale *= 0.5 + } + if upper { + if j > 0 { + bi.Daxpy(j, -x[j]*tscal, a[j:], lda, x, 1) + i := bi.Idamax(j, x, 1) + xmax = math.Abs(x[i]) + } + } else { + if j < n-1 { + bi.Daxpy(n-j-1, -x[j]*tscal, a[(j+1)*lda+j:], lda, x[j+1:], 1) + i := j + bi.Idamax(n-j-1, x[j+1:], 1) + xmax = math.Abs(x[i]) + } + } + } + } else { + for j := jfirst; j != jlast; j += jinc { + xj := math.Abs(x[j]) + uscal := tscal + rec := 1 / math.Max(xmax, 1) + var tjjs float64 + if cnorm[j] > (bignum-xj)*rec { + rec *= 0.5 + if nonUnit { + tjjs = a[j*lda+j] * tscal + } else { + tjjs = tscal + } + tjj := math.Abs(tjjs) + if tjj > 1 { + rec = math.Min(1, rec*tjj) + uscal /= tjjs + } + if rec < 1 { + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + } + var sumj float64 + if uscal == 1 { + if upper { + sumj = bi.Ddot(j, a[j:], lda, x, 1) + } else if j < n-1 { + sumj = bi.Ddot(n-j-1, a[(j+1)*lda+j:], lda, x[j+1:], 1) + } + } else { + if upper { + for i := 0; i < j; i++ { + sumj += 
(a[i*lda+j] * uscal) * x[i] + } + } else if j < n { + for i := j + 1; i < n; i++ { + sumj += (a[i*lda+j] * uscal) * x[i] + } + } + } + if uscal == tscal { + x[j] -= sumj + xj := math.Abs(x[j]) + var tjjs float64 + if nonUnit { + tjjs = a[j*lda+j] * tscal + } else { + tjjs = tscal + if tscal == 1 { + goto Skip2 + } + } + tjj := math.Abs(tjjs) + if tjj > smlnum { + if tjj < 1 { + if xj > tjj*bignum { + rec = 1 / xj + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + } + x[j] /= tjjs + } else if tjj > 0 { + if xj > tjj*bignum { + rec = (tjj * bignum) / xj + bi.Dscal(n, rec, x, 1) + scale *= rec + xmax *= rec + } + x[j] /= tjjs + } else { + for i := 0; i < n; i++ { + x[i] = 0 + } + x[j] = 1 + scale = 0 + xmax = 0 + } + } else { + x[j] = x[j]/tjjs - sumj + } + Skip2: + xmax = math.Max(xmax, math.Abs(x[j])) + } + } + scale /= tscal + if tscal != 1 { + bi.Dscal(n, 1/tscal, cnorm, 1) + } + return scale +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go new file mode 100644 index 0000000000..ecce22cc64 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauu2.go @@ -0,0 +1,64 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlauu2 computes the product +// U * U^T if uplo is blas.Upper +// L^T * L if uplo is blas.Lower +// where U or L is stored in the upper or lower triangular part of A. +// Only the upper or lower triangle of the result is stored, overwriting +// the corresponding factor in A. +func (impl Implementation) Dlauu2(uplo blas.Uplo, n int, a []float64, lda int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. 
+ if n == 0 { + return + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + bi := blas64.Implementation() + + if uplo == blas.Upper { + // Compute the product U*U^T. + for i := 0; i < n; i++ { + aii := a[i*lda+i] + if i < n-1 { + a[i*lda+i] = bi.Ddot(n-i, a[i*lda+i:], 1, a[i*lda+i:], 1) + bi.Dgemv(blas.NoTrans, i, n-i-1, 1, a[i+1:], lda, a[i*lda+i+1:], 1, + aii, a[i:], lda) + } else { + bi.Dscal(i+1, aii, a[i:], lda) + } + } + } else { + // Compute the product L^T*L. + for i := 0; i < n; i++ { + aii := a[i*lda+i] + if i < n-1 { + a[i*lda+i] = bi.Ddot(n-i, a[i*lda+i:], lda, a[i*lda+i:], lda) + bi.Dgemv(blas.Trans, n-i-1, i, 1, a[(i+1)*lda:], lda, a[(i+1)*lda+i:], lda, + aii, a[i*lda:], 1) + } else { + bi.Dscal(i+1, aii, a[i*lda:], 1) + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go new file mode 100644 index 0000000000..67ecaddf4c --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dlauum.go @@ -0,0 +1,81 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dlauum computes the product +// U * U^T if uplo is blas.Upper +// L^T * L if uplo is blas.Lower +// where U or L is stored in the upper or lower triangular part of A. +// Only the upper or lower triangle of the result is stored, overwriting +// the corresponding factor in A. +func (impl Implementation) Dlauum(uplo blas.Uplo, n int, a []float64, lda int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + // Determine the block size. 
+ opts := "U" + if uplo == blas.Lower { + opts = "L" + } + nb := impl.Ilaenv(1, "DLAUUM", opts, n, -1, -1, -1) + + if nb <= 1 || n <= nb { + // Use unblocked code. + impl.Dlauu2(uplo, n, a, lda) + return + } + + // Use blocked code. + bi := blas64.Implementation() + if uplo == blas.Upper { + // Compute the product U*U^T. + for i := 0; i < n; i += nb { + ib := min(nb, n-i) + bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.NonUnit, + i, ib, 1, a[i*lda+i:], lda, a[i:], lda) + impl.Dlauu2(blas.Upper, ib, a[i*lda+i:], lda) + if n-i-ib > 0 { + bi.Dgemm(blas.NoTrans, blas.Trans, i, ib, n-i-ib, + 1, a[i+ib:], lda, a[i*lda+i+ib:], lda, 1, a[i:], lda) + bi.Dsyrk(blas.Upper, blas.NoTrans, ib, n-i-ib, + 1, a[i*lda+i+ib:], lda, 1, a[i*lda+i:], lda) + } + } + } else { + // Compute the product L^T*L. + for i := 0; i < n; i += nb { + ib := min(nb, n-i) + bi.Dtrmm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, + ib, i, 1, a[i*lda+i:], lda, a[i*lda:], lda) + impl.Dlauu2(blas.Lower, ib, a[i*lda+i:], lda) + if n-i-ib > 0 { + bi.Dgemm(blas.Trans, blas.NoTrans, ib, i, n-i-ib, + 1, a[(i+ib)*lda+i:], lda, a[(i+ib)*lda:], lda, 1, a[i*lda:], lda) + bi.Dsyrk(blas.Lower, blas.Trans, ib, n-i-ib, + 1, a[(i+ib)*lda+i:], lda, 1, a[i*lda+i:], lda) + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go new file mode 100644 index 0000000000..5794289272 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/doc.go @@ -0,0 +1,28 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gonum is a pure-go implementation of the LAPACK API. The LAPACK API defines +// a set of algorithms for advanced matrix operations. +// +// The function definitions and implementations follow that of the netlib reference +// implementation. 
See http://www.netlib.org/lapack/explore-html/ for more +// information, and http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html +// for more license information. +// +// Slice function arguments frequently represent vectors and matrices. The data +// layout is identical to that found in https://godoc.org/gonum.org/v1/gonum/blas/gonum. +// +// Most LAPACK functions are built on top the routines defined in the BLAS API, +// and as such the computation time for many LAPACK functions is +// dominated by BLAS calls. Here, BLAS is accessed through the +// blas64 package (https://godoc.org/golang.org/v1/gonum/blas/blas64). In particular, +// this implies that an external BLAS library will be used if it is +// registered in blas64. +// +// The full LAPACK capability has not been implemented at present. The full +// API is very large, containing approximately 200 functions for double precision +// alone. Future additions will be focused on supporting the gonum matrix +// package (https://godoc.org/github.com/gonum/matrix/mat64), though pull requests +// with implementations and tests for LAPACK function are encouraged. +package gonum // import "gonum.org/v1/gonum/lapack/gonum" diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go new file mode 100644 index 0000000000..a20765a9e9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2l.go @@ -0,0 +1,76 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dorg2l generates an m×n matrix Q with orthonormal columns which is defined +// as the last n columns of a product of k elementary reflectors of order m. +// Q = H_{k-1} * ... * H_1 * H_0 +// See Dgelqf for more information. It must be that m >= n >= k. 
+// +// tau contains the scalar reflectors computed by Dgeqlf. tau must have length +// at least k, and Dorg2l will panic otherwise. +// +// work contains temporary memory, and must have length at least n. Dorg2l will +// panic otherwise. +// +// Dorg2l is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorg2l(m, n, k int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case n > m: + panic(nGTM) + case k < 0: + panic(kLT0) + case k > n: + panic(kGTN) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < n: + panic(shortWork) + } + + // Initialize columns 0:n-k to columns of the unit matrix. + for j := 0; j < n-k; j++ { + for l := 0; l < m; l++ { + a[l*lda+j] = 0 + } + a[(m-n+j)*lda+j] = 1 + } + + bi := blas64.Implementation() + for i := 0; i < k; i++ { + ii := n - k + i + + // Apply H_i to A[0:m-k+i, 0:n-k+i] from the left. + a[(m-n+ii)*lda+ii] = 1 + impl.Dlarf(blas.Left, m-n+ii+1, ii, a[ii:], lda, tau[i], a, lda, work) + bi.Dscal(m-n+ii, -tau[i], a[ii:], lda) + a[(m-n+ii)*lda+ii] = 1 - tau[i] + + // Set A[m-k+i:m, n-k+i+1] to zero. + for l := m - n + ii + 1; l < m; l++ { + a[l*lda+ii] = 0 + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go new file mode 100644 index 0000000000..de44775712 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorg2r.go @@ -0,0 +1,75 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dorg2r generates an m×n matrix Q with orthonormal columns defined by the +// product of elementary reflectors as computed by Dgeqrf. +// Q = H_0 * H_1 * ... * H_{k-1} +// len(tau) >= k, 0 <= k <= n, 0 <= n <= m, len(work) >= n. +// Dorg2r will panic if these conditions are not met. +// +// Dorg2r is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorg2r(m, n, k int, a []float64, lda int, tau []float64, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case n > m: + panic(nGTM) + case k < 0: + panic(kLT0) + case k > n: + panic(kGTN) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < n: + panic(shortWork) + } + + bi := blas64.Implementation() + + // Initialize columns k+1:n to columns of the unit matrix. + for l := 0; l < m; l++ { + for j := k; j < n; j++ { + a[l*lda+j] = 0 + } + } + for j := k; j < n; j++ { + a[j*lda+j] = 1 + } + for i := k - 1; i >= 0; i-- { + for i := range work { + work[i] = 0 + } + if i < n-1 { + a[i*lda+i] = 1 + impl.Dlarf(blas.Left, m-i, n-i-1, a[i*lda+i:], lda, tau[i], a[i*lda+i+1:], lda, work) + } + if i < m-1 { + bi.Dscal(m-i-1, -tau[i], a[(i+1)*lda+i:], lda) + } + a[i*lda+i] = 1 - tau[i] + for l := 0; l < i; l++ { + a[l*lda+i] = 0 + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go new file mode 100644 index 0000000000..626cad5ffe --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgbr.go @@ -0,0 +1,138 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import "gonum.org/v1/gonum/lapack" + +// Dorgbr generates one of the matrices Q or P^T computed by Dgebrd +// computed from the decomposition Dgebrd. See Dgebd2 for the description of +// Q and P^T. +// +// If vect == lapack.GenerateQ, then a is assumed to have been an m×k matrix and +// Q is of order m. If m >= k, then Dorgbr returns the first n columns of Q +// where m >= n >= k. If m < k, then Dorgbr returns Q as an m×m matrix. +// +// If vect == lapack.GeneratePT, then A is assumed to have been a k×n matrix, and +// P^T is of order n. If k < n, then Dorgbr returns the first m rows of P^T, +// where n >= m >= k. If k >= n, then Dorgbr returns P^T as an n×n matrix. +// +// Dorgbr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgbr(vect lapack.GenOrtho, m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + wantq := vect == lapack.GenerateQ + mn := min(m, n) + switch { + case vect != lapack.GenerateQ && vect != lapack.GeneratePT: + panic(badGenOrtho) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case wantq && n > m: + panic(nGTM) + case wantq && n < min(m, k): + panic("lapack: n < min(m,k)") + case !wantq && m > n: + panic(mGTN) + case !wantq && m < min(n, k): + panic("lapack: m < min(n,k)") + case lda < max(1, n) && lwork != -1: + // Normally, we follow the reference and require the leading + // dimension to be always valid, even in case of workspace + // queries. However, if a caller provided a placeholder value + // for lda (and a) when doing a workspace query that didn't + // fulfill the condition here, it would cause a panic. This is + // exactly what Dgesvd does. + panic(badLdA) + case lwork < max(1, mn) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. 
+ work[0] = 1 + if m == 0 || n == 0 { + return + } + + if wantq { + if m >= k { + impl.Dorgqr(m, n, k, a, lda, tau, work, -1) + } else if m > 1 { + impl.Dorgqr(m-1, m-1, m-1, a[lda+1:], lda, tau, work, -1) + } + } else { + if k < n { + impl.Dorglq(m, n, k, a, lda, tau, work, -1) + } else if n > 1 { + impl.Dorglq(n-1, n-1, n-1, a[lda+1:], lda, tau, work, -1) + } + } + lworkopt := int(work[0]) + lworkopt = max(lworkopt, mn) + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case wantq && len(tau) < min(m, k): + panic(shortTau) + case !wantq && len(tau) < min(n, k): + panic(shortTau) + } + + if wantq { + // Form Q, determined by a call to Dgebrd to reduce an m×k matrix. + if m >= k { + impl.Dorgqr(m, n, k, a, lda, tau, work, lwork) + } else { + // Shift the vectors which define the elementary reflectors one + // column to the right, and set the first row and column of Q to + // those of the unit matrix. + for j := m - 1; j >= 1; j-- { + a[j] = 0 + for i := j + 1; i < m; i++ { + a[i*lda+j] = a[i*lda+j-1] + } + } + a[0] = 1 + for i := 1; i < m; i++ { + a[i*lda] = 0 + } + if m > 1 { + // Form Q[1:m-1, 1:m-1] + impl.Dorgqr(m-1, m-1, m-1, a[lda+1:], lda, tau, work, lwork) + } + } + } else { + // Form P^T, determined by a call to Dgebrd to reduce a k×n matrix. + if k < n { + impl.Dorglq(m, n, k, a, lda, tau, work, lwork) + } else { + // Shift the vectors which define the elementary reflectors one + // row downward, and set the first row and column of P^T to + // those of the unit matrix. 
+ a[0] = 1 + for i := 1; i < n; i++ { + a[i*lda] = 0 + } + for j := 1; j < n; j++ { + for i := j - 1; i >= 1; i-- { + a[i*lda+j] = a[(i-1)*lda+j] + } + a[j] = 0 + } + if n > 1 { + impl.Dorglq(n-1, n-1, n-1, a[lda+1:], lda, tau, work, lwork) + } + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go new file mode 100644 index 0000000000..6e799d10d5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorghr.go @@ -0,0 +1,101 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Dorghr generates an n×n orthogonal matrix Q which is defined as the product +// of ihi-ilo elementary reflectors: +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// +// a and lda represent an n×n matrix that contains the elementary reflectors, as +// returned by Dgehrd. On return, a is overwritten by the n×n orthogonal matrix +// Q. Q will be equal to the identity matrix except in the submatrix +// Q[ilo+1:ihi+1,ilo+1:ihi+1]. +// +// ilo and ihi must have the same values as in the previous call of Dgehrd. It +// must hold that +// 0 <= ilo <= ihi < n, if n > 0, +// ilo = 0, ihi = -1, if n == 0. +// +// tau contains the scalar factors of the elementary reflectors, as returned by +// Dgehrd. tau must have length n-1. +// +// work must have length at least max(1,lwork) and lwork must be at least +// ihi-ilo. For optimum performance lwork must be at least (ihi-ilo)*nb where nb +// is the optimal blocksize. On return, work[0] will contain the optimal value +// of lwork. +// +// If lwork == -1, instead of performing Dorghr, only the optimal value of lwork +// will be stored into work[0]. +// +// If any requirement on input sizes is not met, Dorghr will panic. +// +// Dorghr is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dorghr(n, ilo, ihi int, a []float64, lda int, tau, work []float64, lwork int) { + nh := ihi - ilo + switch { + case ilo < 0 || max(1, n) <= ilo: + panic(badIlo) + case ihi < min(ilo, n-1) || n <= ihi: + panic(badIhi) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, nh) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return + } + + lwkopt := max(1, nh) * impl.Ilaenv(1, "DORGQR", " ", nh, nh, nh, -1) + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(tau) < n-1: + panic(shortTau) + } + + // Shift the vectors which define the elementary reflectors one column + // to the right. + for i := ilo + 2; i < ihi+1; i++ { + copy(a[i*lda+ilo+1:i*lda+i], a[i*lda+ilo:i*lda+i-1]) + } + // Set the first ilo+1 and the last n-ihi-1 rows and columns to those of + // the identity matrix. + for i := 0; i < ilo+1; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = 0 + } + a[i*lda+i] = 1 + } + for i := ilo + 1; i < ihi+1; i++ { + for j := 0; j <= ilo; j++ { + a[i*lda+j] = 0 + } + for j := i; j < n; j++ { + a[i*lda+j] = 0 + } + } + for i := ihi + 1; i < n; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = 0 + } + a[i*lda+i] = 1 + } + if nh > 0 { + // Generate Q[ilo+1:ihi+1,ilo+1:ihi+1]. + impl.Dorgqr(nh, nh, nh, a[(ilo+1)*lda+ilo+1:], lda, tau[ilo:ihi], work, lwork) + } + work[0] = float64(lwkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go new file mode 100644 index 0000000000..b5566b9de1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgl2.go @@ -0,0 +1,71 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dorgl2 generates an m×n matrix Q with orthonormal rows defined by the +// first m rows product of elementary reflectors as computed by Dgelqf. +// Q = H_0 * H_1 * ... * H_{k-1} +// len(tau) >= k, 0 <= k <= m, 0 <= m <= n, len(work) >= m. +// Dorgl2 will panic if these conditions are not met. +// +// Dorgl2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgl2(m, n, k int, a []float64, lda int, tau, work []float64) { + switch { + case m < 0: + panic(mLT0) + case n < m: + panic(nLTM) + case k < 0: + panic(kLT0) + case k > m: + panic(kGTM) + case lda < max(1, m): + panic(badLdA) + } + + if m == 0 { + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(work) < m: + panic(shortWork) + } + + bi := blas64.Implementation() + + if k < m { + for i := k; i < m; i++ { + for j := 0; j < n; j++ { + a[i*lda+j] = 0 + } + } + for j := k; j < m; j++ { + a[j*lda+j] = 1 + } + } + for i := k - 1; i >= 0; i-- { + if i < n-1 { + if i < m-1 { + a[i*lda+i] = 1 + impl.Dlarf(blas.Right, m-i-1, n-i, a[i*lda+i:], 1, tau[i], a[(i+1)*lda+i:], lda, work) + } + bi.Dscal(n-i-1, -tau[i], a[i*lda+i+1:], 1) + } + a[i*lda+i] = 1 - tau[i] + for l := 0; l < i; l++ { + a[i*lda+l] = 0 + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go new file mode 100644 index 0000000000..a6dd980ceb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorglq.go @@ -0,0 +1,123 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dorglq generates an m×n matrix Q with orthonormal columns defined by the +// product of elementary reflectors as computed by Dgelqf. +// Q = H_0 * H_1 * ... * H_{k-1} +// Dorglq is the blocked version of Dorgl2 that makes greater use of level-3 BLAS +// routines. +// +// len(tau) >= k, 0 <= k <= m, and 0 <= m <= n. +// +// work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= m, and the amount of blocking is limited by the usable length. +// If lwork == -1, instead of computing Dorglq the optimal work length is stored +// into work[0]. +// +// Dorglq will panic if the conditions on input values are not met. +// +// Dorglq is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorglq(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < m: + panic(nLTM) + case k < 0: + panic(kLT0) + case k > m: + panic(kGTM) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, m) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + if m == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DORGLQ", " ", m, n, k, -1) + if lwork == -1 { + work[0] = float64(m * nb) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + nbmin := 2 // Minimum block size + var nx int // Crossover size from blocked to unbloked code + iws := m // Length of work needed + var ldwork int + if 1 < nb && nb < k { + nx = max(0, impl.Ilaenv(3, "DORGLQ", " ", m, n, k, -1)) + if nx < k { + ldwork = nb + iws = m * ldwork + if lwork < iws { + nb = lwork / m + ldwork = nb + nbmin = max(2, impl.Ilaenv(2, "DORGLQ", " ", m, n, k, -1)) + } + } + } + + var ki, kk int + if nbmin <= nb && nb < k && nx < k { + // The first kk rows are handled by the blocked method. 
+ ki = ((k - nx - 1) / nb) * nb + kk = min(k, ki+nb) + for i := kk; i < m; i++ { + for j := 0; j < kk; j++ { + a[i*lda+j] = 0 + } + } + } + if kk < m { + // Perform the operation on colums kk to the end. + impl.Dorgl2(m-kk, n-kk, k-kk, a[kk*lda+kk:], lda, tau[kk:], work) + } + if kk > 0 { + // Perform the operation on column-blocks + for i := ki; i >= 0; i -= nb { + ib := min(nb, k-i) + if i+ib < m { + impl.Dlarft(lapack.Forward, lapack.RowWise, + n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + + impl.Dlarfb(blas.Right, blas.Trans, lapack.Forward, lapack.RowWise, + m-i-ib, n-i, ib, + a[i*lda+i:], lda, + work, ldwork, + a[(i+ib)*lda+i:], lda, + work[ib*ldwork:], ldwork) + } + impl.Dorgl2(ib, n-i, ib, a[i*lda+i:], lda, tau[i:], work) + for l := i; l < i+ib; l++ { + for j := 0; j < i; j++ { + a[l*lda+j] = 0 + } + } + } + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go new file mode 100644 index 0000000000..6927ba4ca3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgql.go @@ -0,0 +1,136 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dorgql generates the m×n matrix Q with orthonormal columns defined as the +// last n columns of a product of k elementary reflectors of order m +// Q = H_{k-1} * ... * H_1 * H_0. +// +// It must hold that +// 0 <= k <= n <= m, +// and Dorgql will panic otherwise. +// +// On entry, the (n-k+i)-th column of A must contain the vector which defines +// the elementary reflector H_i, for i=0,...,k-1, and tau[i] must contain its +// scalar factor. On return, a contains the m×n matrix Q. +// +// tau must have length at least k, and Dorgql will panic otherwise. 
+// +// work must have length at least max(1,lwork), and lwork must be at least +// max(1,n), otherwise Dorgql will panic. For optimum performance lwork must +// be a sufficiently large multiple of n. +// +// If lwork == -1, instead of computing Dorgql the optimal work length is stored +// into work[0]. +// +// Dorgql is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgql(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case n > m: + panic(nGTM) + case k < 0: + panic(kLT0) + case k > n: + panic(kGTN) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DORGQL", " ", m, n, k, -1) + if lwork == -1 { + work[0] = float64(n * nb) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + nbmin := 2 + var nx, ldwork int + iws := n + if 1 < nb && nb < k { + // Determine when to cross over from blocked to unblocked code. + nx = max(0, impl.Ilaenv(3, "DORGQL", " ", m, n, k, -1)) + if nx < k { + // Determine if workspace is large enough for blocked code. + iws = n * nb + if lwork < iws { + // Not enough workspace to use optimal nb: reduce nb and determine + // the minimum value of nb. + nb = lwork / n + nbmin = max(2, impl.Ilaenv(2, "DORGQL", " ", m, n, k, -1)) + } + ldwork = nb + } + } + + var kk int + if nbmin <= nb && nb < k && nx < k { + // Use blocked code after the first block. The last kk columns are handled + // by the block method. + kk = min(k, ((k-nx+nb-1)/nb)*nb) + + // Set A(m-kk:m, 0:n-kk) to zero. + for i := m - kk; i < m; i++ { + for j := 0; j < n-kk; j++ { + a[i*lda+j] = 0 + } + } + } + + // Use unblocked code for the first or only block. 
+ impl.Dorg2l(m-kk, n-kk, k-kk, a, lda, tau, work) + if kk > 0 { + // Use blocked code. + for i := k - kk; i < k; i += nb { + ib := min(nb, k-i) + if n-k+i > 0 { + // Form the triangular factor of the block reflector + // H = H_{i+ib-1} * ... * H_{i+1} * H_i. + impl.Dlarft(lapack.Backward, lapack.ColumnWise, m-k+i+ib, ib, + a[n-k+i:], lda, tau[i:], work, ldwork) + + // Apply H to A[0:m-k+i+ib, 0:n-k+i] from the left. + impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Backward, lapack.ColumnWise, + m-k+i+ib, n-k+i, ib, a[n-k+i:], lda, work, ldwork, + a, lda, work[ib*ldwork:], ldwork) + } + + // Apply H to rows 0:m-k+i+ib of current block. + impl.Dorg2l(m-k+i+ib, ib, ib, a[n-k+i:], lda, tau[i:], work) + + // Set rows m-k+i+ib:m of current block to zero. + for j := n - k + i; j < n-k+i+ib; j++ { + for l := m - k + i + ib; l < m; l++ { + a[l*lda+j] = 0 + } + } + } + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go new file mode 100644 index 0000000000..f07fdaf46a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgqr.go @@ -0,0 +1,134 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dorgqr generates an m×n matrix Q with orthonormal columns defined by the +// product of elementary reflectors +// Q = H_0 * H_1 * ... * H_{k-1} +// as computed by Dgeqrf. +// Dorgqr is the blocked version of Dorg2r that makes greater use of level-3 BLAS +// routines. +// +// The length of tau must be at least k, and the length of work must be at least n. +// It also must be that 0 <= k <= n and 0 <= n <= m. +// +// work is temporary storage, and lwork specifies the usable memory length. At +// minimum, lwork >= n, and the amount of blocking is limited by the usable +// length. 
If lwork == -1, instead of computing Dorgqr the optimal work length +// is stored into work[0]. +// +// Dorgqr will panic if the conditions on input values are not met. +// +// Dorgqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgqr(m, n, k int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case n > m: + panic(nGTM) + case k < 0: + panic(kLT0) + case k > n: + panic(kGTN) + case lda < max(1, n) && lwork != -1: + // Normally, we follow the reference and require the leading + // dimension to be always valid, even in case of workspace + // queries. However, if a caller provided a placeholder value + // for lda (and a) when doing a workspace query that didn't + // fulfill the condition here, it would cause a panic. This is + // exactly what Dgesvd does. + panic(badLdA) + case lwork < max(1, n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + if n == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DORGQR", " ", m, n, k, -1) + // work is treated as an n×nb matrix + if lwork == -1 { + work[0] = float64(n * nb) + return + } + + switch { + case len(a) < (m-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + } + + nbmin := 2 // Minimum block size + var nx int // Crossover size from blocked to unbloked code + iws := n // Length of work needed + var ldwork int + if 1 < nb && nb < k { + nx = max(0, impl.Ilaenv(3, "DORGQR", " ", m, n, k, -1)) + if nx < k { + ldwork = nb + iws = n * ldwork + if lwork < iws { + nb = lwork / n + ldwork = nb + nbmin = max(2, impl.Ilaenv(2, "DORGQR", " ", m, n, k, -1)) + } + } + } + var ki, kk int + if nbmin <= nb && nb < k && nx < k { + // The first kk columns are handled by the blocked method. 
+ ki = ((k - nx - 1) / nb) * nb + kk = min(k, ki+nb) + for i := 0; i < kk; i++ { + for j := kk; j < n; j++ { + a[i*lda+j] = 0 + } + } + } + if kk < n { + // Perform the operation on colums kk to the end. + impl.Dorg2r(m-kk, n-kk, k-kk, a[kk*lda+kk:], lda, tau[kk:], work) + } + if kk > 0 { + // Perform the operation on column-blocks. + for i := ki; i >= 0; i -= nb { + ib := min(nb, k-i) + if i+ib < n { + impl.Dlarft(lapack.Forward, lapack.ColumnWise, + m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work, ldwork) + + impl.Dlarfb(blas.Left, blas.NoTrans, lapack.Forward, lapack.ColumnWise, + m-i, n-i-ib, ib, + a[i*lda+i:], lda, + work, ldwork, + a[i*lda+i+ib:], lda, + work[ib*ldwork:], ldwork) + } + impl.Dorg2r(m-i, ib, ib, a[i*lda+i:], lda, tau[i:], work) + // Set rows 0:i-1 of current block to zero. + for j := i; j < i+ib; j++ { + for l := 0; l < i; l++ { + a[l*lda+j] = 0 + } + } + } + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go new file mode 100644 index 0000000000..483fbcae9d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorgtr.go @@ -0,0 +1,104 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dorgtr generates a real orthogonal matrix Q which is defined as the product +// of n-1 elementary reflectors of order n as returned by Dsytrd. +// +// The construction of Q depends on the value of uplo: +// Q = H_{n-1} * ... * H_1 * H_0 if uplo == blas.Upper +// Q = H_0 * H_1 * ... * H_{n-1} if uplo == blas.Lower +// where H_i is constructed from the elementary reflectors as computed by Dsytrd. +// See the documentation for Dsytrd for more information. +// +// tau must have length at least n-1, and Dorgtr will panic otherwise. 
+// +// work is temporary storage, and lwork specifies the usable memory length. At +// minimum, lwork >= max(1,n-1), and Dorgtr will panic otherwise. The amount of blocking +// is limited by the usable length. +// If lwork == -1, instead of computing Dorgtr the optimal work length is stored +// into work[0]. +// +// Dorgtr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorgtr(uplo blas.Uplo, n int, a []float64, lda int, tau, work []float64, lwork int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, n-1) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + if n == 0 { + work[0] = 1 + return + } + + var nb int + if uplo == blas.Upper { + nb = impl.Ilaenv(1, "DORGQL", " ", n-1, n-1, n-1, -1) + } else { + nb = impl.Ilaenv(1, "DORGQR", " ", n-1, n-1, n-1, -1) + } + lworkopt := max(1, n-1) * nb + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(tau) < n-1: + panic(shortTau) + } + + if uplo == blas.Upper { + // Q was determined by a call to Dsytrd with uplo == blas.Upper. + // Shift the vectors which define the elementary reflectors one column + // to the left, and set the last row and column of Q to those of the unit + // matrix. + for j := 0; j < n-1; j++ { + for i := 0; i < j; i++ { + a[i*lda+j] = a[i*lda+j+1] + } + a[(n-1)*lda+j] = 0 + } + for i := 0; i < n-1; i++ { + a[i*lda+n-1] = 0 + } + a[(n-1)*lda+n-1] = 1 + + // Generate Q[0:n-1, 0:n-1]. + impl.Dorgql(n-1, n-1, n-1, a, lda, tau, work, lwork) + } else { + // Q was determined by a call to Dsytrd with uplo == blas.Upper. + // Shift the vectors which define the elementary reflectors one column + // to the right, and set the first row and column of Q to those of the unit + // matrix. 
+ for j := n - 1; j > 0; j-- { + a[j] = 0 + for i := j + 1; i < n; i++ { + a[i*lda+j] = a[i*lda+j-1] + } + } + a[0] = 1 + for i := 1; i < n; i++ { + a[i*lda] = 0 + } + if n > 1 { + // Generate Q[1:n, 1:n]. + impl.Dorgqr(n-1, n-1, n-1, a[lda+1:], lda, tau, work, lwork) + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go new file mode 100644 index 0000000000..4b0bd83cc3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorm2r.go @@ -0,0 +1,101 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dorm2r multiplies a general matrix C by an orthogonal matrix from a QR factorization +// determined by Dgeqrf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, a is a matrix of size m×k, and if side == blas.Right +// a is of size n×k. +// +// tau contains the Householder factors and is of length at least k and this function +// will panic otherwise. +// +// work is temporary storage of length at least n if side == blas.Left +// and at least m if side == blas.Right and this function will panic otherwise. +// +// Dorm2r is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dorm2r(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { + left := side == blas.Left + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.Trans && trans != blas.NoTrans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case lda < max(1, k): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. + if m == 0 || n == 0 || k == 0 { + return + } + + switch { + case left && len(a) < (m-1)*lda+k: + panic(shortA) + case !left && len(a) < (n-1)*lda+k: + panic(shortA) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case len(tau) < k: + panic(shortTau) + case left && len(work) < n: + panic(shortWork) + case !left && len(work) < m: + panic(shortWork) + } + + if left { + if trans == blas.NoTrans { + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + return + } + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + return + } + if trans == blas.NoTrans { + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } + return + } + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go new file mode 100644 index 0000000000..026dc04127 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormbr.go @@ -0,0 +1,178 @@ +// Copyright ©2015 The Gonum Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dormbr applies a multiplicative update to the matrix C based on a +// decomposition computed by Dgebrd. +// +// Dormbr overwrites the m×n matrix C with +// Q * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.NoTrans +// C * Q if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.NoTrans +// Q^T * C if vect == lapack.ApplyQ, side == blas.Left, and trans == blas.Trans +// C * Q^T if vect == lapack.ApplyQ, side == blas.Right, and trans == blas.Trans +// +// P * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.NoTrans +// C * P if vect == lapack.ApplyP, side == blas.Right, and trans == blas.NoTrans +// P^T * C if vect == lapack.ApplyP, side == blas.Left, and trans == blas.Trans +// C * P^T if vect == lapack.ApplyP, side == blas.Right, and trans == blas.Trans +// where P and Q are the orthogonal matrices determined by Dgebrd when reducing +// a matrix A to bidiagonal form: A = Q * B * P^T. See Dgebrd for the +// definitions of Q and P. +// +// If vect == lapack.ApplyQ, A is assumed to have been an nq×k matrix, while if +// vect == lapack.ApplyP, A is assumed to have been a k×nq matrix. nq = m if +// side == blas.Left, while nq = n if side == blas.Right. +// +// tau must have length min(nq,k), and Dormbr will panic otherwise. tau contains +// the elementary reflectors to construct Q or P depending on the value of +// vect. +// +// work must have length at least max(1,lwork), and lwork must be either -1 or +// at least max(1,n) if side == blas.Left, and at least max(1,m) if side == +// blas.Right. For optimum performance lwork should be at least n*nb if side == +// blas.Left, and at least m*nb if side == blas.Right, where nb is the optimal +// block size. 
On return, work[0] will contain the optimal value of lwork. +// +// If lwork == -1, the function only calculates the optimal value of lwork and +// returns it in work[0]. +// +// Dormbr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dormbr(vect lapack.ApplyOrtho, side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + nq := n + nw := m + if side == blas.Left { + nq = m + nw = n + } + applyQ := vect == lapack.ApplyQ + switch { + case !applyQ && vect != lapack.ApplyP: + panic(badApplyOrtho) + case side != blas.Left && side != blas.Right: + panic(badSide) + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case applyQ && lda < max(1, min(nq, k)): + panic(badLdA) + case !applyQ && lda < max(1, nq): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + case lwork < max(1, nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if m == 0 || n == 0 { + work[0] = 1 + return + } + + // The current implementation does not use opts, but a future change may + // use these options so construct them. 
+ var opts string + if side == blas.Left { + opts = "L" + } else { + opts = "R" + } + if trans == blas.Trans { + opts += "T" + } else { + opts += "N" + } + var nb int + if applyQ { + if side == blas.Left { + nb = impl.Ilaenv(1, "DORMQR", opts, m-1, n, m-1, -1) + } else { + nb = impl.Ilaenv(1, "DORMQR", opts, m, n-1, n-1, -1) + } + } else { + if side == blas.Left { + nb = impl.Ilaenv(1, "DORMLQ", opts, m-1, n, m-1, -1) + } else { + nb = impl.Ilaenv(1, "DORMLQ", opts, m, n-1, n-1, -1) + } + } + lworkopt := max(1, nw) * nb + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + minnqk := min(nq, k) + switch { + case applyQ && len(a) < (nq-1)*lda+minnqk: + panic(shortA) + case !applyQ && len(a) < (minnqk-1)*lda+nq: + panic(shortA) + case len(tau) < minnqk: + panic(shortTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + } + + if applyQ { + // Change the operation to get Q depending on the size of the initial + // matrix to Dgebrd. The size matters due to the storage location of + // the off-diagonal elements. + if nq >= k { + impl.Dormqr(side, trans, m, n, k, a, lda, tau[:k], c, ldc, work, lwork) + } else if nq > 1 { + mi := m + ni := n - 1 + i1 := 0 + i2 := 1 + if side == blas.Left { + mi = m - 1 + ni = n + i1 = 1 + i2 = 0 + } + impl.Dormqr(side, trans, mi, ni, nq-1, a[1*lda:], lda, tau[:nq-1], c[i1*ldc+i2:], ldc, work, lwork) + } + work[0] = float64(lworkopt) + return + } + + transt := blas.Trans + if trans == blas.Trans { + transt = blas.NoTrans + } + + // Change the operation to get P depending on the size of the initial + // matrix to Dgebrd. The size matters due to the storage location of + // the off-diagonal elements. 
+ if nq > k { + impl.Dormlq(side, transt, m, n, k, a, lda, tau, c, ldc, work, lwork) + } else if nq > 1 { + mi := m + ni := n - 1 + i1 := 0 + i2 := 1 + if side == blas.Left { + mi = m - 1 + ni = n + i1 = 1 + i2 = 0 + } + impl.Dormlq(side, transt, mi, ni, nq-1, a[1:], lda, tau, c[i1*ldc+i2:], ldc, work, lwork) + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go new file mode 100644 index 0000000000..c00f440590 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormhr.go @@ -0,0 +1,129 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dormhr multiplies an m×n general matrix C with an nq×nq orthogonal matrix Q +// Q * C, if side == blas.Left and trans == blas.NoTrans, +// Q^T * C, if side == blas.Left and trans == blas.Trans, +// C * Q, if side == blas.Right and trans == blas.NoTrans, +// C * Q^T, if side == blas.Right and trans == blas.Trans, +// where nq == m if side == blas.Left and nq == n if side == blas.Right. +// +// Q is defined implicitly as the product of ihi-ilo elementary reflectors, as +// returned by Dgehrd: +// Q = H_{ilo} H_{ilo+1} ... H_{ihi-1}. +// Q is equal to the identity matrix except in the submatrix +// Q[ilo+1:ihi+1,ilo+1:ihi+1]. +// +// ilo and ihi must have the same values as in the previous call of Dgehrd. It +// must hold that +// 0 <= ilo <= ihi < m, if m > 0 and side == blas.Left, +// ilo = 0 and ihi = -1, if m = 0 and side == blas.Left, +// 0 <= ilo <= ihi < n, if n > 0 and side == blas.Right, +// ilo = 0 and ihi = -1, if n = 0 and side == blas.Right. +// +// a and lda represent an m×m matrix if side == blas.Left and an n×n matrix if +// side == blas.Right. The matrix contains vectors which define the elementary +// reflectors, as returned by Dgehrd. 
+// +// tau contains the scalar factors of the elementary reflectors, as returned by +// Dgehrd. tau must have length m-1 if side == blas.Left and n-1 if side == +// blas.Right. +// +// c and ldc represent the m×n matrix C. On return, c is overwritten by the +// product with Q. +// +// work must have length at least max(1,lwork), and lwork must be at least +// max(1,n), if side == blas.Left, and max(1,m), if side == blas.Right. For +// optimum performance lwork should be at least n*nb if side == blas.Left and +// m*nb if side == blas.Right, where nb is the optimal block size. On return, +// work[0] will contain the optimal value of lwork. +// +// If lwork == -1, instead of performing Dormhr, only the optimal value of lwork +// will be stored in work[0]. +// +// If any requirement on input sizes is not met, Dormhr will panic. +// +// Dormhr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dormhr(side blas.Side, trans blas.Transpose, m, n, ilo, ihi int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + nq := n // The order of Q. + nw := m // The minimum length of work. + if side == blas.Left { + nq = m + nw = n + } + switch { + case side != blas.Left && side != blas.Right: + panic(badSide) + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case ilo < 0 || max(1, nq) <= ilo: + panic(badIlo) + case ihi < min(ilo, nq-1) || nq <= ihi: + panic(badIhi) + case lda < max(1, nq): + panic(badLdA) + case lwork < max(1, nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 { + work[0] = 1 + return + } + + nh := ihi - ilo + var nb int + if side == blas.Left { + opts := "LN" + if trans == blas.Trans { + opts = "LT" + } + nb = impl.Ilaenv(1, "DORMQR", opts, nh, n, nh, -1) + } else { + opts := "RN" + if trans == blas.Trans { + opts = "RT" + } + nb = impl.Ilaenv(1, "DORMQR", opts, m, nh, nh, -1) + } + lwkopt := max(1, nw) * nb + if lwork == -1 { + work[0] = float64(lwkopt) + return + } + + if nh == 0 { + work[0] = 1 + return + } + + switch { + case len(a) < (nq-1)*lda+nq: + panic(shortA) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case len(tau) != nq-1: + panic(badLenTau) + } + + if side == blas.Left { + impl.Dormqr(side, trans, nh, n, nh, a[(ilo+1)*lda+ilo:], lda, + tau[ilo:ihi], c[(ilo+1)*ldc:], ldc, work, lwork) + } else { + impl.Dormqr(side, trans, m, nh, nh, a[(ilo+1)*lda+ilo:], lda, + tau[ilo:ihi], c[ilo+1:], ldc, work, lwork) + } + work[0] = float64(lwkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go new file mode 100644 index 0000000000..25aa83ac10 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dorml2.go @@ -0,0 +1,102 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dorml2 multiplies a general matrix C by an orthogonal matrix from an LQ factorization +// determined by Dgelqf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, a is a matrix of side k×m, and if side == blas.Right +// a is of size k×n. +// +// tau contains the Householder factors and is of length at least k and this function will +// panic otherwise. 
+// +// work is temporary storage of length at least n if side == blas.Left +// and at least m if side == blas.Right and this function will panic otherwise. +// +// Dorml2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dorml2(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { + left := side == blas.Left + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.Trans && trans != blas.NoTrans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case left && lda < max(1, m): + panic(badLdA) + case !left && lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if m == 0 || n == 0 || k == 0 { + return + } + + switch { + case left && len(a) < (k-1)*lda+m: + panic(shortA) + case !left && len(a) < (k-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case left && len(work) < n: + panic(shortWork) + case !left && len(work) < m: + panic(shortWork) + } + + notrans := trans == blas.NoTrans + switch { + case left && notrans: + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + + case left && !notrans: + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) + a[i*lda+i] = aii + } + + case !left && notrans: + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } + + case !left && !notrans: + for i := 0; i < k; i++ { + aii := a[i*lda+i] + a[i*lda+i] = 1 + impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) + a[i*lda+i] = aii + } + } +} 
diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go new file mode 100644 index 0000000000..6fcfc2fb19 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormlq.go @@ -0,0 +1,174 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dormlq multiplies the matrix C by the orthogonal matrix Q defined by the +// slices a and tau. A and tau are as returned from Dgelqf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, A is a matrix of side k×m, and if side == blas.Right +// A is of size k×n. This uses a blocked algorithm. +// +// work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m if side == blas.Left and lwork >= n if side == blas.Right, +// and this function will panic otherwise. +// Dormlq uses a block algorithm, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Dormlq, +// the optimal work length will be stored into work[0]. +// +// tau contains the Householder scales and must have length at least k, and +// this function will panic otherwise. 
+func (impl Implementation) Dormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + left := side == blas.Left + nw := m + if left { + nw = n + } + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.Trans && trans != blas.NoTrans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case left && lda < max(1, m): + panic(badLdA) + case !left && lda < max(1, n): + panic(badLdA) + case lwork < max(1, nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if m == 0 || n == 0 || k == 0 { + work[0] = 1 + return + } + + const ( + nbmax = 64 + ldt = nbmax + tsize = nbmax * ldt + ) + opts := string(side) + string(trans) + nb := min(nbmax, impl.Ilaenv(1, "DORMLQ", opts, m, n, k, -1)) + lworkopt := max(1, nw)*nb + tsize + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case left && len(a) < (k-1)*lda+m: + panic(shortA) + case !left && len(a) < (k-1)*lda+n: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + } + + nbmin := 2 + if 1 < nb && nb < k { + iws := nw*nb + tsize + if lwork < iws { + nb = (lwork - tsize) / nw + nbmin = max(2, impl.Ilaenv(2, "DORMLQ", opts, m, n, k, -1)) + } + } + if nb < nbmin || k <= nb { + // Call unblocked code. 
+ impl.Dorml2(side, trans, m, n, k, a, lda, tau, c, ldc, work) + work[0] = float64(lworkopt) + return + } + + t := work[:tsize] + wrk := work[tsize:] + ldwrk := nb + + notrans := trans == blas.NoTrans + transt := blas.NoTrans + if notrans { + transt = blas.Trans + } + + switch { + case left && notrans: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, + a[i*lda+i:], lda, + t, ldt, + c[i*ldc:], ldc, + wrk, ldwrk) + } + + case left && !notrans: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, + a[i*lda+i:], lda, + t, ldt, + c[i*ldc:], ldc, + wrk, ldwrk) + } + + case !left && notrans: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, + a[i*lda+i:], lda, + t, ldt, + c[i:], ldc, + wrk, ldwrk) + } + + case !left && !notrans: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + t, ldt) + impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, + a[i*lda+i:], lda, + t, ldt, + c[i:], ldc, + wrk, ldwrk) + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go new file mode 100644 index 0000000000..8ae4508654 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormqr.go @@ -0,0 +1,177 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/lapack" +) + +// Dormqr multiplies an m×n matrix C by an orthogonal matrix Q as +// C = Q * C, if side == blas.Left and trans == blas.NoTrans, +// C = Q^T * C, if side == blas.Left and trans == blas.Trans, +// C = C * Q, if side == blas.Right and trans == blas.NoTrans, +// C = C * Q^T, if side == blas.Right and trans == blas.Trans, +// where Q is defined as the product of k elementary reflectors +// Q = H_0 * H_1 * ... * H_{k-1}. +// +// If side == blas.Left, A is an m×k matrix and 0 <= k <= m. +// If side == blas.Right, A is an n×k matrix and 0 <= k <= n. +// The ith column of A contains the vector which defines the elementary +// reflector H_i and tau[i] contains its scalar factor. tau must have length k +// and Dormqr will panic otherwise. Dgeqrf returns A and tau in the required +// form. +// +// work must have length at least max(1,lwork), and lwork must be at least n if +// side == blas.Left and at least m if side == blas.Right, otherwise Dormqr will +// panic. +// +// work is temporary storage, and lwork specifies the usable memory length. At +// minimum, lwork >= m if side == blas.Left and lwork >= n if side == +// blas.Right, and this function will panic otherwise. Larger values of lwork +// will generally give better performance. On return, work[0] will contain the +// optimal value of lwork. +// +// If lwork is -1, instead of performing Dormqr, the optimal workspace size will +// be stored into work[0]. 
+func (impl Implementation) Dormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { + left := side == blas.Left + nq := n + nw := m + if left { + nq = m + nw = n + } + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case lda < max(1, k): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + case lwork < max(1, nw) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if m == 0 || n == 0 || k == 0 { + work[0] = 1 + return + } + + const ( + nbmax = 64 + ldt = nbmax + tsize = nbmax * ldt + ) + opts := string(side) + string(trans) + nb := min(nbmax, impl.Ilaenv(1, "DORMQR", opts, m, n, k, -1)) + lworkopt := max(1, nw)*nb + tsize + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (nq-1)*lda+k: + panic(shortA) + case len(tau) != k: + panic(badLenTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + } + + nbmin := 2 + if 1 < nb && nb < k { + if lwork < nw*nb+tsize { + nb = (lwork - tsize) / nw + nbmin = max(2, impl.Ilaenv(2, "DORMQR", opts, m, n, k, -1)) + } + } + + if nb < nbmin || k <= nb { + // Call unblocked code. 
+ impl.Dorm2r(side, trans, m, n, k, a, lda, tau, c, ldc, work) + work[0] = float64(lworkopt) + return + } + + var ( + ldwork = nb + notrans = trans == blas.NoTrans + ) + switch { + case left && notrans: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m-i, n, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i*ldc:], ldc, + work[tsize:], ldwork) + } + + case left && !notrans: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m-i, n, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i*ldc:], ldc, + work[tsize:], ldwork) + } + + case !left && notrans: + for i := 0; i < k; i += nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i:], ldc, + work[tsize:], ldwork) + } + + case !left && !notrans: + for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { + ib := min(nb, k-i) + impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, + a[i*lda+i:], lda, + tau[i:], + work[:tsize], ldt) + impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, + a[i*lda+i:], lda, + work[:tsize], ldt, + c[i:], ldc, + work[tsize:], ldwork) + } + } + work[0] = float64(lworkopt) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go new file mode 100644 index 0000000000..bb03f32c76 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dormr2.go @@ -0,0 +1,103 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dormr2 multiplies a general matrix C by an orthogonal matrix from a RQ factorization +// determined by Dgerqf. +// C = Q * C if side == blas.Left and trans == blas.NoTrans +// C = Q^T * C if side == blas.Left and trans == blas.Trans +// C = C * Q if side == blas.Right and trans == blas.NoTrans +// C = C * Q^T if side == blas.Right and trans == blas.Trans +// If side == blas.Left, a is a matrix of size k×m, and if side == blas.Right +// a is of size k×n. +// +// tau contains the Householder factors and is of length at least k and this function +// will panic otherwise. +// +// work is temporary storage of length at least n if side == blas.Left +// and at least m if side == blas.Right and this function will panic otherwise. +// +// Dormr2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dormr2(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { + left := side == blas.Left + nq := n + nw := m + if left { + nq = m + nw = n + } + switch { + case !left && side != blas.Right: + panic(badSide) + case trans != blas.NoTrans && trans != blas.Trans: + panic(badTrans) + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case k < 0: + panic(kLT0) + case left && k > m: + panic(kGTM) + case !left && k > n: + panic(kGTN) + case lda < max(1, nq): + panic(badLdA) + case ldc < max(1, n): + panic(badLdC) + } + + // Quick return if possible. 
+ if m == 0 || n == 0 || k == 0 { + return + } + + switch { + case len(a) < (k-1)*lda+nq: + panic(shortA) + case len(tau) < k: + panic(shortTau) + case len(c) < (m-1)*ldc+n: + panic(shortC) + case len(work) < nw: + panic(shortWork) + } + + if left { + if trans == blas.NoTrans { + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+(m-k+i)] + a[i*lda+(m-k+i)] = 1 + impl.Dlarf(side, m-k+i+1, n, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(m-k+i)] = aii + } + return + } + for i := 0; i < k; i++ { + aii := a[i*lda+(m-k+i)] + a[i*lda+(m-k+i)] = 1 + impl.Dlarf(side, m-k+i+1, n, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(m-k+i)] = aii + } + return + } + if trans == blas.NoTrans { + for i := 0; i < k; i++ { + aii := a[i*lda+(n-k+i)] + a[i*lda+(n-k+i)] = 1 + impl.Dlarf(side, m, n-k+i+1, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(n-k+i)] = aii + } + return + } + for i := k - 1; i >= 0; i-- { + aii := a[i*lda+(n-k+i)] + a[i*lda+(n-k+i)] = 1 + impl.Dlarf(side, m, n-k+i+1, a[i*lda:], 1, tau[i], c, ldc, work) + a[i*lda+(n-k+i)] = aii + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go new file mode 100644 index 0000000000..a5beb80bca --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpbtf2.go @@ -0,0 +1,110 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpbtf2 computes the Cholesky factorization of a symmetric positive banded +// matrix ab. The matrix ab is n×n with kd diagonal bands. The Cholesky +// factorization computed is +// A = U^T * U if ul == blas.Upper +// A = L * L^T if ul == blas.Lower +// ul also specifies the storage of ab. 
If ul == blas.Upper, then +// ab is stored as an upper-triangular banded matrix with kd super-diagonals, +// and if ul == blas.Lower, ab is stored as a lower-triangular banded matrix +// with kd sub-diagonals. On exit, the banded matrix U or L is stored in-place +// into ab depending on the value of ul. Dpbtf2 returns whether the factorization +// was successfully completed. +// +// The band storage scheme is illustrated below when n = 6, and kd = 2. +// The resulting Cholesky decomposition is stored in the same elements as the +// input band matrix (a11 becomes u11 or l11, etc.). +// +// ul = blas.Upper +// a11 a12 a13 +// a22 a23 a24 +// a33 a34 a35 +// a44 a45 a46 +// a55 a56 * +// a66 * * +// +// ul = blas.Lower +// * * a11 +// * a21 a22 +// a31 a32 a33 +// a42 a43 a44 +// a53 a54 a55 +// a64 a65 a66 +// +// Dpbtf2 is the unblocked version of the algorithm, see Dpbtrf for the blocked +// version. +// +// Dpbtf2 is an internal routine, exported for testing purposes. +func (Implementation) Dpbtf2(ul blas.Uplo, n, kd int, ab []float64, ldab int) (ok bool) { + switch { + case ul != blas.Upper && ul != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case kd < 0: + panic(kdLT0) + case ldab < kd+1: + panic(badLdA) + } + + if n == 0 { + return + } + + if len(ab) < (n-1)*ldab+kd { + panic(shortAB) + } + + bi := blas64.Implementation() + + kld := max(1, ldab-1) + if ul == blas.Upper { + for j := 0; j < n; j++ { + // Compute U(J,J) and test for non positive-definiteness. + ajj := ab[j*ldab] + if ajj <= 0 { + return false + } + ajj = math.Sqrt(ajj) + ab[j*ldab] = ajj + // Compute elements j+1:j+kn of row J and update the trailing submatrix + // within the band. + kn := min(kd, n-j-1) + if kn > 0 { + bi.Dscal(kn, 1/ajj, ab[j*ldab+1:], 1) + bi.Dsyr(blas.Upper, kn, -1, ab[j*ldab+1:], 1, ab[(j+1)*ldab:], kld) + } + } + return true + } + for j := 0; j < n; j++ { + // Compute L(J,J) and test for non positive-definiteness. 
+ ajj := ab[j*ldab+kd] + if ajj <= 0 { + return false + } + ajj = math.Sqrt(ajj) + ab[j*ldab+kd] = ajj + + // Compute elements J+1:J+KN of column J and update the trailing submatrix + // within the band. + kn := min(kd, n-j-1) + if kn > 0 { + bi.Dscal(kn, 1/ajj, ab[(j+1)*ldab+kd-1:], kld) + bi.Dsyr(blas.Lower, kn, -1, ab[(j+1)*ldab+kd-1:], kld, ab[(j+1)*ldab+kd:], kld) + } + } + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go new file mode 100644 index 0000000000..7af4c18728 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpocon.go @@ -0,0 +1,90 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpocon estimates the reciprocal of the condition number of a positive-definite +// matrix A given the Cholesky decomposition of A. The condition number computed +// is based on the 1-norm and the ∞-norm. +// +// anorm is the 1-norm and the ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 3*n and Dpocon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Dpocon will panic otherwise. +func (impl Implementation) Dpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64 { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case anorm < 0: + panic(negANorm) + } + + // Quick return if possible. 
+ if n == 0 { + return 1 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(work) < 3*n: + panic(shortWork) + case len(iwork) < n: + panic(shortIWork) + } + + if anorm == 0 { + return 0 + } + + bi := blas64.Implementation() + + var ( + smlnum = dlamchS + rcond float64 + sl, su float64 + normin bool + ainvnm float64 + kase int + isave [3]int + ) + for { + ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, &isave) + if kase == 0 { + if ainvnm != 0 { + rcond = (1 / ainvnm) / anorm + } + return rcond + } + if uplo == blas.Upper { + sl = impl.Dlatrs(blas.Upper, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + normin = true + su = impl.Dlatrs(blas.Upper, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + } else { + sl = impl.Dlatrs(blas.Lower, blas.NoTrans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + normin = true + su = impl.Dlatrs(blas.Lower, blas.Trans, blas.NonUnit, normin, n, a, lda, work, work[2*n:]) + } + scale := sl * su + if scale != 1 { + ix := bi.Idamax(n, work, 1) + if scale == 0 || scale < math.Abs(work[ix])*smlnum { + return rcond + } + impl.Drscl(n, scale, work, 1) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go new file mode 100644 index 0000000000..5d3327c2d5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotf2.go @@ -0,0 +1,82 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpotf2 computes the Cholesky decomposition of the symmetric positive definite +// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, +// and a = U^T U is stored in place into a. If ul == blas.Lower, then a = L L^T +// is computed and stored in-place into a. 
If a is not positive definite, false +// is returned. This is the unblocked version of the algorithm. +// +// Dpotf2 is an internal routine. It is exported for testing purposes. +func (Implementation) Dpotf2(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { + switch { + case ul != blas.Upper && ul != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return true + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + bi := blas64.Implementation() + + if ul == blas.Upper { + for j := 0; j < n; j++ { + ajj := a[j*lda+j] + if j != 0 { + ajj -= bi.Ddot(j, a[j:], lda, a[j:], lda) + } + if ajj <= 0 || math.IsNaN(ajj) { + a[j*lda+j] = ajj + return false + } + ajj = math.Sqrt(ajj) + a[j*lda+j] = ajj + if j < n-1 { + bi.Dgemv(blas.Trans, j, n-j-1, + -1, a[j+1:], lda, a[j:], lda, + 1, a[j*lda+j+1:], 1) + bi.Dscal(n-j-1, 1/ajj, a[j*lda+j+1:], 1) + } + } + return true + } + for j := 0; j < n; j++ { + ajj := a[j*lda+j] + if j != 0 { + ajj -= bi.Ddot(j, a[j*lda:], 1, a[j*lda:], 1) + } + if ajj <= 0 || math.IsNaN(ajj) { + a[j*lda+j] = ajj + return false + } + ajj = math.Sqrt(ajj) + a[j*lda+j] = ajj + if j < n-1 { + bi.Dgemv(blas.NoTrans, n-j-1, j, + -1, a[(j+1)*lda:], lda, a[j*lda:], 1, + 1, a[(j+1)*lda+j:], lda) + bi.Dscal(n-j-1, 1/ajj, a[(j+1)*lda+j:], lda) + } + } + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go new file mode 100644 index 0000000000..21241687f8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrf.go @@ -0,0 +1,81 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpotrf computes the Cholesky decomposition of the symmetric positive definite +// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, +// and a = U^T U is stored in place into a. If ul == blas.Lower, then a = L L^T +// is computed and stored in-place into a. If a is not positive definite, false +// is returned. This is the blocked version of the algorithm. +func (impl Implementation) Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { + switch { + case ul != blas.Upper && ul != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return true + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + nb := impl.Ilaenv(1, "DPOTRF", string(ul), n, -1, -1, -1) + if nb <= 1 || n <= nb { + return impl.Dpotf2(ul, n, a, lda) + } + bi := blas64.Implementation() + if ul == blas.Upper { + for j := 0; j < n; j += nb { + jb := min(nb, n-j) + bi.Dsyrk(blas.Upper, blas.Trans, jb, j, + -1, a[j:], lda, + 1, a[j*lda+j:], lda) + ok = impl.Dpotf2(blas.Upper, jb, a[j*lda+j:], lda) + if !ok { + return ok + } + if j+jb < n { + bi.Dgemm(blas.Trans, blas.NoTrans, jb, n-j-jb, j, + -1, a[j:], lda, a[j+jb:], lda, + 1, a[j*lda+j+jb:], lda) + bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, jb, n-j-jb, + 1, a[j*lda+j:], lda, + a[j*lda+j+jb:], lda) + } + } + return true + } + for j := 0; j < n; j += nb { + jb := min(nb, n-j) + bi.Dsyrk(blas.Lower, blas.NoTrans, jb, j, + -1, a[j*lda:], lda, + 1, a[j*lda+j:], lda) + ok := impl.Dpotf2(blas.Lower, jb, a[j*lda+j:], lda) + if !ok { + return ok + } + if j+jb < n { + bi.Dgemm(blas.NoTrans, blas.Trans, n-j-jb, jb, j, + -1, a[(j+jb)*lda:], lda, a[j*lda:], lda, + 1, a[(j+jb)*lda+j:], lda) + bi.Dtrsm(blas.Right, blas.Lower, blas.Trans, blas.NonUnit, n-j-jb, jb, + 1, a[j*lda+j:], lda, + a[(j+jb)*lda+j:], lda) + } + } + 
return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go new file mode 100644 index 0000000000..2394775c31 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotri.go @@ -0,0 +1,44 @@ +// Copyright ©2019 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/blas" + +// Dpotri computes the inverse of a real symmetric positive definite matrix A +// using its Cholesky factorization. +// +// On entry, a contains the triangular factor U or L from the Cholesky +// factorization A = U^T*U or A = L*L^T, as computed by Dpotrf. +// On return, a contains the upper or lower triangle of the (symmetric) +// inverse of A, overwriting the input factor U or L. +func (impl Implementation) Dpotri(uplo blas.Uplo, n int, a []float64, lda int) (ok bool) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return true + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + // Invert the triangular Cholesky factor U or L. + ok = impl.Dtrtri(uplo, blas.NonUnit, n, a, lda) + if !ok { + return false + } + + // Form inv(U)*inv(U)^T or inv(L)^T*inv(L). + impl.Dlauum(uplo, n, a, lda) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go new file mode 100644 index 0000000000..689e0439c2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dpotrs.go @@ -0,0 +1,62 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dpotrs solves a system of n linear equations A*X = B where A is an n×n +// symmetric positive definite matrix and B is an n×nrhs matrix. The matrix A is +// represented by its Cholesky factorization +// A = U^T*U if uplo == blas.Upper +// A = L*L^T if uplo == blas.Lower +// as computed by Dpotrf. On entry, B contains the right-hand side matrix B, on +// return it contains the solution matrix X. +func (Implementation) Dpotrs(uplo blas.Uplo, n, nrhs int, a []float64, lda int, b []float64, ldb int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case nrhs < 0: + panic(nrhsLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, nrhs): + panic(badLdB) + } + + // Quick return if possible. + if n == 0 || nrhs == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(b) < (n-1)*ldb+nrhs: + panic(shortB) + } + + bi := blas64.Implementation() + + if uplo == blas.Upper { + // Solve U^T * U * X = B where U is stored in the upper triangle of A. + + // Solve U^T * X = B, overwriting B with X. + bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) + // Solve U * X = B, overwriting B with X. + bi.Dtrsm(blas.Left, blas.Upper, blas.NoTrans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) + } else { + // Solve L * L^T * X = B where L is stored in the lower triangle of A. + + // Solve L * X = B, overwriting B with X. + bi.Dtrsm(blas.Left, blas.Lower, blas.NoTrans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) + // Solve L^T * X = B, overwriting B with X. 
+ bi.Dtrsm(blas.Left, blas.Lower, blas.Trans, blas.NonUnit, n, nrhs, 1, a, lda, b, ldb) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go b/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go new file mode 100644 index 0000000000..b2772dbc22 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/drscl.go @@ -0,0 +1,63 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas/blas64" +) + +// Drscl multiplies the vector x by 1/a being careful to avoid overflow or +// underflow where possible. +// +// Drscl is an internal routine. It is exported for testing purposes. +func (impl Implementation) Drscl(n int, a float64, x []float64, incX int) { + switch { + case n < 0: + panic(nLT0) + case incX <= 0: + panic(badIncX) + } + + // Quick return if possible. + if n == 0 { + return + } + + if len(x) < 1+(n-1)*incX { + panic(shortX) + } + + bi := blas64.Implementation() + + cden := a + cnum := 1.0 + smlnum := dlamchS + bignum := 1 / smlnum + for { + cden1 := cden * smlnum + cnum1 := cnum / bignum + var mul float64 + var done bool + switch { + case cnum != 0 && math.Abs(cden1) > math.Abs(cnum): + mul = smlnum + done = false + cden = cden1 + case math.Abs(cnum1) > math.Abs(cden): + mul = bignum + done = false + cnum = cnum1 + default: + mul = cnum / cden + done = true + } + bi.Dscal(n, mul, x, incX) + if done { + break + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go new file mode 100644 index 0000000000..d6c7861ab5 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsteqr.go @@ -0,0 +1,376 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dsteqr computes the eigenvalues and optionally the eigenvectors of a symmetric +// tridiagonal matrix using the implicit QL or QR method. The eigenvectors of a +// full or band symmetric matrix can also be found if Dsytrd, Dsptrd, or Dsbtrd +// have been used to reduce this matrix to tridiagonal form. +// +// d, on entry, contains the diagonal elements of the tridiagonal matrix. On exit, +// d contains the eigenvalues in ascending order. d must have length n and +// Dsteqr will panic otherwise. +// +// e, on entry, contains the off-diagonal elements of the tridiagonal matrix on +// entry, and is overwritten during the call to Dsteqr. e must have length n-1 and +// Dsteqr will panic otherwise. +// +// z, on entry, contains the n×n orthogonal matrix used in the reduction to +// tridiagonal form if compz == lapack.EVOrig. On exit, if +// compz == lapack.EVOrig, z contains the orthonormal eigenvectors of the +// original symmetric matrix, and if compz == lapack.EVTridiag, z contains the +// orthonormal eigenvectors of the symmetric tridiagonal matrix. z is not used +// if compz == lapack.EVCompNone. +// +// work must have length at least max(1, 2*n-2) if the eigenvectors are computed, +// and Dsteqr will panic otherwise. +// +// Dsteqr is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsteqr(compz lapack.EVComp, n int, d, e, z []float64, ldz int, work []float64) (ok bool) { + switch { + case compz != lapack.EVCompNone && compz != lapack.EVTridiag && compz != lapack.EVOrig: + panic(badEVComp) + case n < 0: + panic(nLT0) + case ldz < 1, compz != lapack.EVCompNone && ldz < n: + panic(badLdZ) + } + + // Quick return if possible. 
+ if n == 0 { + return true + } + + switch { + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + case compz != lapack.EVCompNone && len(z) < (n-1)*ldz+n: + panic(shortZ) + case compz != lapack.EVCompNone && len(work) < max(1, 2*n-2): + panic(shortWork) + } + + var icompz int + if compz == lapack.EVOrig { + icompz = 1 + } else if compz == lapack.EVTridiag { + icompz = 2 + } + + if n == 1 { + if icompz == 2 { + z[0] = 1 + } + return true + } + + bi := blas64.Implementation() + + eps := dlamchE + eps2 := eps * eps + safmin := dlamchS + safmax := 1 / safmin + ssfmax := math.Sqrt(safmax) / 3 + ssfmin := math.Sqrt(safmin) / eps2 + + // Compute the eigenvalues and eigenvectors of the tridiagonal matrix. + if icompz == 2 { + impl.Dlaset(blas.All, n, n, 0, 1, z, ldz) + } + const maxit = 30 + nmaxit := n * maxit + + jtot := 0 + + // Determine where the matrix splits and choose QL or QR iteration for each + // block, according to whether top or bottom diagonal element is smaller. + l1 := 0 + nm1 := n - 1 + + type scaletype int + const ( + down scaletype = iota + 1 + up + ) + var iscale scaletype + + for { + if l1 > n-1 { + // Order eigenvalues and eigenvectors. + if icompz == 0 { + impl.Dlasrt(lapack.SortIncreasing, n, d) + } else { + // TODO(btracey): Consider replacing this sort with a call to sort.Sort. 
+ for ii := 1; ii < n; ii++ { + i := ii - 1 + k := i + p := d[i] + for j := ii; j < n; j++ { + if d[j] < p { + k = j + p = d[j] + } + } + if k != i { + d[k] = d[i] + d[i] = p + bi.Dswap(n, z[i:], ldz, z[k:], ldz) + } + } + } + return true + } + if l1 > 0 { + e[l1-1] = 0 + } + var m int + if l1 <= nm1 { + for m = l1; m < nm1; m++ { + test := math.Abs(e[m]) + if test == 0 { + break + } + if test <= (math.Sqrt(math.Abs(d[m]))*math.Sqrt(math.Abs(d[m+1])))*eps { + e[m] = 0 + break + } + } + } + l := l1 + lsv := l + lend := m + lendsv := lend + l1 = m + 1 + if lend == l { + continue + } + + // Scale submatrix in rows and columns L to Lend + anorm := impl.Dlanst(lapack.MaxAbs, lend-l+1, d[l:], e[l:]) + switch { + case anorm == 0: + continue + case anorm > ssfmax: + iscale = down + // Pretend that d and e are matrices with 1 column. + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l+1, 1, d[l:], 1) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l, 1, e[l:], 1) + case anorm < ssfmin: + iscale = up + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l+1, 1, d[l:], 1) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l, 1, e[l:], 1) + } + + // Choose between QL and QR. + if math.Abs(d[lend]) < math.Abs(d[l]) { + lend = lsv + l = lendsv + } + if lend > l { + // QL Iteration. Look for small subdiagonal element. + for { + if l != lend { + for m = l; m < lend; m++ { + v := math.Abs(e[m]) + if v*v <= (eps2*math.Abs(d[m]))*math.Abs(d[m+1])+safmin { + break + } + } + } else { + m = lend + } + if m < lend { + e[m] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found. + l++ + if l > lend { + break + } + continue + } + + // If remaining matrix is 2×2, use Dlae2 to compute its eigensystem. 
+ if m == l+1 { + if icompz > 0 { + d[l], d[l+1], work[l], work[n-1+l] = impl.Dlaev2(d[l], e[l], d[l+1]) + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, + n, 2, work[l:], work[n-1+l:], z[l:], ldz) + } else { + d[l], d[l+1] = impl.Dlae2(d[l], e[l], d[l+1]) + } + e[l] = 0 + l += 2 + if l > lend { + break + } + continue + } + + if jtot == nmaxit { + break + } + jtot++ + + // Form shift + g := (d[l+1] - p) / (2 * e[l]) + r := impl.Dlapy2(g, 1) + g = d[m] - p + e[l]/(g+math.Copysign(r, g)) + s := 1.0 + c := 1.0 + p = 0.0 + + // Inner loop + for i := m - 1; i >= l; i-- { + f := s * e[i] + b := c * e[i] + c, s, r = impl.Dlartg(g, f) + if i != m-1 { + e[i+1] = r + } + g = d[i+1] - p + r = (d[i]-g)*s + 2*c*b + p = s * r + d[i+1] = g + p + g = c*r - b + + // If eigenvectors are desired, then save rotations. + if icompz > 0 { + work[i] = c + work[n-1+i] = -s + } + } + // If eigenvectors are desired, then apply saved rotations. + if icompz > 0 { + mm := m - l + 1 + impl.Dlasr(blas.Right, lapack.Variable, lapack.Backward, + n, mm, work[l:], work[n-1+l:], z[l:], ldz) + } + d[l] -= p + e[l] = g + } + } else { + // QR Iteration. + // Look for small superdiagonal element. + for { + if l != lend { + for m = l; m > lend; m-- { + v := math.Abs(e[m-1]) + if v*v <= (eps2*math.Abs(d[m])*math.Abs(d[m-1]) + safmin) { + break + } + } + } else { + m = lend + } + if m > lend { + e[m-1] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found + l-- + if l < lend { + break + } + continue + } + + // If remaining matrix is 2×2, use Dlae2 to compute its eigenvalues. + if m == l-1 { + if icompz > 0 { + d[l-1], d[l], work[m], work[n-1+m] = impl.Dlaev2(d[l-1], e[l-1], d[l]) + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, + n, 2, work[m:], work[n-1+m:], z[l-1:], ldz) + } else { + d[l-1], d[l] = impl.Dlae2(d[l-1], e[l-1], d[l]) + } + e[l-1] = 0 + l -= 2 + if l < lend { + break + } + continue + } + if jtot == nmaxit { + break + } + jtot++ + + // Form shift. 
+ g := (d[l-1] - p) / (2 * e[l-1]) + r := impl.Dlapy2(g, 1) + g = d[m] - p + (e[l-1])/(g+math.Copysign(r, g)) + s := 1.0 + c := 1.0 + p = 0.0 + + // Inner loop. + for i := m; i < l; i++ { + f := s * e[i] + b := c * e[i] + c, s, r = impl.Dlartg(g, f) + if i != m { + e[i-1] = r + } + g = d[i] - p + r = (d[i+1]-g)*s + 2*c*b + p = s * r + d[i] = g + p + g = c*r - b + + // If eigenvectors are desired, then save rotations. + if icompz > 0 { + work[i] = c + work[n-1+i] = s + } + } + + // If eigenvectors are desired, then apply saved rotations. + if icompz > 0 { + mm := l - m + 1 + impl.Dlasr(blas.Right, lapack.Variable, lapack.Forward, + n, mm, work[m:], work[n-1+m:], z[m:], ldz) + } + d[l] -= p + e[l-1] = g + } + } + + // Undo scaling if necessary. + switch iscale { + case down: + // Pretend that d and e are matrices with 1 column. + impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, d[lsv:], 1) + impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv, 1, e[lsv:], 1) + case up: + impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, d[lsv:], 1) + impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv, 1, e[lsv:], 1) + } + + // Check for no convergence to an eigenvalue after a total of n*maxit iterations. + if jtot >= nmaxit { + break + } + } + for i := 0; i < n-1; i++ { + if e[i] != 0 { + return false + } + } + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go new file mode 100644 index 0000000000..dc1e178dfa --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsterf.go @@ -0,0 +1,285 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/lapack" +) + +// Dsterf computes all eigenvalues of a symmetric tridiagonal matrix using the +// Pal-Walker-Kahan variant of the QL or QR algorithm. +// +// d contains the diagonal elements of the tridiagonal matrix on entry, and +// contains the eigenvalues in ascending order on exit. d must have length at +// least n, or Dsterf will panic. +// +// e contains the off-diagonal elements of the tridiagonal matrix on entry, and is +// overwritten during the call to Dsterf. e must have length of at least n-1 or +// Dsterf will panic. +// +// Dsterf is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsterf(n int, d, e []float64) (ok bool) { + if n < 0 { + panic(nLT0) + } + + // Quick return if possible. + if n == 0 { + return true + } + + switch { + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + } + + if n == 1 { + return true + } + + const ( + none = 0 // The values are not scaled. + down = 1 // The values are scaled below ssfmax threshold. + up = 2 // The values are scaled below ssfmin threshold. + ) + + // Determine the unit roundoff for this environment. + eps := dlamchE + eps2 := eps * eps + safmin := dlamchS + safmax := 1 / safmin + ssfmax := math.Sqrt(safmax) / 3 + ssfmin := math.Sqrt(safmin) / eps2 + + // Compute the eigenvalues of the tridiagonal matrix. + maxit := 30 + nmaxit := n * maxit + jtot := 0 + + l1 := 0 + + for { + if l1 > n-1 { + impl.Dlasrt(lapack.SortIncreasing, n, d) + return true + } + if l1 > 0 { + e[l1-1] = 0 + } + var m int + for m = l1; m < n-1; m++ { + if math.Abs(e[m]) <= math.Sqrt(math.Abs(d[m]))*math.Sqrt(math.Abs(d[m+1]))*eps { + e[m] = 0 + break + } + } + + l := l1 + lsv := l + lend := m + lendsv := lend + l1 = m + 1 + if lend == 0 { + continue + } + + // Scale submatrix in rows and columns l to lend. 
+ anorm := impl.Dlanst(lapack.MaxAbs, lend-l+1, d[l:], e[l:]) + iscale := none + if anorm == 0 { + continue + } + if anorm > ssfmax { + iscale = down + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l+1, 1, d[l:], n) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmax, lend-l, 1, e[l:], n) + } else if anorm < ssfmin { + iscale = up + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l+1, 1, d[l:], n) + impl.Dlascl(lapack.General, 0, 0, anorm, ssfmin, lend-l, 1, e[l:], n) + } + + el := e[l:lend] + for i, v := range el { + el[i] *= v + } + + // Choose between QL and QR iteration. + if math.Abs(d[lend]) < math.Abs(d[l]) { + lend = lsv + l = lendsv + } + if lend >= l { + // QL Iteration. + // Look for small sub-diagonal element. + for { + if l != lend { + for m = l; m < lend; m++ { + if math.Abs(e[m]) <= eps2*(math.Abs(d[m]*d[m+1])) { + break + } + } + } else { + m = lend + } + if m < lend { + e[m] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found. + l++ + if l > lend { + break + } + continue + } + // If remaining matrix is 2 by 2, use Dlae2 to compute its eigenvalues. + if m == l+1 { + d[l], d[l+1] = impl.Dlae2(d[l], math.Sqrt(e[l]), d[l+1]) + e[l] = 0 + l += 2 + if l > lend { + break + } + continue + } + if jtot == nmaxit { + break + } + jtot++ + + // Form shift. + rte := math.Sqrt(e[l]) + sigma := (d[l+1] - p) / (2 * rte) + r := impl.Dlapy2(sigma, 1) + sigma = p - (rte / (sigma + math.Copysign(r, sigma))) + + c := 1.0 + s := 0.0 + gamma := d[m] - sigma + p = gamma * gamma + + // Inner loop. + for i := m - 1; i >= l; i-- { + bb := e[i] + r := p + bb + if i != m-1 { + e[i+1] = s * r + } + oldc := c + c = p / r + s = bb / r + oldgam := gamma + alpha := d[i] + gamma = c*(alpha-sigma) - s*oldgam + d[i+1] = oldgam + (alpha - gamma) + if c != 0 { + p = (gamma * gamma) / c + } else { + p = oldc * bb + } + } + e[l] = s * p + d[l] = sigma + gamma + } + } else { + for { + // QR Iteration. + // Look for small super-diagonal element. 
+ for m = l; m > lend; m-- { + if math.Abs(e[m-1]) <= eps2*math.Abs(d[m]*d[m-1]) { + break + } + } + if m > lend { + e[m-1] = 0 + } + p := d[l] + if m == l { + // Eigenvalue found. + l-- + if l < lend { + break + } + continue + } + + // If remaining matrix is 2 by 2, use Dlae2 to compute its eigenvalues. + if m == l-1 { + d[l], d[l-1] = impl.Dlae2(d[l], math.Sqrt(e[l-1]), d[l-1]) + e[l-1] = 0 + l -= 2 + if l < lend { + break + } + continue + } + if jtot == nmaxit { + break + } + jtot++ + + // Form shift. + rte := math.Sqrt(e[l-1]) + sigma := (d[l-1] - p) / (2 * rte) + r := impl.Dlapy2(sigma, 1) + sigma = p - (rte / (sigma + math.Copysign(r, sigma))) + + c := 1.0 + s := 0.0 + gamma := d[m] - sigma + p = gamma * gamma + + // Inner loop. + for i := m; i < l; i++ { + bb := e[i] + r := p + bb + if i != m { + e[i-1] = s * r + } + oldc := c + c = p / r + s = bb / r + oldgam := gamma + alpha := d[i+1] + gamma = c*(alpha-sigma) - s*oldgam + d[i] = oldgam + alpha - gamma + if c != 0 { + p = (gamma * gamma) / c + } else { + p = oldc * bb + } + } + e[l-1] = s * p + d[l] = sigma + gamma + } + } + + // Undo scaling if necessary + switch iscale { + case down: + impl.Dlascl(lapack.General, 0, 0, ssfmax, anorm, lendsv-lsv+1, 1, d[lsv:], n) + case up: + impl.Dlascl(lapack.General, 0, 0, ssfmin, anorm, lendsv-lsv+1, 1, d[lsv:], n) + } + + // Check for no convergence to an eigenvalue after a total of n*maxit iterations. + if jtot >= nmaxit { + break + } + } + for _, v := range e[:n-1] { + if v != 0 { + return false + } + } + impl.Dlasrt(lapack.SortIncreasing, n, d) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go new file mode 100644 index 0000000000..5f57f3a5c9 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsyev.go @@ -0,0 +1,130 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dsyev computes all eigenvalues and, optionally, the eigenvectors of a real +// symmetric matrix A. +// +// w contains the eigenvalues in ascending order upon return. w must have length +// at least n, and Dsyev will panic otherwise. +// +// On entry, a contains the elements of the symmetric matrix A in the triangular +// portion specified by uplo. If jobz == lapack.EVCompute, a contains the +// orthonormal eigenvectors of A on exit, otherwise jobz must be lapack.EVNone +// and on exit the specified triangular region is overwritten. +// +// work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= 3*n-1, and Dsyev will panic otherwise. The amount of blocking is +// limited by the usable length. If lwork == -1, instead of computing Dsyev the +// optimal work length is stored into work[0]. +func (impl Implementation) Dsyev(jobz lapack.EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool) { + switch { + case jobz != lapack.EVNone && jobz != lapack.EVCompute: + panic(badEVJob) + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < max(1, 3*n-1) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. 
+ if n == 0 { + return true + } + + var opts string + if uplo == blas.Upper { + opts = "U" + } else { + opts = "L" + } + nb := impl.Ilaenv(1, "DSYTRD", opts, n, -1, -1, -1) + lworkopt := max(1, (nb+2)*n) + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(w) < n: + panic(shortW) + } + + if n == 1 { + w[0] = a[0] + work[0] = 2 + if jobz == lapack.EVCompute { + a[0] = 1 + } + return true + } + + safmin := dlamchS + eps := dlamchP + smlnum := safmin / eps + bignum := 1 / smlnum + rmin := math.Sqrt(smlnum) + rmax := math.Sqrt(bignum) + + // Scale matrix to allowable range, if necessary. + anrm := impl.Dlansy(lapack.MaxAbs, uplo, n, a, lda, work) + scaled := false + var sigma float64 + if anrm > 0 && anrm < rmin { + scaled = true + sigma = rmin / anrm + } else if anrm > rmax { + scaled = true + sigma = rmax / anrm + } + if scaled { + kind := lapack.LowerTri + if uplo == blas.Upper { + kind = lapack.UpperTri + } + impl.Dlascl(kind, 0, 0, 1, sigma, n, n, a, lda) + } + var inde int + indtau := inde + n + indwork := indtau + n + llwork := lwork - indwork + impl.Dsytrd(uplo, n, a, lda, w, work[inde:], work[indtau:], work[indwork:], llwork) + + // For eigenvalues only, call Dsterf. For eigenvectors, first call Dorgtr + // to generate the orthogonal matrix, then call Dsteqr. + if jobz == lapack.EVNone { + ok = impl.Dsterf(n, w, work[inde:]) + } else { + impl.Dorgtr(uplo, n, a, lda, work[indtau:], work[indwork:], llwork) + ok = impl.Dsteqr(lapack.EVComp(jobz), n, w, work[inde:], a, lda, work[indtau:]) + } + if !ok { + return false + } + + // If the matrix was scaled, then rescale eigenvalues appropriately. 
+ if scaled { + bi := blas64.Implementation() + bi.Dscal(n, 1/sigma, w, 1) + } + work[0] = float64(lworkopt) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go new file mode 100644 index 0000000000..23cfd05773 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytd2.go @@ -0,0 +1,136 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dsytd2 reduces a symmetric n×n matrix A to symmetric tridiagonal form T by +// an orthogonal similarity transformation +// Q^T * A * Q = T +// On entry, the matrix is contained in the specified triangle of a. On exit, +// if uplo == blas.Upper, the diagonal and first super-diagonal of a are +// overwritten with the elements of T. The elements above the first super-diagonal +// are overwritten with the elementary reflectors that are used with +// the elements written to tau in order to construct Q. If uplo == blas.Lower, +// the elements are written in the lower triangular region. +// +// d must have length at least n. e and tau must have length at least n-1. Dsytd2 +// will panic if these sizes are not met. +// +// Q is represented as a product of elementary reflectors. +// If uplo == blas.Upper +// Q = H_{n-2} * ... * H_1 * H_0 +// and if uplo == blas.Lower +// Q = H_0 * H_1 * ... * H_{n-2} +// where +// H_i = I - tau * v * v^T +// where tau is stored in tau[i], and v is stored in a. +// +// If uplo == blas.Upper, v[0:i-1] is stored in A[0:i-1,i+1], v[i] = 1, and +// v[i+1:] = 0. The elements of a are +// [ d e v2 v3 v4] +// [ d e v3 v4] +// [ d e v4] +// [ d e] +// [ d] +// If uplo == blas.Lower, v[0:i+1] = 0, v[i+1] = 1, and v[i+2:] is stored in +// A[i+2:n,i]. 
+// The elements of a are +// [ d ] +// [ e d ] +// [v1 e d ] +// [v1 v2 e d ] +// [v1 v2 v3 e d] +// +// Dsytd2 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsytd2(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau []float64) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + // Quick return if possible. + if n == 0 { + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + case len(tau) < n-1: + panic(shortTau) + } + + bi := blas64.Implementation() + + if uplo == blas.Upper { + // Reduce the upper triangle of A. + for i := n - 2; i >= 0; i-- { + // Generate elementary reflector H_i = I - tau * v * v^T to + // annihilate A[i:i-1, i+1]. + var taui float64 + a[i*lda+i+1], taui = impl.Dlarfg(i+1, a[i*lda+i+1], a[i+1:], lda) + e[i] = a[i*lda+i+1] + if taui != 0 { + // Apply H_i from both sides to A[0:i,0:i]. + a[i*lda+i+1] = 1 + + // Compute x := tau * A * v storing x in tau[0:i]. + bi.Dsymv(uplo, i+1, taui, a, lda, a[i+1:], lda, 0, tau, 1) + + // Compute w := x - 1/2 * tau * (x^T * v) * v. + alpha := -0.5 * taui * bi.Ddot(i+1, tau, 1, a[i+1:], lda) + bi.Daxpy(i+1, alpha, a[i+1:], lda, tau, 1) + + // Apply the transformation as a rank-2 update + // A = A - v * w^T - w * v^T. + bi.Dsyr2(uplo, i+1, -1, a[i+1:], lda, tau, 1, a, lda) + a[i*lda+i+1] = e[i] + } + d[i+1] = a[(i+1)*lda+i+1] + tau[i] = taui + } + d[0] = a[0] + return + } + // Reduce the lower triangle of A. + for i := 0; i < n-1; i++ { + // Generate elementary reflector H_i = I - tau * v * v^T to + // annihilate A[i+2:n, i]. + var taui float64 + a[(i+1)*lda+i], taui = impl.Dlarfg(n-i-1, a[(i+1)*lda+i], a[min(i+2, n-1)*lda+i:], lda) + e[i] = a[(i+1)*lda+i] + if taui != 0 { + // Apply H_i from both sides to A[i+1:n, i+1:n]. 
+ a[(i+1)*lda+i] = 1 + + // Compute x := tau * A * v, storing y in tau[i:n-1]. + bi.Dsymv(uplo, n-i-1, taui, a[(i+1)*lda+i+1:], lda, a[(i+1)*lda+i:], lda, 0, tau[i:], 1) + + // Compute w := x - 1/2 * tau * (x^T * v) * v. + alpha := -0.5 * taui * bi.Ddot(n-i-1, tau[i:], 1, a[(i+1)*lda+i:], lda) + bi.Daxpy(n-i-1, alpha, a[(i+1)*lda+i:], lda, tau[i:], 1) + + // Apply the transformation as a rank-2 update + // A = A - v * w^T - w * v^T. + bi.Dsyr2(uplo, n-i-1, -1, a[(i+1)*lda+i:], lda, tau[i:], 1, a[(i+1)*lda+i+1:], lda) + a[(i+1)*lda+i] = e[i] + } + d[i] = a[i*lda+i] + tau[i] = taui + } + d[n-1] = a[(n-1)*lda+n-1] +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go new file mode 100644 index 0000000000..df47568e92 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dsytrd.go @@ -0,0 +1,172 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dsytrd reduces a symmetric n×n matrix A to symmetric tridiagonal form by an +// orthogonal similarity transformation +// Q^T * A * Q = T +// where Q is an orthonormal matrix and T is symmetric and tridiagonal. +// +// On entry, a contains the elements of the input matrix in the triangle specified +// by uplo. On exit, the diagonal and sub/super-diagonal are overwritten by the +// corresponding elements of the tridiagonal matrix T. The remaining elements in +// the triangle, along with the array tau, contain the data to construct Q as +// the product of elementary reflectors. +// +// If uplo == blas.Upper, Q is constructed with +// Q = H_{n-2} * ... * H_1 * H_0 +// where +// H_i = I - tau_i * v * v^T +// v is constructed as v[i+1:n] = 0, v[i] = 1, v[0:i-1] is stored in A[0:i-1, i+1]. 
+// The elements of A are +// [ d e v1 v2 v3] +// [ d e v2 v3] +// [ d e v3] +// [ d e] +// [ e] +// +// If uplo == blas.Lower, Q is constructed with +// Q = H_0 * H_1 * ... * H_{n-2} +// where +// H_i = I - tau_i * v * v^T +// v is constructed as v[0:i+1] = 0, v[i+1] = 1, v[i+2:n] is stored in A[i+2:n, i]. +// The elements of A are +// [ d ] +// [ e d ] +// [v0 e d ] +// [v0 v1 e d ] +// [v0 v1 v2 e d] +// +// d must have length n, and e and tau must have length n-1. Dsytrd will panic if +// these conditions are not met. +// +// work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= 1, and Dsytrd will panic otherwise. The amount of blocking is +// limited by the usable length. +// If lwork == -1, instead of computing Dsytrd the optimal work length is stored +// into work[0]. +// +// Dsytrd is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dsytrd(uplo blas.Uplo, n int, a []float64, lda int, d, e, tau, work []float64, lwork int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + case lwork < 1 && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. + if n == 0 { + work[0] = 1 + return + } + + nb := impl.Ilaenv(1, "DSYTRD", string(uplo), n, -1, -1, -1) + lworkopt := n * nb + if lwork == -1 { + work[0] = float64(lworkopt) + return + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(d) < n: + panic(shortD) + case len(e) < n-1: + panic(shortE) + case len(tau) < n-1: + panic(shortTau) + } + + bi := blas64.Implementation() + + nx := n + iws := 1 + var ldwork int + if 1 < nb && nb < n { + // Determine when to cross over from blocked to unblocked code. The last + // block is always handled by unblocked code. 
+ nx = max(nb, impl.Ilaenv(3, "DSYTRD", string(uplo), n, -1, -1, -1)) + if nx < n { + // Determine if workspace is large enough for blocked code. + ldwork = nb + iws = n * ldwork + if lwork < iws { + // Not enough workspace to use optimal nb: determine the minimum + // value of nb and reduce nb or force use of unblocked code by + // setting nx = n. + nb = max(lwork/n, 1) + nbmin := impl.Ilaenv(2, "DSYTRD", string(uplo), n, -1, -1, -1) + if nb < nbmin { + nx = n + } + } + } else { + nx = n + } + } else { + nb = 1 + } + ldwork = nb + + if uplo == blas.Upper { + // Reduce the upper triangle of A. Columns 0:kk are handled by the + // unblocked method. + var i int + kk := n - ((n-nx+nb-1)/nb)*nb + for i = n - nb; i >= kk; i -= nb { + // Reduce columns i:i+nb to tridiagonal form and form the matrix W + // which is needed to update the unreduced part of the matrix. + impl.Dlatrd(uplo, i+nb, nb, a, lda, e, tau, work, ldwork) + + // Update the unreduced submatrix A[0:i-1,0:i-1], using an update + // of the form A = A - V*W^T - W*V^T. + bi.Dsyr2k(uplo, blas.NoTrans, i, nb, -1, a[i:], lda, work, ldwork, 1, a, lda) + + // Copy superdiagonal elements back into A, and diagonal elements into D. + for j := i; j < i+nb; j++ { + a[(j-1)*lda+j] = e[j-1] + d[j] = a[j*lda+j] + } + } + // Use unblocked code to reduce the last or only block + // check that i == kk. + impl.Dsytd2(uplo, kk, a, lda, d, e, tau) + } else { + var i int + // Reduce the lower triangle of A. + for i = 0; i < n-nx; i += nb { + // Reduce columns 0:i+nb to tridiagonal form and form the matrix W + // which is needed to update the unreduced part of the matrix. + impl.Dlatrd(uplo, n-i, nb, a[i*lda+i:], lda, e[i:], tau[i:], work, ldwork) + + // Update the unreduced submatrix A[i+ib:n, i+ib:n], using an update + // of the form A = A + V*W^T - W*V^T. 
+ bi.Dsyr2k(uplo, blas.NoTrans, n-i-nb, nb, -1, a[(i+nb)*lda+i:], lda, + work[nb*ldwork:], ldwork, 1, a[(i+nb)*lda+i+nb:], lda) + + // Copy subdiagonal elements back into A, and diagonal elements into D. + for j := i; j < i+nb; j++ { + a[(j+1)*lda+j] = e[j] + d[j] = a[j*lda+j] + } + } + // Use unblocked code to reduce the last or only block. + impl.Dsytd2(uplo, n-i, a[i*lda+i:], lda, d[i:], e[i:], tau[i:]) + } + work[0] = float64(iws) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go new file mode 100644 index 0000000000..d70d196234 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtgsja.go @@ -0,0 +1,373 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dtgsja computes the generalized singular value decomposition (GSVD) +// of two real upper triangular or trapezoidal matrices A and B. +// +// A and B have the following forms, which may be obtained by the +// preprocessing subroutine Dggsvp from a general m×n matrix A and p×n +// matrix B: +// +// n-k-l k l +// A = k [ 0 A12 A13 ] if m-k-l >= 0; +// l [ 0 0 A23 ] +// m-k-l [ 0 0 0 ] +// +// n-k-l k l +// A = k [ 0 A12 A13 ] if m-k-l < 0; +// m-k [ 0 0 A23 ] +// +// n-k-l k l +// B = l [ 0 0 B13 ] +// p-l [ 0 0 0 ] +// +// where the k×k matrix A12 and l×l matrix B13 are non-singular +// upper triangular. A23 is l×l upper triangular if m-k-l >= 0, +// otherwise A23 is (m-k)×l upper trapezoidal. +// +// On exit, +// +// U^T*A*Q = D1*[ 0 R ], V^T*B*Q = D2*[ 0 R ], +// +// where U, V and Q are orthogonal matrices. 
+// R is a non-singular upper triangular matrix, and D1 and D2 are +// diagonal matrices, which are of the following structures: +// +// If m-k-l >= 0, +// +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] +// +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] +// +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l +// +// where +// +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. +// +// R is stored in +// A[0:k+l, n-k-l:n] +// on exit. +// +// If m-k-l < 0, +// +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] +// +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] +// +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] +// +// where +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[0:m, n-k-l:n] +// [ 0 R22 R23 ] +// and R33 is stored in +// B[m-k:l, n+m-k-l:n] on exit. +// +// The computation of the orthogonal transformation matrices U, V or Q +// is optional. These matrices may either be formed explicitly, or they +// may be post-multiplied into input matrices U1, V1, or Q1. +// +// Dtgsja essentially uses a variant of Kogbetliantz algorithm to reduce +// min(l,m-k)×l triangular or trapezoidal matrix A23 and l×l +// matrix B13 to the form: +// +// U1^T*A13*Q1 = C1*R1; V1^T*B13*Q1 = S1*R1, +// +// where U1, V1 and Q1 are orthogonal matrices. C1 and S1 are diagonal +// matrices satisfying +// +// C1^2 + S1^2 = I, +// +// and R1 is an l×l non-singular upper triangular matrix. +// +// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior +// is as follows +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDUnit Use unit-initialized matrix +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. 
+// The behavior is the same for jobV and jobQ with the exception that instead of +// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. +// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the +// relevant job parameter is lapack.GSVDNone. +// +// k and l specify the sub-blocks in the input matrices A and B: +// A23 = A[k:min(k+l,m), n-l:n) and B13 = B[0:l, n-l:n] +// of A and B, whose GSVD is going to be computed by Dtgsja. +// +// tola and tolb are the convergence criteria for the Jacobi-Kogbetliantz +// iteration procedure. Generally, they are the same as used in the preprocessing +// step, for example, +// tola = max(m, n)*norm(A)*eps, +// tolb = max(p, n)*norm(B)*eps, +// where eps is the machine epsilon. +// +// work must have length at least 2*n, otherwise Dtgsja will panic. +// +// alpha and beta must have length n or Dtgsja will panic. On exit, alpha and +// beta contain the generalized singular value pairs of A and B +// alpha[0:k] = 1, +// beta[0:k] = 0, +// if m-k-l >= 0, +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// if m-k-l < 0, +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// if k+l < n, +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. +// +// On exit, A[n-k:n, 0:min(k+l,m)] contains the triangular matrix R or part of R +// and if necessary, B[m-k:l, n+m-k-l:n] contains a part of R. +// +// Dtgsja returns whether the routine converged and the number of iteration cycles +// that were run. +// +// Dtgsja is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dtgsja(jobU, jobV, jobQ lapack.GSVDJob, m, p, n, k, l int, a []float64, lda int, b []float64, ldb int, tola, tolb float64, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64) (cycles int, ok bool) { + const maxit = 40 + + initu := jobU == lapack.GSVDUnit + wantu := initu || jobU == lapack.GSVDU + + initv := jobV == lapack.GSVDUnit + wantv := initv || jobV == lapack.GSVDV + + initq := jobQ == lapack.GSVDUnit + wantq := initq || jobQ == lapack.GSVDQ + + switch { + case !initu && !wantu && jobU != lapack.GSVDNone: + panic(badGSVDJob + "U") + case !initv && !wantv && jobV != lapack.GSVDNone: + panic(badGSVDJob + "V") + case !initq && !wantq && jobQ != lapack.GSVDNone: + panic(badGSVDJob + "Q") + case m < 0: + panic(mLT0) + case p < 0: + panic(pLT0) + case n < 0: + panic(nLT0) + + case lda < max(1, n): + panic(badLdA) + case len(a) < (m-1)*lda+n: + panic(shortA) + + case ldb < max(1, n): + panic(badLdB) + case len(b) < (p-1)*ldb+n: + panic(shortB) + + case len(alpha) != n: + panic(badLenAlpha) + case len(beta) != n: + panic(badLenBeta) + + case ldu < 1, wantu && ldu < m: + panic(badLdU) + case wantu && len(u) < (m-1)*ldu+m: + panic(shortU) + + case ldv < 1, wantv && ldv < p: + panic(badLdV) + case wantv && len(v) < (p-1)*ldv+p: + panic(shortV) + + case ldq < 1, wantq && ldq < n: + panic(badLdQ) + case wantq && len(q) < (n-1)*ldq+n: + panic(shortQ) + + case len(work) < 2*n: + panic(shortWork) + } + + // Initialize U, V and Q, if necessary + if initu { + impl.Dlaset(blas.All, m, m, 0, 1, u, ldu) + } + if initv { + impl.Dlaset(blas.All, p, p, 0, 1, v, ldv) + } + if initq { + impl.Dlaset(blas.All, n, n, 0, 1, q, ldq) + } + + bi := blas64.Implementation() + minTol := math.Min(tola, tolb) + + // Loop until convergence. 
+ upper := false + for cycles = 1; cycles <= maxit; cycles++ { + upper = !upper + + for i := 0; i < l-1; i++ { + for j := i + 1; j < l; j++ { + var a1, a2, a3 float64 + if k+i < m { + a1 = a[(k+i)*lda+n-l+i] + } + if k+j < m { + a3 = a[(k+j)*lda+n-l+j] + } + + b1 := b[i*ldb+n-l+i] + b3 := b[j*ldb+n-l+j] + + var b2 float64 + if upper { + if k+i < m { + a2 = a[(k+i)*lda+n-l+j] + } + b2 = b[i*ldb+n-l+j] + } else { + if k+j < m { + a2 = a[(k+j)*lda+n-l+i] + } + b2 = b[j*ldb+n-l+i] + } + + csu, snu, csv, snv, csq, snq := impl.Dlags2(upper, a1, a2, a3, b1, b2, b3) + + // Update (k+i)-th and (k+j)-th rows of matrix A: U^T*A. + if k+j < m { + bi.Drot(l, a[(k+j)*lda+n-l:], 1, a[(k+i)*lda+n-l:], 1, csu, snu) + } + + // Update i-th and j-th rows of matrix B: V^T*B. + bi.Drot(l, b[j*ldb+n-l:], 1, b[i*ldb+n-l:], 1, csv, snv) + + // Update (n-l+i)-th and (n-l+j)-th columns of matrices + // A and B: A*Q and B*Q. + bi.Drot(min(k+l, m), a[n-l+j:], lda, a[n-l+i:], lda, csq, snq) + bi.Drot(l, b[n-l+j:], ldb, b[n-l+i:], ldb, csq, snq) + + if upper { + if k+i < m { + a[(k+i)*lda+n-l+j] = 0 + } + b[i*ldb+n-l+j] = 0 + } else { + if k+j < m { + a[(k+j)*lda+n-l+i] = 0 + } + b[j*ldb+n-l+i] = 0 + } + + // Update orthogonal matrices U, V, Q, if desired. + if wantu && k+j < m { + bi.Drot(m, u[k+j:], ldu, u[k+i:], ldu, csu, snu) + } + if wantv { + bi.Drot(p, v[j:], ldv, v[i:], ldv, csv, snv) + } + if wantq { + bi.Drot(n, q[n-l+j:], ldq, q[n-l+i:], ldq, csq, snq) + } + } + } + + if !upper { + // The matrices A13 and B13 were lower triangular at the start + // of the cycle, and are now upper triangular. + // + // Convergence test: test the parallelism of the corresponding + // rows of A and B. + var error float64 + for i := 0; i < min(l, m-k); i++ { + bi.Dcopy(l-i, a[(k+i)*lda+n-l+i:], 1, work, 1) + bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, work[l:], 1) + ssmin := impl.Dlapll(l-i, work, 1, work[l:], 1) + error = math.Max(error, ssmin) + } + if math.Abs(error) <= minTol { + // The algorithm has converged. 
+ // Compute the generalized singular value pairs (alpha, beta) + // and set the triangular matrix R to array A. + for i := 0; i < k; i++ { + alpha[i] = 1 + beta[i] = 0 + } + + for i := 0; i < min(l, m-k); i++ { + a1 := a[(k+i)*lda+n-l+i] + b1 := b[i*ldb+n-l+i] + + if a1 != 0 { + gamma := b1 / a1 + + // Change sign if necessary. + if gamma < 0 { + bi.Dscal(l-i, -1, b[i*ldb+n-l+i:], 1) + if wantv { + bi.Dscal(p, -1, v[i:], ldv) + } + } + beta[k+i], alpha[k+i], _ = impl.Dlartg(math.Abs(gamma), 1) + + if alpha[k+i] >= beta[k+i] { + bi.Dscal(l-i, 1/alpha[k+i], a[(k+i)*lda+n-l+i:], 1) + } else { + bi.Dscal(l-i, 1/beta[k+i], b[i*ldb+n-l+i:], 1) + bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, a[(k+i)*lda+n-l+i:], 1) + } + } else { + alpha[k+i] = 0 + beta[k+i] = 1 + bi.Dcopy(l-i, b[i*ldb+n-l+i:], 1, a[(k+i)*lda+n-l+i:], 1) + } + } + + for i := m; i < k+l; i++ { + alpha[i] = 0 + beta[i] = 1 + } + if k+l < n { + for i := k + l; i < n; i++ { + alpha[i] = 0 + beta[i] = 0 + } + } + + return cycles, true + } + } + } + + // The algorithm has not converged after maxit cycles. + return cycles, false +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go new file mode 100644 index 0000000000..899c95dd58 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrcon.go @@ -0,0 +1,90 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dtrcon estimates the reciprocal of the condition number of a triangular matrix A. +// The condition number computed may be based on the 1-norm or the ∞-norm. +// +// work is a temporary data slice of length at least 3*n and Dtrcon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Dtrcon will panic otherwise. 
+func (impl Implementation) Dtrcon(norm lapack.MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64 { + switch { + case norm != lapack.MaxColumnSum && norm != lapack.MaxRowSum: + panic(badNorm) + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case diag != blas.NonUnit && diag != blas.Unit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return 1 + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(work) < 3*n: + panic(shortWork) + case len(iwork) < n: + panic(shortIWork) + } + + bi := blas64.Implementation() + + var rcond float64 + smlnum := dlamchS * float64(n) + + anorm := impl.Dlantr(norm, uplo, diag, n, n, a, lda, work) + + if anorm <= 0 { + return rcond + } + var ainvnm float64 + var normin bool + kase1 := 2 + if norm == lapack.MaxColumnSum { + kase1 = 1 + } + var kase int + isave := new([3]int) + var scale float64 + for { + ainvnm, kase = impl.Dlacn2(n, work[n:], work, iwork, ainvnm, kase, isave) + if kase == 0 { + if ainvnm != 0 { + rcond = (1 / anorm) / ainvnm + } + return rcond + } + if kase == kase1 { + scale = impl.Dlatrs(uplo, blas.NoTrans, diag, normin, n, a, lda, work, work[2*n:]) + } else { + scale = impl.Dlatrs(uplo, blas.Trans, diag, normin, n, a, lda, work, work[2*n:]) + } + normin = true + if scale != 1 { + ix := bi.Idamax(n, work, 1) + xnorm := math.Abs(work[ix]) + if scale == 0 || scale < xnorm*smlnum { + return rcond + } + impl.Drscl(n, scale, work, 1) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go new file mode 100644 index 0000000000..17121b8dbf --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrevc3.go @@ -0,0 +1,885 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" +) + +// Dtrevc3 computes some or all of the right and/or left eigenvectors of an n×n +// upper quasi-triangular matrix T in Schur canonical form. Matrices of this +// type are produced by the Schur factorization of a real general matrix A +// A = Q T Q^T, +// as computed by Dhseqr. +// +// The right eigenvector x of T corresponding to an +// eigenvalue λ is defined by +// T x = λ x, +// and the left eigenvector y is defined by +// y^T T = λ y^T. +// +// The eigenvalues are read directly from the diagonal blocks of T. +// +// This routine returns the matrices X and/or Y of right and left eigenvectors +// of T, or the products Q*X and/or Q*Y, where Q is an input matrix. If Q is the +// orthogonal factor that reduces a matrix A to Schur form T, then Q*X and Q*Y +// are the matrices of right and left eigenvectors of A. +// +// If side == lapack.EVRight, only right eigenvectors will be computed. +// If side == lapack.EVLeft, only left eigenvectors will be computed. +// If side == lapack.EVBoth, both right and left eigenvectors will be computed. +// For other values of side, Dtrevc3 will panic. +// +// If howmny == lapack.EVAll, all right and/or left eigenvectors will be +// computed. +// If howmny == lapack.EVAllMulQ, all right and/or left eigenvectors will be +// computed and multiplied from left by the matrices in VR and/or VL. +// If howmny == lapack.EVSelected, right and/or left eigenvectors will be +// computed as indicated by selected. +// For other values of howmny, Dtrevc3 will panic. +// +// selected specifies which eigenvectors will be computed. It must have length n +// if howmny == lapack.EVSelected, and it is not referenced otherwise. +// If w_j is a real eigenvalue, the corresponding real eigenvector will be +// computed if selected[j] is true. 
+// If w_j and w_{j+1} are the real and imaginary parts of a complex eigenvalue, +// the corresponding complex eigenvector is computed if either selected[j] or +// selected[j+1] is true, and on return selected[j] will be set to true and +// selected[j+1] will be set to false. +// +// VL and VR are n×mm matrices. If howmny is lapack.EVAll or +// lapack.AllEVMulQ, mm must be at least n. If howmny is +// lapack.EVSelected, mm must be large enough to store the selected +// eigenvectors. Each selected real eigenvector occupies one column and each +// selected complex eigenvector occupies two columns. If mm is not sufficiently +// large, Dtrevc3 will panic. +// +// On entry, if howmny is lapack.EVAllMulQ, it is assumed that VL (if side +// is lapack.EVLeft or lapack.EVBoth) contains an n×n matrix QL, +// and that VR (if side is lapack.EVLeft or lapack.EVBoth) contains +// an n×n matrix QR. QL and QR are typically the orthogonal matrix Q of Schur +// vectors returned by Dhseqr. +// +// On return, if side is lapack.EVLeft or lapack.EVBoth, +// VL will contain: +// if howmny == lapack.EVAll, the matrix Y of left eigenvectors of T, +// if howmny == lapack.EVAllMulQ, the matrix Q*Y, +// if howmny == lapack.EVSelected, the left eigenvectors of T specified by +// selected, stored consecutively in the +// columns of VL, in the same order as their +// eigenvalues. +// VL is not referenced if side == lapack.EVRight. +// +// On return, if side is lapack.EVRight or lapack.EVBoth, +// VR will contain: +// if howmny == lapack.EVAll, the matrix X of right eigenvectors of T, +// if howmny == lapack.EVAllMulQ, the matrix Q*X, +// if howmny == lapack.EVSelected, the left eigenvectors of T specified by +// selected, stored consecutively in the +// columns of VR, in the same order as their +// eigenvalues. +// VR is not referenced if side == lapack.EVLeft. 
+// +// Complex eigenvectors corresponding to a complex eigenvalue are stored in VL +// and VR in two consecutive columns, the first holding the real part, and the +// second the imaginary part. +// +// Each eigenvector will be normalized so that the element of largest magnitude +// has magnitude 1. Here the magnitude of a complex number (x,y) is taken to be +// |x| + |y|. +// +// work must have length at least lwork and lwork must be at least max(1,3*n), +// otherwise Dtrevc3 will panic. For optimum performance, lwork should be at +// least n+2*n*nb, where nb is the optimal blocksize. +// +// If lwork == -1, instead of performing Dtrevc3, the function only estimates +// the optimal workspace size based on n and stores it into work[0]. +// +// Dtrevc3 returns the number of columns in VL and/or VR actually used to store +// the eigenvectors. +// +// Dtrevc3 is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dtrevc3(side lapack.EVSide, howmny lapack.EVHowMany, selected []bool, n int, t []float64, ldt int, vl []float64, ldvl int, vr []float64, ldvr int, mm int, work []float64, lwork int) (m int) { + bothv := side == lapack.EVBoth + rightv := side == lapack.EVRight || bothv + leftv := side == lapack.EVLeft || bothv + switch { + case !rightv && !leftv: + panic(badEVSide) + case howmny != lapack.EVAll && howmny != lapack.EVAllMulQ && howmny != lapack.EVSelected: + panic(badEVHowMany) + case n < 0: + panic(nLT0) + case ldt < max(1, n): + panic(badLdT) + case mm < 0: + panic(mmLT0) + case ldvl < 1: + // ldvl and ldvr are also checked below after the computation of + // m (number of columns of VL and VR) in case of howmny == EVSelected. + panic(badLdVL) + case ldvr < 1: + panic(badLdVR) + case lwork < max(1, 3*n) && lwork != -1: + panic(badLWork) + case len(work) < max(1, lwork): + panic(shortWork) + } + + // Quick return if possible. 
+ if n == 0 { + work[0] = 1 + return 0 + } + + // Normally we don't check slice lengths until after the workspace + // query. However, even in case of the workspace query we need to + // compute and return the value of m, and since the computation accesses t, + // we put the length check of t here. + if len(t) < (n-1)*ldt+n { + panic(shortT) + } + + if howmny == lapack.EVSelected { + if len(selected) != n { + panic(badLenSelected) + } + // Set m to the number of columns required to store the selected + // eigenvectors, and standardize the slice selected. + // Each selected real eigenvector occupies one column and each + // selected complex eigenvector occupies two columns. + for j := 0; j < n; { + if j == n-1 || t[(j+1)*ldt+j] == 0 { + // Diagonal 1×1 block corresponding to a + // real eigenvalue. + if selected[j] { + m++ + } + j++ + } else { + // Diagonal 2×2 block corresponding to a + // complex eigenvalue. + if selected[j] || selected[j+1] { + selected[j] = true + selected[j+1] = false + m += 2 + } + j += 2 + } + } + } else { + m = n + } + if mm < m { + panic(badMm) + } + + // Quick return in case of a workspace query. + nb := impl.Ilaenv(1, "DTREVC", string(side)+string(howmny), n, -1, -1, -1) + if lwork == -1 { + work[0] = float64(n + 2*n*nb) + return m + } + + // Quick return if no eigenvectors were selected. + if m == 0 { + return 0 + } + + switch { + case leftv && ldvl < mm: + panic(badLdVL) + case leftv && len(vl) < (n-1)*ldvl+mm: + panic(shortVL) + + case rightv && ldvr < mm: + panic(badLdVR) + case rightv && len(vr) < (n-1)*ldvr+mm: + panic(shortVR) + } + + // Use blocked version of back-transformation if sufficient workspace. + // Zero-out the workspace to avoid potential NaN propagation. + const ( + nbmin = 8 + nbmax = 128 + ) + if howmny == lapack.EVAllMulQ && lwork >= n+2*n*nbmin { + nb = min((lwork-n)/(2*n), nbmax) + impl.Dlaset(blas.All, n, 1+2*nb, 0, 0, work[:n+2*nb*n], 1+2*nb) + } else { + nb = 1 + } + + // Set the constants to control overflow. 
+ ulp := dlamchP + smlnum := float64(n) / ulp * dlamchS + bignum := (1 - ulp) / smlnum + + // Split work into a vector of column norms and an n×2*nb matrix b. + norms := work[:n] + ldb := 2 * nb + b := work[n : n+n*ldb] + + // Compute 1-norm of each column of strictly upper triangular part of T + // to control overflow in triangular solver. + norms[0] = 0 + for j := 1; j < n; j++ { + var cn float64 + for i := 0; i < j; i++ { + cn += math.Abs(t[i*ldt+j]) + } + norms[j] = cn + } + + bi := blas64.Implementation() + + var ( + x [4]float64 + + iv int // Index of column in current block. + is int + + // ip is used below to specify the real or complex eigenvalue: + // ip == 0, real eigenvalue, + // 1, first of conjugate complex pair (wr,wi), + // -1, second of conjugate complex pair (wr,wi). + ip int + iscomplex [nbmax]int // Stores ip for each column in current block. + ) + + if side == lapack.EVLeft { + goto leftev + } + + // Compute right eigenvectors. + + // For complex right vector, iv-1 is for real part and iv for complex + // part. Non-blocked version always uses iv=1, blocked version starts + // with iv=nb-1 and goes down to 0 or 1. + iv = max(2, nb) - 1 + ip = 0 + is = m - 1 + for ki := n - 1; ki >= 0; ki-- { + if ip == -1 { + // Previous iteration (ki+1) was second of + // conjugate pair, so this ki is first of + // conjugate pair. + ip = 1 + continue + } + + if ki == 0 || t[ki*ldt+ki-1] == 0 { + // Last column or zero on sub-diagonal, so this + // ki must be real eigenvalue. + ip = 0 + } else { + // Non-zero on sub-diagonal, so this ki is + // second of conjugate pair. + ip = -1 + } + + if howmny == lapack.EVSelected { + if ip == 0 { + if !selected[ki] { + continue + } + } else if !selected[ki-1] { + continue + } + } + + // Compute the ki-th eigenvalue (wr,wi). 
+ wr := t[ki*ldt+ki] + var wi float64 + if ip != 0 { + wi = math.Sqrt(math.Abs(t[ki*ldt+ki-1])) * math.Sqrt(math.Abs(t[(ki-1)*ldt+ki])) + } + smin := math.Max(ulp*(math.Abs(wr)+math.Abs(wi)), smlnum) + + if ip == 0 { + // Real right eigenvector. + + b[ki*ldb+iv] = 1 + // Form right-hand side. + for k := 0; k < ki; k++ { + b[k*ldb+iv] = -t[k*ldt+ki] + } + // Solve upper quasi-triangular system: + // [ T[0:ki,0:ki] - wr ]*X = scale*b. + for j := ki - 1; j >= 0; { + if j == 0 || t[j*ldt+j-1] == 0 { + // 1×1 diagonal block. + scale, xnorm, _ := impl.Dlaln2(false, 1, 1, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:1], 2) + // Scale X[0,0] to avoid overflow when updating the + // right-hand side. + if xnorm > 1 && norms[j] > bignum/xnorm { + x[0] /= xnorm + scale /= xnorm + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[j*ldb+iv] = x[0] + // Update right-hand side. + bi.Daxpy(j, -x[0], t[j:], ldt, b[iv:], ldb) + j-- + } else { + // 2×2 diagonal block. + scale, xnorm, _ := impl.Dlaln2(false, 2, 1, smin, 1, t[(j-1)*ldt+j-1:], ldt, + 1, 1, b[(j-1)*ldb+iv:], ldb, wr, 0, x[:3], 2) + // Scale X[0,0] and X[1,0] to avoid overflow + // when updating the right-hand side. + if xnorm > 1 { + beta := math.Max(norms[j-1], norms[j]) + if beta > bignum/xnorm { + x[0] /= xnorm + x[2] /= xnorm + scale /= xnorm + } + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[(j-1)*ldb+iv] = x[0] + b[j*ldb+iv] = x[2] + // Update right-hand side. + bi.Daxpy(j-1, -x[0], t[j-1:], ldt, b[iv:], ldb) + bi.Daxpy(j-1, -x[2], t[j:], ldt, b[iv:], ldb) + j -= 2 + } + } + // Copy the vector x or Q*x to VR and normalize. + switch { + case howmny != lapack.EVAllMulQ: + // No back-transform: copy x to VR and normalize. 
+ bi.Dcopy(ki+1, b[iv:], ldb, vr[is:], ldvr) + ii := bi.Idamax(ki+1, vr[is:], ldvr) + remax := 1 / math.Abs(vr[ii*ldvr+is]) + bi.Dscal(ki+1, remax, vr[is:], ldvr) + for k := ki + 1; k < n; k++ { + vr[k*ldvr+is] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. + if ki > 0 { + bi.Dgemv(blas.NoTrans, n, ki, 1, vr, ldvr, b[iv:], ldb, + b[ki*ldb+iv], vr[ki:], ldvr) + } + ii := bi.Idamax(n, vr[ki:], ldvr) + remax := 1 / math.Abs(vr[ii*ldvr+ki]) + bi.Dscal(n, remax, vr[ki:], ldvr) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out below vector. + for k := ki + 1; k < n; k++ { + b[k*ldb+iv] = 0 + } + iscomplex[iv] = ip + // Back-transform and normalization is done below. + } + } else { + // Complex right eigenvector. + + // Initial solve + // [ ( T[ki-1,ki-1] T[ki-1,ki] ) - (wr + i*wi) ]*X = 0. + // [ ( T[ki, ki-1] T[ki, ki] ) ] + if math.Abs(t[(ki-1)*ldt+ki]) >= math.Abs(t[ki*ldt+ki-1]) { + b[(ki-1)*ldb+iv-1] = 1 + b[ki*ldb+iv] = wi / t[(ki-1)*ldt+ki] + } else { + b[(ki-1)*ldb+iv-1] = -wi / t[ki*ldt+ki-1] + b[ki*ldb+iv] = 1 + } + b[ki*ldb+iv-1] = 0 + b[(ki-1)*ldb+iv] = 0 + // Form right-hand side. + for k := 0; k < ki-1; k++ { + b[k*ldb+iv-1] = -b[(ki-1)*ldb+iv-1] * t[k*ldt+ki-1] + b[k*ldb+iv] = -b[ki*ldb+iv] * t[k*ldt+ki] + } + // Solve upper quasi-triangular system: + // [ T[0:ki-1,0:ki-1] - (wr+i*wi) ]*X = scale*(b1+i*b2) + for j := ki - 2; j >= 0; { + if j == 0 || t[j*ldt+j-1] == 0 { + // 1×1 diagonal block. + + scale, xnorm, _ := impl.Dlaln2(false, 1, 2, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv-1:], ldb, wr, wi, x[:2], 2) + // Scale X[0,0] and X[0,1] to avoid + // overflow when updating the right-hand side. + if xnorm > 1 && norms[j] > bignum/xnorm { + x[0] /= xnorm + x[1] /= xnorm + scale /= xnorm + } + // Scale if necessary. 
+ if scale != 1 { + bi.Dscal(ki+1, scale, b[iv-1:], ldb) + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[j*ldb+iv-1] = x[0] + b[j*ldb+iv] = x[1] + // Update the right-hand side. + bi.Daxpy(j, -x[0], t[j:], ldt, b[iv-1:], ldb) + bi.Daxpy(j, -x[1], t[j:], ldt, b[iv:], ldb) + j-- + } else { + // 2×2 diagonal block. + + scale, xnorm, _ := impl.Dlaln2(false, 2, 2, smin, 1, t[(j-1)*ldt+j-1:], ldt, + 1, 1, b[(j-1)*ldb+iv-1:], ldb, wr, wi, x[:], 2) + // Scale X to avoid overflow when updating + // the right-hand side. + if xnorm > 1 { + beta := math.Max(norms[j-1], norms[j]) + if beta > bignum/xnorm { + rec := 1 / xnorm + x[0] *= rec + x[1] *= rec + x[2] *= rec + x[3] *= rec + scale *= rec + } + } + // Scale if necessary. + if scale != 1 { + bi.Dscal(ki+1, scale, b[iv-1:], ldb) + bi.Dscal(ki+1, scale, b[iv:], ldb) + } + b[(j-1)*ldb+iv-1] = x[0] + b[(j-1)*ldb+iv] = x[1] + b[j*ldb+iv-1] = x[2] + b[j*ldb+iv] = x[3] + // Update the right-hand side. + bi.Daxpy(j-1, -x[0], t[j-1:], ldt, b[iv-1:], ldb) + bi.Daxpy(j-1, -x[1], t[j-1:], ldt, b[iv:], ldb) + bi.Daxpy(j-1, -x[2], t[j:], ldt, b[iv-1:], ldb) + bi.Daxpy(j-1, -x[3], t[j:], ldt, b[iv:], ldb) + j -= 2 + } + } + + // Copy the vector x or Q*x to VR and normalize. + switch { + case howmny != lapack.EVAllMulQ: + // No back-transform: copy x to VR and normalize. + bi.Dcopy(ki+1, b[iv-1:], ldb, vr[is-1:], ldvr) + bi.Dcopy(ki+1, b[iv:], ldb, vr[is:], ldvr) + emax := 0.0 + for k := 0; k <= ki; k++ { + emax = math.Max(emax, math.Abs(vr[k*ldvr+is-1])+math.Abs(vr[k*ldvr+is])) + } + remax := 1 / emax + bi.Dscal(ki+1, remax, vr[is-1:], ldvr) + bi.Dscal(ki+1, remax, vr[is:], ldvr) + for k := ki + 1; k < n; k++ { + vr[k*ldvr+is-1] = 0 + vr[k*ldvr+is] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. 
+ if ki-1 > 0 { + bi.Dgemv(blas.NoTrans, n, ki-1, 1, vr, ldvr, b[iv-1:], ldb, + b[(ki-1)*ldb+iv-1], vr[ki-1:], ldvr) + bi.Dgemv(blas.NoTrans, n, ki-1, 1, vr, ldvr, b[iv:], ldb, + b[ki*ldb+iv], vr[ki:], ldvr) + } else { + bi.Dscal(n, b[(ki-1)*ldb+iv-1], vr[ki-1:], ldvr) + bi.Dscal(n, b[ki*ldb+iv], vr[ki:], ldvr) + } + emax := 0.0 + for k := 0; k < n; k++ { + emax = math.Max(emax, math.Abs(vr[k*ldvr+ki-1])+math.Abs(vr[k*ldvr+ki])) + } + remax := 1 / emax + bi.Dscal(n, remax, vr[ki-1:], ldvr) + bi.Dscal(n, remax, vr[ki:], ldvr) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out below vector. + for k := ki + 1; k < n; k++ { + b[k*ldb+iv-1] = 0 + b[k*ldb+iv] = 0 + } + iscomplex[iv-1] = -ip + iscomplex[iv] = ip + iv-- + // Back-transform and normalization is done below. + } + } + if nb > 1 { + // Blocked version of back-transform. + + // For complex case, ki2 includes both vectors (ki-1 and ki). + ki2 := ki + if ip != 0 { + ki2-- + } + // Columns iv:nb of b are valid vectors. + // When the number of vectors stored reaches nb-1 or nb, + // or if this was last vector, do the Gemm. + if iv < 2 || ki2 == 0 { + bi.Dgemm(blas.NoTrans, blas.NoTrans, n, nb-iv, ki2+nb-iv, + 1, vr, ldvr, b[iv:], ldb, + 0, b[nb+iv:], ldb) + // Normalize vectors. + var remax float64 + for k := iv; k < nb; k++ { + if iscomplex[k] == 0 { + // Real eigenvector. + ii := bi.Idamax(n, b[nb+k:], ldb) + remax = 1 / math.Abs(b[ii*ldb+nb+k]) + } else if iscomplex[k] == 1 { + // First eigenvector of conjugate pair. + emax := 0.0 + for ii := 0; ii < n; ii++ { + emax = math.Max(emax, math.Abs(b[ii*ldb+nb+k])+math.Abs(b[ii*ldb+nb+k+1])) + } + remax = 1 / emax + // Second eigenvector of conjugate pair + // will reuse this value of remax. 
+ } + bi.Dscal(n, remax, b[nb+k:], ldb) + } + impl.Dlacpy(blas.All, n, nb-iv, b[nb+iv:], ldb, vr[ki2:], ldvr) + iv = nb - 1 + } else { + iv-- + } + } + is-- + if ip != 0 { + is-- + } + } + + if side == lapack.EVRight { + return m + } + +leftev: + // Compute left eigenvectors. + + // For complex left vector, iv is for real part and iv+1 for complex + // part. Non-blocked version always uses iv=0. Blocked version starts + // with iv=0, goes up to nb-2 or nb-1. + iv = 0 + ip = 0 + is = 0 + for ki := 0; ki < n; ki++ { + if ip == 1 { + // Previous iteration ki-1 was first of conjugate pair, + // so this ki is second of conjugate pair. + ip = -1 + continue + } + + if ki == n-1 || t[(ki+1)*ldt+ki] == 0 { + // Last column or zero on sub-diagonal, so this ki must + // be real eigenvalue. + ip = 0 + } else { + // Non-zero on sub-diagonal, so this ki is first of + // conjugate pair. + ip = 1 + } + if howmny == lapack.EVSelected && !selected[ki] { + continue + } + + // Compute the ki-th eigenvalue (wr,wi). + wr := t[ki*ldt+ki] + var wi float64 + if ip != 0 { + wi = math.Sqrt(math.Abs(t[ki*ldt+ki+1])) * math.Sqrt(math.Abs(t[(ki+1)*ldt+ki])) + } + smin := math.Max(ulp*(math.Abs(wr)+math.Abs(wi)), smlnum) + + if ip == 0 { + // Real left eigenvector. + + b[ki*ldb+iv] = 1 + // Form right-hand side. + for k := ki + 1; k < n; k++ { + b[k*ldb+iv] = -t[ki*ldt+k] + } + // Solve transposed quasi-triangular system: + // [ T[ki+1:n,ki+1:n] - wr ]^T * X = scale*b + vmax := 1.0 + vcrit := bignum + for j := ki + 1; j < n; { + if j == n-1 || t[(j+1)*ldt+j] == 0 { + // 1×1 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side. + if norms[j] > vcrit { + rec := 1 / vmax + bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) + vmax = 1 + } + b[j*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j:], ldt, b[(ki+1)*ldb+iv:], ldb) + // Solve [ T[j,j] - wr ]^T * X = b. 
+ scale, _, _ := impl.Dlaln2(false, 1, 1, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:1], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + } + b[j*ldb+iv] = x[0] + vmax = math.Max(math.Abs(b[j*ldb+iv]), vmax) + vcrit = bignum / vmax + j++ + } else { + // 2×2 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side. + beta := math.Max(norms[j], norms[j+1]) + if beta > vcrit { + bi.Dscal(n-ki+1, 1/vmax, b[ki*ldb+iv:], 1) + vmax = 1 + } + b[j*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j:], ldt, b[(ki+1)*ldb+iv:], ldb) + b[(j+1)*ldb+iv] -= bi.Ddot(j-ki-1, t[(ki+1)*ldt+j+1:], ldt, b[(ki+1)*ldb+iv:], ldb) + // Solve + // [ T[j,j]-wr T[j,j+1] ]^T * X = scale*[ b1 ] + // [ T[j+1,j] T[j+1,j+1]-wr ] [ b2 ] + scale, _, _ := impl.Dlaln2(true, 2, 1, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, 0, x[:3], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + } + b[j*ldb+iv] = x[0] + b[(j+1)*ldb+iv] = x[2] + vmax = math.Max(vmax, math.Max(math.Abs(b[j*ldb+iv]), math.Abs(b[(j+1)*ldb+iv]))) + vcrit = bignum / vmax + j += 2 + } + } + // Copy the vector x or Q*x to VL and normalize. + switch { + case howmny != lapack.EVAllMulQ: + // No back-transform: copy x to VL and normalize. + bi.Dcopy(n-ki, b[ki*ldb+iv:], ldb, vl[ki*ldvl+is:], ldvl) + ii := bi.Idamax(n-ki, vl[ki*ldvl+is:], ldvl) + ki + remax := 1 / math.Abs(vl[ii*ldvl+is]) + bi.Dscal(n-ki, remax, vl[ki*ldvl+is:], ldvl) + for k := 0; k < ki; k++ { + vl[k*ldvl+is] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with Gemv, Q*x. + if n-ki-1 > 0 { + bi.Dgemv(blas.NoTrans, n, n-ki-1, + 1, vl[ki+1:], ldvl, b[(ki+1)*ldb+iv:], ldb, + b[ki*ldb+iv], vl[ki:], ldvl) + } + ii := bi.Idamax(n, vl[ki:], ldvl) + remax := 1 / math.Abs(vl[ii*ldvl+ki]) + bi.Dscal(n, remax, vl[ki:], ldvl) + default: + // Version 2: back-transform block of vectors with Gemm + // zero out above vector. 
+ for k := 0; k < ki; k++ { + b[k*ldb+iv] = 0 + } + iscomplex[iv] = ip + // Back-transform and normalization is done below. + } + } else { + // Complex left eigenvector. + + // Initial solve: + // [ [ T[ki,ki] T[ki,ki+1] ]^T - (wr - i* wi) ]*X = 0. + // [ [ T[ki+1,ki] T[ki+1,ki+1] ] ] + if math.Abs(t[ki*ldt+ki+1]) >= math.Abs(t[(ki+1)*ldt+ki]) { + b[ki*ldb+iv] = wi / t[ki*ldt+ki+1] + b[(ki+1)*ldb+iv+1] = 1 + } else { + b[ki*ldb+iv] = 1 + b[(ki+1)*ldb+iv+1] = -wi / t[(ki+1)*ldt+ki] + } + b[(ki+1)*ldb+iv] = 0 + b[ki*ldb+iv+1] = 0 + // Form right-hand side. + for k := ki + 2; k < n; k++ { + b[k*ldb+iv] = -b[ki*ldb+iv] * t[ki*ldt+k] + b[k*ldb+iv+1] = -b[(ki+1)*ldb+iv+1] * t[(ki+1)*ldt+k] + } + // Solve transposed quasi-triangular system: + // [ T[ki+2:n,ki+2:n]^T - (wr-i*wi) ]*X = b1+i*b2 + vmax := 1.0 + vcrit := bignum + for j := ki + 2; j < n; { + if j == n-1 || t[(j+1)*ldt+j] == 0 { + // 1×1 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side elements. + if norms[j] > vcrit { + rec := 1 / vmax + bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, rec, b[ki*ldb+iv+1:], ldb) + vmax = 1 + } + b[j*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv:], ldb) + b[j*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv+1:], ldb) + // Solve [ T[j,j]-(wr-i*wi) ]*(X11+i*X12) = b1+i*b2. + scale, _, _ := impl.Dlaln2(false, 1, 2, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, -wi, x[:2], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, scale, b[ki*ldb+iv+1:], ldb) + } + b[j*ldb+iv] = x[0] + b[j*ldb+iv+1] = x[1] + vmax = math.Max(vmax, math.Max(math.Abs(b[j*ldb+iv]), math.Abs(b[j*ldb+iv+1]))) + vcrit = bignum / vmax + j++ + } else { + // 2×2 diagonal block. + + // Scale if necessary to avoid overflow + // when forming the right-hand side elements. 
+ if math.Max(norms[j], norms[j+1]) > vcrit { + rec := 1 / vmax + bi.Dscal(n-ki, rec, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, rec, b[ki*ldb+iv+1:], ldb) + vmax = 1 + } + b[j*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv:], ldb) + b[j*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j:], ldt, b[(ki+2)*ldb+iv+1:], ldb) + b[(j+1)*ldb+iv] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j+1:], ldt, b[(ki+2)*ldb+iv:], ldb) + b[(j+1)*ldb+iv+1] -= bi.Ddot(j-ki-2, t[(ki+2)*ldt+j+1:], ldt, b[(ki+2)*ldb+iv+1:], ldb) + // Solve 2×2 complex linear equation + // [ [T[j,j] T[j,j+1] ]^T - (wr-i*wi)*I ]*X = scale*b + // [ [T[j+1,j] T[j+1,j+1]] ] + scale, _, _ := impl.Dlaln2(true, 2, 2, smin, 1, t[j*ldt+j:], ldt, + 1, 1, b[j*ldb+iv:], ldb, wr, -wi, x[:], 2) + // Scale if necessary. + if scale != 1 { + bi.Dscal(n-ki, scale, b[ki*ldb+iv:], ldb) + bi.Dscal(n-ki, scale, b[ki*ldb+iv+1:], ldb) + } + b[j*ldb+iv] = x[0] + b[j*ldb+iv+1] = x[1] + b[(j+1)*ldb+iv] = x[2] + b[(j+1)*ldb+iv+1] = x[3] + vmax01 := math.Max(math.Abs(x[0]), math.Abs(x[1])) + vmax23 := math.Max(math.Abs(x[2]), math.Abs(x[3])) + vmax = math.Max(vmax, math.Max(vmax01, vmax23)) + vcrit = bignum / vmax + j += 2 + } + } + // Copy the vector x or Q*x to VL and normalize. + switch { + case howmny != lapack.EVAllMulQ: + // No back-transform: copy x to VL and normalize. + bi.Dcopy(n-ki, b[ki*ldb+iv:], ldb, vl[ki*ldvl+is:], ldvl) + bi.Dcopy(n-ki, b[ki*ldb+iv+1:], ldb, vl[ki*ldvl+is+1:], ldvl) + emax := 0.0 + for k := ki; k < n; k++ { + emax = math.Max(emax, math.Abs(vl[k*ldvl+is])+math.Abs(vl[k*ldvl+is+1])) + } + remax := 1 / emax + bi.Dscal(n-ki, remax, vl[ki*ldvl+is:], ldvl) + bi.Dscal(n-ki, remax, vl[ki*ldvl+is+1:], ldvl) + for k := 0; k < ki; k++ { + vl[k*ldvl+is] = 0 + vl[k*ldvl+is+1] = 0 + } + case nb == 1: + // Version 1: back-transform each vector with GEMV, Q*x. 
+ if n-ki-2 > 0 { + bi.Dgemv(blas.NoTrans, n, n-ki-2, + 1, vl[ki+2:], ldvl, b[(ki+2)*ldb+iv:], ldb, + b[ki*ldb+iv], vl[ki:], ldvl) + bi.Dgemv(blas.NoTrans, n, n-ki-2, + 1, vl[ki+2:], ldvl, b[(ki+2)*ldb+iv+1:], ldb, + b[(ki+1)*ldb+iv+1], vl[ki+1:], ldvl) + } else { + bi.Dscal(n, b[ki*ldb+iv], vl[ki:], ldvl) + bi.Dscal(n, b[(ki+1)*ldb+iv+1], vl[ki+1:], ldvl) + } + emax := 0.0 + for k := 0; k < n; k++ { + emax = math.Max(emax, math.Abs(vl[k*ldvl+ki])+math.Abs(vl[k*ldvl+ki+1])) + } + remax := 1 / emax + bi.Dscal(n, remax, vl[ki:], ldvl) + bi.Dscal(n, remax, vl[ki+1:], ldvl) + default: + // Version 2: back-transform block of vectors with GEMM. + // Zero out above vector. + // Could go from ki-nv+1 to ki-1. + for k := 0; k < ki; k++ { + b[k*ldb+iv] = 0 + b[k*ldb+iv+1] = 0 + } + iscomplex[iv] = ip + iscomplex[iv+1] = -ip + iv++ + // Back-transform and normalization is done below. + } + } + if nb > 1 { + // Blocked version of back-transform. + // For complex case, ki2 includes both vectors ki and ki+1. + ki2 := ki + if ip != 0 { + ki2++ + } + // Columns [0:iv] of work are valid vectors. When the + // number of vectors stored reaches nb-1 or nb, or if + // this was last vector, do the Gemm. + if iv >= nb-2 || ki2 == n-1 { + bi.Dgemm(blas.NoTrans, blas.NoTrans, n, iv+1, n-ki2+iv, + 1, vl[ki2-iv:], ldvl, b[(ki2-iv)*ldb:], ldb, + 0, b[nb:], ldb) + // Normalize vectors. + var remax float64 + for k := 0; k <= iv; k++ { + if iscomplex[k] == 0 { + // Real eigenvector. + ii := bi.Idamax(n, b[nb+k:], ldb) + remax = 1 / math.Abs(b[ii*ldb+nb+k]) + } else if iscomplex[k] == 1 { + // First eigenvector of conjugate pair. + emax := 0.0 + for ii := 0; ii < n; ii++ { + emax = math.Max(emax, math.Abs(b[ii*ldb+nb+k])+math.Abs(b[ii*ldb+nb+k+1])) + } + remax = 1 / emax + // Second eigenvector of conjugate pair + // will reuse this value of remax. 
+ } + bi.Dscal(n, remax, b[nb+k:], ldb) + } + impl.Dlacpy(blas.All, n, iv+1, b[nb:], ldb, vl[ki2-iv:], ldvl) + iv = 0 + } else { + iv++ + } + } + is++ + if ip != 0 { + is++ + } + } + + return m +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go new file mode 100644 index 0000000000..9f3f90bad8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrexc.go @@ -0,0 +1,230 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/lapack" + +// Dtrexc reorders the real Schur factorization of a n×n real matrix +// A = Q*T*Q^T +// so that the diagonal block of T with row index ifst is moved to row ilst. +// +// On entry, T must be in Schur canonical form, that is, block upper triangular +// with 1×1 and 2×2 diagonal blocks; each 2×2 diagonal block has its diagonal +// elements equal and its off-diagonal elements of opposite sign. +// +// On return, T will be reordered by an orthogonal similarity transformation Z +// as Z^T*T*Z, and will be again in Schur canonical form. +// +// If compq is lapack.UpdateSchur, on return the matrix Q of Schur vectors will be +// updated by post-multiplying it with Z. +// If compq is lapack.UpdateSchurNone, the matrix Q is not referenced and will not be +// updated. +// For other values of compq Dtrexc will panic. +// +// ifst and ilst specify the reordering of the diagonal blocks of T. The block +// with row index ifst is moved to row ilst, by a sequence of transpositions +// between adjacent blocks. +// +// If ifst points to the second row of a 2×2 block, ifstOut will point to the +// first row, otherwise it will be equal to ifst. +// +// ilstOut will point to the first row of the block in its final position. If ok +// is true, ilstOut may differ from ilst by +1 or -1. 
+// +// It must hold that +// 0 <= ifst < n, and 0 <= ilst < n, +// otherwise Dtrexc will panic. +// +// If ok is false, two adjacent blocks were too close to swap because the +// problem is very ill-conditioned. T may have been partially reordered, and +// ilstOut will point to the first row of the block at the position to which it +// has been moved. +// +// work must have length at least n, otherwise Dtrexc will panic. +// +// Dtrexc is an internal routine. It is exported for testing purposes. +func (impl Implementation) Dtrexc(compq lapack.UpdateSchurComp, n int, t []float64, ldt int, q []float64, ldq int, ifst, ilst int, work []float64) (ifstOut, ilstOut int, ok bool) { + switch { + case compq != lapack.UpdateSchur && compq != lapack.UpdateSchurNone: + panic(badUpdateSchurComp) + case n < 0: + panic(nLT0) + case ldt < max(1, n): + panic(badLdT) + case ldq < 1, compq == lapack.UpdateSchur && ldq < n: + panic(badLdQ) + case (ifst < 0 || n <= ifst) && n > 0: + panic(badIfst) + case (ilst < 0 || n <= ilst) && n > 0: + panic(badIlst) + } + + // Quick return if possible. + if n == 0 { + return ifst, ilst, true + } + + switch { + case len(t) < (n-1)*ldt+n: + panic(shortT) + case compq == lapack.UpdateSchur && len(q) < (n-1)*ldq+n: + panic(shortQ) + case len(work) < n: + panic(shortWork) + } + + // Quick return if possible. + if n == 1 { + return ifst, ilst, true + } + + // Determine the first row of specified block + // and find out it is 1×1 or 2×2. + if ifst > 0 && t[ifst*ldt+ifst-1] != 0 { + ifst-- + } + nbf := 1 // Size of the first block. + if ifst+1 < n && t[(ifst+1)*ldt+ifst] != 0 { + nbf = 2 + } + // Determine the first row of the final block + // and find out it is 1×1 or 2×2. + if ilst > 0 && t[ilst*ldt+ilst-1] != 0 { + ilst-- + } + nbl := 1 // Size of the last block. 
+ if ilst+1 < n && t[(ilst+1)*ldt+ilst] != 0 { + nbl = 2 + } + + ok = true + wantq := compq == lapack.UpdateSchur + + switch { + case ifst == ilst: + return ifst, ilst, true + + case ifst < ilst: + // Update ilst. + switch { + case nbf == 2 && nbl == 1: + ilst-- + case nbf == 1 && nbl == 2: + ilst++ + } + here := ifst + for here < ilst { + // Swap block with next one below. + if nbf == 1 || nbf == 2 { + // Current block either 1×1 or 2×2. + nbnext := 1 // Size of the next block. + if here+nbf+1 < n && t[(here+nbf+1)*ldt+here+nbf] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, nbf, nbnext, work) + if !ok { + return ifst, here, false + } + here += nbnext + // Test if 2×2 block breaks into two 1×1 blocks. + if nbf == 2 && t[(here+1)*ldt+here] == 0 { + nbf = 3 + } + continue + } + + // Current block consists of two 1×1 blocks each of + // which must be swapped individually. + nbnext := 1 // Size of the next block. + if here+3 < n && t[(here+3)*ldt+here+2] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here+1, 1, nbnext, work) + if !ok { + return ifst, here, false + } + if nbnext == 1 { + // Swap two 1×1 blocks, no problems possible. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, nbnext, work) + here++ + continue + } + // Recompute nbnext in case 2×2 split. + if t[(here+2)*ldt+here+1] == 0 { + nbnext = 1 + } + if nbnext == 2 { + // 2×2 block did not split. + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, nbnext, work) + if !ok { + return ifst, here, false + } + } else { + // 2×2 block did split. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, 1, work) + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here+1, 1, 1, work) + } + here += 2 + } + return ifst, here, true + + default: // ifst > ilst + here := ifst + for here > ilst { + // Swap block with next one above. + if nbf == 1 || nbf == 2 { + // Current block either 1×1 or 2×2. 
+ nbnext := 1 + if here-2 >= 0 && t[(here-1)*ldt+here-2] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-nbnext, nbnext, nbf, work) + if !ok { + return ifst, here, false + } + here -= nbnext + // Test if 2×2 block breaks into two 1×1 blocks. + if nbf == 2 && t[(here+1)*ldt+here] == 0 { + nbf = 3 + } + continue + } + + // Current block consists of two 1×1 blocks each of + // which must be swapped individually. + nbnext := 1 + if here-2 >= 0 && t[(here-1)*ldt+here-2] != 0 { + nbnext = 2 + } + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-nbnext, nbnext, 1, work) + if !ok { + return ifst, here, false + } + if nbnext == 1 { + // Swap two 1×1 blocks, no problems possible. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, nbnext, 1, work) + here-- + continue + } + // Recompute nbnext in case 2×2 split. + if t[here*ldt+here-1] == 0 { + nbnext = 1 + } + if nbnext == 2 { + // 2×2 block did not split. + ok = impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-1, 2, 1, work) + if !ok { + return ifst, here, false + } + } else { + // 2×2 block did split. + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here, 1, 1, work) + impl.Dlaexc(wantq, n, t, ldt, q, ldq, here-1, 1, 1, work) + } + here -= 2 + } + return ifst, here, true + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go new file mode 100644 index 0000000000..efc24b65ea --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrti2.go @@ -0,0 +1,69 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dtrti2 computes the inverse of a triangular matrix, storing the result in place +// into a. This is the BLAS level 2 version of the algorithm. +// +// Dtrti2 is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Dtrti2(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case diag != blas.NonUnit && diag != blas.Unit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + bi := blas64.Implementation() + + nonUnit := diag == blas.NonUnit + // TODO(btracey): Replace this with a row-major ordering. + if uplo == blas.Upper { + for j := 0; j < n; j++ { + var ajj float64 + if nonUnit { + ajj = 1 / a[j*lda+j] + a[j*lda+j] = ajj + ajj *= -1 + } else { + ajj = -1 + } + bi.Dtrmv(blas.Upper, blas.NoTrans, diag, j, a, lda, a[j:], lda) + bi.Dscal(j, ajj, a[j:], lda) + } + return + } + for j := n - 1; j >= 0; j-- { + var ajj float64 + if nonUnit { + ajj = 1 / a[j*lda+j] + a[j*lda+j] = ajj + ajj *= -1 + } else { + ajj = -1 + } + if j < n-1 { + bi.Dtrmv(blas.Lower, blas.NoTrans, diag, n-j-1, a[(j+1)*lda+j+1:], lda, a[(j+1)*lda+j:], lda) + bi.Dscal(n-j-1, ajj, a[(j+1)*lda+j:], lda) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go new file mode 100644 index 0000000000..6ec3663c35 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtri.go @@ -0,0 +1,72 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dtrtri computes the inverse of a triangular matrix, storing the result in place +// into a. This is the BLAS level 3 version of the algorithm which builds upon +// Dtrti2 to operate on matrix blocks instead of only individual columns. 
+// +// Dtrtri will not perform the inversion if the matrix is singular, and returns +// a boolean indicating whether the inversion was successful. +func (impl Implementation) Dtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case diag != blas.NonUnit && diag != blas.Unit: + panic(badDiag) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 { + return true + } + + if len(a) < (n-1)*lda+n { + panic(shortA) + } + + if diag == blas.NonUnit { + for i := 0; i < n; i++ { + if a[i*lda+i] == 0 { + return false + } + } + } + + bi := blas64.Implementation() + + nb := impl.Ilaenv(1, "DTRTRI", "UD", n, -1, -1, -1) + if nb <= 1 || nb > n { + impl.Dtrti2(uplo, diag, n, a, lda) + return true + } + if uplo == blas.Upper { + for j := 0; j < n; j += nb { + jb := min(nb, n-j) + bi.Dtrmm(blas.Left, blas.Upper, blas.NoTrans, diag, j, jb, 1, a, lda, a[j:], lda) + bi.Dtrsm(blas.Right, blas.Upper, blas.NoTrans, diag, j, jb, -1, a[j*lda+j:], lda, a[j:], lda) + impl.Dtrti2(blas.Upper, diag, jb, a[j*lda+j:], lda) + } + return true + } + nn := ((n - 1) / nb) * nb + for j := nn; j >= 0; j -= nb { + jb := min(nb, n-j) + if j+jb <= n-1 { + bi.Dtrmm(blas.Left, blas.Lower, blas.NoTrans, diag, n-j-jb, jb, 1, a[(j+jb)*lda+j+jb:], lda, a[(j+jb)*lda+j:], lda) + bi.Dtrsm(blas.Right, blas.Lower, blas.NoTrans, diag, n-j-jb, jb, -1, a[j*lda+j:], lda, a[(j+jb)*lda+j:], lda) + } + impl.Dtrti2(blas.Lower, diag, jb, a[j*lda+j:], lda) + } + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go new file mode 100644 index 0000000000..1752dc5c85 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/dtrtrs.go @@ -0,0 +1,55 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gonum + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +// Dtrtrs solves a triangular system of the form A * X = B or A^T * X = B. Dtrtrs +// returns whether the solve completed successfully. If A is singular, no solve is performed. +func (impl Implementation) Dtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool) { + switch { + case uplo != blas.Upper && uplo != blas.Lower: + panic(badUplo) + case trans != blas.NoTrans && trans != blas.Trans && trans != blas.ConjTrans: + panic(badTrans) + case diag != blas.NonUnit && diag != blas.Unit: + panic(badDiag) + case n < 0: + panic(nLT0) + case nrhs < 0: + panic(nrhsLT0) + case lda < max(1, n): + panic(badLdA) + case ldb < max(1, nrhs): + panic(badLdB) + } + + if n == 0 { + return true + } + + switch { + case len(a) < (n-1)*lda+n: + panic(shortA) + case len(b) < (n-1)*ldb+nrhs: + panic(shortB) + } + + // Check for singularity. + nounit := diag == blas.NonUnit + if nounit { + for i := 0; i < n; i++ { + if a[i*lda+i] == 0 { + return false + } + } + } + bi := blas64.Implementation() + bi.Dtrsm(blas.Left, uplo, trans, diag, n, nrhs, 1, a, lda, b, ldb) + return true +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/errors.go b/vendor/gonum.org/v1/gonum/lapack/gonum/errors.go new file mode 100644 index 0000000000..3c0cb68efe --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/errors.go @@ -0,0 +1,174 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// This list is duplicated in netlib/lapack/netlib. Keep in sync. +const ( + // Panic strings for bad enumeration values. 
+ badApplyOrtho = "lapack: bad ApplyOrtho" + badBalanceJob = "lapack: bad BalanceJob" + badDiag = "lapack: bad Diag" + badDirect = "lapack: bad Direct" + badEVComp = "lapack: bad EVComp" + badEVHowMany = "lapack: bad EVHowMany" + badEVJob = "lapack: bad EVJob" + badEVSide = "lapack: bad EVSide" + badGSVDJob = "lapack: bad GSVDJob" + badGenOrtho = "lapack: bad GenOrtho" + badLeftEVJob = "lapack: bad LeftEVJob" + badMatrixType = "lapack: bad MatrixType" + badNorm = "lapack: bad Norm" + badPivot = "lapack: bad Pivot" + badRightEVJob = "lapack: bad RightEVJob" + badSVDJob = "lapack: bad SVDJob" + badSchurComp = "lapack: bad SchurComp" + badSchurJob = "lapack: bad SchurJob" + badSide = "lapack: bad Side" + badSort = "lapack: bad Sort" + badStoreV = "lapack: bad StoreV" + badTrans = "lapack: bad Trans" + badUpdateSchurComp = "lapack: bad UpdateSchurComp" + badUplo = "lapack: bad Uplo" + bothSVDOver = "lapack: both jobU and jobVT are lapack.SVDOverwrite" + + // Panic strings for bad numerical and string values. 
+ badIfst = "lapack: ifst out of range" + badIhi = "lapack: ihi out of range" + badIhiz = "lapack: ihiz out of range" + badIlo = "lapack: ilo out of range" + badIloz = "lapack: iloz out of range" + badIlst = "lapack: ilst out of range" + badIsave = "lapack: bad isave value" + badIspec = "lapack: bad ispec value" + badJ1 = "lapack: j1 out of range" + badJpvt = "lapack: bad element of jpvt" + badK1 = "lapack: k1 out of range" + badK2 = "lapack: k2 out of range" + badKacc22 = "lapack: invalid value of kacc22" + badKbot = "lapack: kbot out of range" + badKtop = "lapack: ktop out of range" + badLWork = "lapack: insufficient declared workspace length" + badMm = "lapack: mm out of range" + badN1 = "lapack: bad value of n1" + badN2 = "lapack: bad value of n2" + badNa = "lapack: bad value of na" + badName = "lapack: bad name" + badNh = "lapack: bad value of nh" + badNw = "lapack: bad value of nw" + badPp = "lapack: bad value of pp" + badShifts = "lapack: bad shifts" + i0LT0 = "lapack: i0 < 0" + kGTM = "lapack: k > m" + kGTN = "lapack: k > n" + kLT0 = "lapack: k < 0" + kLT1 = "lapack: k < 1" + kdLT0 = "lapack: kd < 0" + mGTN = "lapack: m > n" + mLT0 = "lapack: m < 0" + mmLT0 = "lapack: mm < 0" + n0LT0 = "lapack: n0 < 0" + nGTM = "lapack: n > m" + nLT0 = "lapack: n < 0" + nLT1 = "lapack: n < 1" + nLTM = "lapack: n < m" + nanCFrom = "lapack: cfrom is NaN" + nanCTo = "lapack: cto is NaN" + nbGTM = "lapack: nb > m" + nbGTN = "lapack: nb > n" + nbLT0 = "lapack: nb < 0" + nccLT0 = "lapack: ncc < 0" + ncvtLT0 = "lapack: ncvt < 0" + negANorm = "lapack: anorm < 0" + negZ = "lapack: negative z value" + nhLT0 = "lapack: nh < 0" + notIsolated = "lapack: block is not isolated" + nrhsLT0 = "lapack: nrhs < 0" + nruLT0 = "lapack: nru < 0" + nshftsLT0 = "lapack: nshfts < 0" + nshftsOdd = "lapack: nshfts must be even" + nvLT0 = "lapack: nv < 0" + offsetGTM = "lapack: offset > m" + offsetLT0 = "lapack: offset < 0" + pLT0 = "lapack: p < 0" + recurLT0 = "lapack: recur < 0" + zeroCFrom = "lapack: 
zero cfrom" + + // Panic strings for bad slice lengths. + badLenAlpha = "lapack: bad length of alpha" + badLenBeta = "lapack: bad length of beta" + badLenIpiv = "lapack: bad length of ipiv" + badLenJpvt = "lapack: bad length of jpvt" + badLenK = "lapack: bad length of k" + badLenSelected = "lapack: bad length of selected" + badLenSi = "lapack: bad length of si" + badLenSr = "lapack: bad length of sr" + badLenTau = "lapack: bad length of tau" + badLenWi = "lapack: bad length of wi" + badLenWr = "lapack: bad length of wr" + + // Panic strings for insufficient slice lengths. + shortA = "lapack: insufficient length of a" + shortAB = "lapack: insufficient length of ab" + shortAuxv = "lapack: insufficient length of auxv" + shortB = "lapack: insufficient length of b" + shortC = "lapack: insufficient length of c" + shortCNorm = "lapack: insufficient length of cnorm" + shortD = "lapack: insufficient length of d" + shortE = "lapack: insufficient length of e" + shortF = "lapack: insufficient length of f" + shortH = "lapack: insufficient length of h" + shortIWork = "lapack: insufficient length of iwork" + shortIsgn = "lapack: insufficient length of isgn" + shortQ = "lapack: insufficient length of q" + shortS = "lapack: insufficient length of s" + shortScale = "lapack: insufficient length of scale" + shortT = "lapack: insufficient length of t" + shortTau = "lapack: insufficient length of tau" + shortTauP = "lapack: insufficient length of tauP" + shortTauQ = "lapack: insufficient length of tauQ" + shortU = "lapack: insufficient length of u" + shortV = "lapack: insufficient length of v" + shortVL = "lapack: insufficient length of vl" + shortVR = "lapack: insufficient length of vr" + shortVT = "lapack: insufficient length of vt" + shortVn1 = "lapack: insufficient length of vn1" + shortVn2 = "lapack: insufficient length of vn2" + shortW = "lapack: insufficient length of w" + shortWH = "lapack: insufficient length of wh" + shortWV = "lapack: insufficient length of wv" + shortWi = 
"lapack: insufficient length of wi" + shortWork = "lapack: insufficient length of work" + shortWr = "lapack: insufficient length of wr" + shortX = "lapack: insufficient length of x" + shortY = "lapack: insufficient length of y" + shortZ = "lapack: insufficient length of z" + + // Panic strings for bad leading dimensions of matrices. + badLdA = "lapack: bad leading dimension of A" + badLdB = "lapack: bad leading dimension of B" + badLdC = "lapack: bad leading dimension of C" + badLdF = "lapack: bad leading dimension of F" + badLdH = "lapack: bad leading dimension of H" + badLdQ = "lapack: bad leading dimension of Q" + badLdT = "lapack: bad leading dimension of T" + badLdU = "lapack: bad leading dimension of U" + badLdV = "lapack: bad leading dimension of V" + badLdVL = "lapack: bad leading dimension of VL" + badLdVR = "lapack: bad leading dimension of VR" + badLdVT = "lapack: bad leading dimension of VT" + badLdW = "lapack: bad leading dimension of W" + badLdWH = "lapack: bad leading dimension of WH" + badLdWV = "lapack: bad leading dimension of WV" + badLdWork = "lapack: bad leading dimension of Work" + badLdX = "lapack: bad leading dimension of X" + badLdY = "lapack: bad leading dimension of Y" + badLdZ = "lapack: bad leading dimension of Z" + + // Panic strings for bad vector increments. + absIncNotOne = "lapack: increment not one or negative one" + badIncX = "lapack: incX <= 0" + badIncY = "lapack: incY <= 0" + zeroIncV = "lapack: incv == 0" +) diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go new file mode 100644 index 0000000000..b251d72691 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlc.go @@ -0,0 +1,45 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Iladlc scans a matrix for its last non-zero column. 
Returns -1 if the matrix +// is all zeros. +// +// Iladlc is an internal routine. It is exported for testing purposes. +func (Implementation) Iladlc(m, n int, a []float64, lda int) int { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 || m == 0 { + return -1 + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + + // Test common case where corner is non-zero. + if a[n-1] != 0 || a[(m-1)*lda+(n-1)] != 0 { + return n - 1 + } + + // Scan each row tracking the highest column seen. + highest := -1 + for i := 0; i < m; i++ { + for j := n - 1; j >= 0; j-- { + if a[i*lda+j] != 0 { + highest = max(highest, j) + break + } + } + } + return highest +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go new file mode 100644 index 0000000000..b73fe18ea2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/iladlr.go @@ -0,0 +1,41 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Iladlr scans a matrix for its last non-zero row. Returns -1 if the matrix +// is all zeros. +// +// Iladlr is an internal routine. It is exported for testing purposes. 
+func (Implementation) Iladlr(m, n int, a []float64, lda int) int { + switch { + case m < 0: + panic(mLT0) + case n < 0: + panic(nLT0) + case lda < max(1, n): + panic(badLdA) + } + + if n == 0 || m == 0 { + return -1 + } + + if len(a) < (m-1)*lda+n { + panic(shortA) + } + + // Check the common case where the corner is non-zero + if a[(m-1)*lda] != 0 || a[(m-1)*lda+n-1] != 0 { + return m - 1 + } + for i := m - 1; i >= 0; i-- { + for j := 0; j < n; j++ { + if a[i*lda+j] != 0 { + return i + } + } + } + return -1 +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go b/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go new file mode 100644 index 0000000000..c134d21bb1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/ilaenv.go @@ -0,0 +1,387 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +// Ilaenv returns algorithm tuning parameters for the algorithm given by the +// input string. ispec specifies the parameter to return: +// 1: The optimal block size for a blocked algorithm. +// 2: The minimum block size for a blocked algorithm. +// 3: The block size of unprocessed data at which a blocked algorithm should +// crossover to an unblocked version. +// 4: The number of shifts. +// 5: The minimum column dimension for blocking to be used. +// 6: The crossover point for SVD (to use QR factorization or not). +// 7: The number of processors. +// 8: The crossover point for multi-shift in QR and QZ methods for non-symmetric eigenvalue problems. +// 9: Maximum size of the subproblems in divide-and-conquer algorithms. +// 10: ieee NaN arithmetic can be trusted not to trap. +// 11: infinity arithmetic can be trusted not to trap. +// 12...16: parameters for Dhseqr and related functions. See Iparmq for more +// information. +// +// Ilaenv is an internal routine. It is exported for testing purposes. 
+func (impl Implementation) Ilaenv(ispec int, name string, opts string, n1, n2, n3, n4 int) int { + // TODO(btracey): Replace this with a constant lookup? A list of constants? + sname := name[0] == 'S' || name[0] == 'D' + cname := name[0] == 'C' || name[0] == 'Z' + if !sname && !cname { + panic(badName) + } + c2 := name[1:3] + c3 := name[3:6] + c4 := c3[1:3] + + switch ispec { + default: + panic(badIspec) + case 1: + switch c2 { + default: + panic(badName) + case "GE": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + return 64 + } + return 64 + case "QRF", "RQF", "LQF", "QLF": + if sname { + return 32 + } + return 32 + case "HRD": + if sname { + return 32 + } + return 32 + case "BRD": + if sname { + return 32 + } + return 32 + case "TRI": + if sname { + return 64 + } + return 64 + } + case "PO": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + return 64 + } + return 64 + } + case "SY": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + return 64 + } + return 64 + case "TRD": + return 32 + case "GST": + return 64 + } + case "HE": + switch c3 { + default: + panic(badName) + case "TRF": + return 64 + case "TRD": + return 32 + case "GST": + return 64 + } + case "OR": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c3[1:] { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + case 'M': + switch c3[1:] { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + } + case "UN": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c3[1:] { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + case 'M': + switch c3[1:] { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 32 + } + } + case "GB": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + if n4 <= 64 { + return 1 + } + return 32 + } + if n4 <= 64 { + 
return 1 + } + return 32 + } + case "PB": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + if n4 <= 64 { + return 1 + } + return 32 + } + if n4 <= 64 { + return 1 + } + return 32 + } + case "TR": + switch c3 { + default: + panic(badName) + case "TRI": + if sname { + return 64 + } + return 64 + case "EVC": + if sname { + return 64 + } + return 64 + } + case "LA": + switch c3 { + default: + panic(badName) + case "UUM": + if sname { + return 64 + } + return 64 + } + case "ST": + if sname && c3 == "EBZ" { + return 1 + } + panic(badName) + } + case 2: + switch c2 { + default: + panic(badName) + case "GE": + switch c3 { + default: + panic(badName) + case "QRF", "RQF", "LQF", "QLF": + if sname { + return 2 + } + return 2 + case "HRD": + if sname { + return 2 + } + return 2 + case "BRD": + if sname { + return 2 + } + return 2 + case "TRI": + if sname { + return 2 + } + return 2 + } + case "SY": + switch c3 { + default: + panic(badName) + case "TRF": + if sname { + return 8 + } + return 8 + case "TRD": + if sname { + return 2 + } + panic(badName) + } + case "HE": + if c3 == "TRD" { + return 2 + } + panic(badName) + case "OR": + if !sname { + panic(badName) + } + switch c3[0] { + default: + panic(badName) + case 'G': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + case 'M': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + } + case "UN": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + case 'M': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 2 + } + } + } + case 3: + switch c2 { + default: + panic(badName) + case "GE": + switch c3 { + default: + panic(badName) + case "QRF", "RQF", "LQF", "QLF": + if sname { + return 128 + } + return 128 + case "HRD": + if sname { + return 
128 + } + return 128 + case "BRD": + if sname { + return 128 + } + return 128 + } + case "SY": + if sname && c3 == "TRD" { + return 32 + } + panic(badName) + case "HE": + if c3 == "TRD" { + return 32 + } + panic(badName) + case "OR": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 128 + } + } + case "UN": + switch c3[0] { + default: + panic(badName) + case 'G': + switch c4 { + default: + panic(badName) + case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": + return 128 + } + } + } + case 4: + // Used by xHSEQR + return 6 + case 5: + // Not used + return 2 + case 6: + // Used by xGELSS and xGESVD + return int(float64(min(n1, n2)) * 1.6) + case 7: + // Not used + return 1 + case 8: + // Used by xHSEQR + return 50 + case 9: + // used by xGELSD and xGESDD + return 25 + case 10: + // Go guarantees ieee + return 1 + case 11: + // Go guarantees ieee + return 1 + case 12, 13, 14, 15, 16: + // Dhseqr and related functions for eigenvalue problems. + return impl.Iparmq(ispec, name, opts, n1, n2, n3, n4) + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go b/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go new file mode 100644 index 0000000000..3800f11ce1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/iparmq.go @@ -0,0 +1,115 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "math" + +// Iparmq returns problem and machine dependent parameters useful for Dhseqr and +// related subroutines for eigenvalue problems. +// +// ispec specifies the parameter to return: +// 12: Crossover point between Dlahqr and Dlaqr0. Will be at least 11. +// 13: Deflation window size. +// 14: Nibble crossover point. Determines when to skip a multi-shift QR sweep. 
+// 15: Number of simultaneous shifts in a multishift QR iteration. +// 16: Select structured matrix multiply. +// For other values of ispec Iparmq will panic. +// +// name is the name of the calling function. name must be in uppercase but this +// is not checked. +// +// opts is not used and exists for future use. +// +// n is the order of the Hessenberg matrix H. +// +// ilo and ihi specify the block [ilo:ihi+1,ilo:ihi+1] that is being processed. +// +// lwork is the amount of workspace available. +// +// Except for ispec input parameters are not checked. +// +// Iparmq is an internal routine. It is exported for testing purposes. +func (Implementation) Iparmq(ispec int, name, opts string, n, ilo, ihi, lwork int) int { + nh := ihi - ilo + 1 + ns := 2 + switch { + case nh >= 30: + ns = 4 + case nh >= 60: + ns = 10 + case nh >= 150: + ns = max(10, nh/int(math.Log(float64(nh))/math.Ln2)) + case nh >= 590: + ns = 64 + case nh >= 3000: + ns = 128 + case nh >= 6000: + ns = 256 + } + ns = max(2, ns-(ns%2)) + + switch ispec { + default: + panic(badIspec) + + case 12: + // Matrices of order smaller than nmin get sent to Dlahqr, the + // classic double shift algorithm. This must be at least 11. + const nmin = 75 + return nmin + + case 13: + const knwswp = 500 + if nh <= knwswp { + return ns + } + return 3 * ns / 2 + + case 14: + // Skip a computationally expensive multi-shift QR sweep with + // Dlaqr5 whenever aggressive early deflation finds at least + // nibble*(window size)/100 deflations. The default, small, + // value reflects the expectation that the cost of looking + // through the deflation window with Dlaqr3 will be + // substantially smaller. 
+ const nibble = 14 + return nibble + + case 15: + return ns + + case 16: + if len(name) != 6 { + panic(badName) + } + const ( + k22min = 14 + kacmin = 14 + ) + var acc22 int + switch { + case name[1:] == "GGHRD" || name[1:] == "GGHD3": + acc22 = 1 + if nh >= k22min { + acc22 = 2 + } + case name[3:] == "EXC": + if nh >= kacmin { + acc22 = 1 + } + if nh >= k22min { + acc22 = 2 + } + case name[1:] == "HSEQR" || name[1:5] == "LAQR": + if ns >= kacmin { + acc22 = 1 + } + if ns >= k22min { + acc22 = 2 + } + } + return acc22 + } +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go b/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go new file mode 100644 index 0000000000..434da02d2d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go @@ -0,0 +1,51 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonum + +import "gonum.org/v1/gonum/lapack" + +// Implementation is the native Go implementation of LAPACK routines. It +// is built on top of calls to the return of blas64.Implementation(), so while +// this code is in pure Go, the underlying BLAS implementation may not be. +type Implementation struct{} + +var _ lapack.Float64 = Implementation{} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func abs(a int) int { + if a < 0 { + return -a + } + return a +} + +const ( + // dlamchE is the machine epsilon. For IEEE this is 2^{-53}. + dlamchE = 1.0 / (1 << 53) + + // dlamchB is the radix of the machine (the base of the number system). + dlamchB = 2 + + // dlamchP is base * eps. + dlamchP = dlamchB * dlamchE + + // dlamchS is the "safe minimum", that is, the lowest number such that + // 1/dlamchS does not overflow, or also the smallest normal number. + // For IEEE this is 2^{-1022}. 
// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lapack

import "gonum.org/v1/gonum/blas"

// Complex128 defines the public complex128 LAPACK API supported by gonum/lapack.
type Complex128 interface{}

// Float64 defines the public float64 LAPACK API supported by gonum/lapack.
type Float64 interface {
	Dgecon(norm MatrixNorm, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64
	Dgeev(jobvl LeftEVJob, jobvr RightEVJob, n int, a []float64, lda int, wr, wi []float64, vl []float64, ldvl int, vr []float64, ldvr int, work []float64, lwork int) (first int)
	Dgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool
	Dgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int)
	Dgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int)
	Dgesvd(jobU, jobVT SVDJob, m, n int, a []float64, lda int, s, u []float64, ldu int, vt []float64, ldvt int, work []float64, lwork int) (ok bool)
	Dgetrf(m, n int, a []float64, lda int, ipiv []int) (ok bool)
	Dgetri(n int, a []float64, lda int, ipiv []int, work []float64, lwork int) (ok bool)
	Dgetrs(trans blas.Transpose, n, nrhs int, a []float64, lda int, ipiv []int, b []float64, ldb int)
	Dggsvd3(jobU, jobV, jobQ GSVDJob, m, n, p int, a []float64, lda int, b []float64, ldb int, alpha, beta, u []float64, ldu int, v []float64, ldv int, q []float64, ldq int, work []float64, lwork int, iwork []int) (k, l int, ok bool)
	Dlantr(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, m, n int, a []float64, lda int, work []float64) float64
	Dlange(norm MatrixNorm, m, n int, a []float64, lda int, work []float64) float64
	Dlansy(norm MatrixNorm, uplo blas.Uplo, n int, a []float64, lda int, work []float64) float64
	Dlapmt(forward bool, m, n int, x []float64, ldx int, k []int)
	Dormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int)
	Dormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int)
	Dpocon(uplo blas.Uplo, n int, a []float64, lda int, anorm float64, work []float64, iwork []int) float64
	Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool)
	Dpotri(ul blas.Uplo, n int, a []float64, lda int) (ok bool)
	Dpotrs(ul blas.Uplo, n, nrhs int, a []float64, lda int, b []float64, ldb int)
	Dsyev(jobz EVJob, uplo blas.Uplo, n int, a []float64, lda int, w, work []float64, lwork int) (ok bool)
	Dtrcon(norm MatrixNorm, uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int, work []float64, iwork []int) float64
	Dtrtri(uplo blas.Uplo, diag blas.Diag, n int, a []float64, lda int) (ok bool)
	Dtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool)
}

// Direct specifies the direction of the multiplication for the Householder matrix.
type Direct byte

const (
	Forward  Direct = 'F' // Reflectors are right-multiplied, H_0 * H_1 * ... * H_{k-1}.
	Backward Direct = 'B' // Reflectors are left-multiplied, H_{k-1} * ... * H_1 * H_0.
)

// Sort is the sorting order.
type Sort byte

const (
	SortIncreasing Sort = 'I' // Sort in increasing order.
	SortDecreasing Sort = 'D' // Sort in decreasing order.
)

// StoreV indicates the storage direction of elementary reflectors.
type StoreV byte

const (
	ColumnWise StoreV = 'C' // Reflector stored in a column of the matrix.
	RowWise    StoreV = 'R' // Reflector stored in a row of the matrix.
)

// MatrixNorm represents the kind of matrix norm to compute.
type MatrixNorm byte

const (
	MaxAbs       MatrixNorm = 'M' // max(abs(A(i,j)))
	MaxColumnSum MatrixNorm = 'O' // Maximum absolute column sum (one norm)
	MaxRowSum    MatrixNorm = 'I' // Maximum absolute row sum (infinity norm)
	Frobenius    MatrixNorm = 'F' // Frobenius norm (sqrt of sum of squares)
)

// MatrixType represents the kind of matrix represented in the data.
type MatrixType byte

const (
	General  MatrixType = 'G' // A general dense matrix.
	UpperTri MatrixType = 'U' // An upper triangular matrix.
	LowerTri MatrixType = 'L' // A lower triangular matrix.
)

// Pivot specifies the pivot type for plane rotations.
type Pivot byte

const (
	Variable Pivot = 'V' // Rotation applied to variable plane pairs.
	Top      Pivot = 'T' // Rotation applied against the top row.
	Bottom   Pivot = 'B' // Rotation applied against the bottom row.
)

// ApplyOrtho specifies which orthogonal matrix is applied in Dormbr.
type ApplyOrtho byte

const (
	ApplyP ApplyOrtho = 'P' // Apply P or P^T.
	ApplyQ ApplyOrtho = 'Q' // Apply Q or Q^T.
)

// GenOrtho specifies which orthogonal matrix is generated in Dorgbr.
type GenOrtho byte

const (
	GeneratePT GenOrtho = 'P' // Generate P^T.
	GenerateQ  GenOrtho = 'Q' // Generate Q.
)

// SVDJob specifies the singular vector computation type for SVD.
type SVDJob byte

const (
	SVDAll       SVDJob = 'A' // Compute all columns of the orthogonal matrix U or V.
	SVDStore     SVDJob = 'S' // Compute the singular vectors and store them in the orthogonal matrix U or V.
	SVDOverwrite SVDJob = 'O' // Compute the singular vectors and overwrite them on the input matrix A.
	SVDNone      SVDJob = 'N' // Do not compute singular vectors.
)

// GSVDJob specifies the singular vector computation type for Generalized SVD.
type GSVDJob byte

const (
	GSVDU    GSVDJob = 'U' // Compute orthogonal matrix U.
	GSVDV    GSVDJob = 'V' // Compute orthogonal matrix V.
	GSVDQ    GSVDJob = 'Q' // Compute orthogonal matrix Q.
	GSVDUnit GSVDJob = 'I' // Use unit-initialized matrix.
	GSVDNone GSVDJob = 'N' // Do not compute orthogonal matrix.
)

// EVComp specifies how eigenvectors are computed in Dsteqr.
type EVComp byte

const (
	EVOrig     EVComp = 'V' // Compute eigenvectors of the original symmetric matrix.
	EVTridiag  EVComp = 'I' // Compute eigenvectors of the tridiagonal matrix.
	EVCompNone EVComp = 'N' // Do not compute eigenvectors.
)

// EVJob specifies whether eigenvectors are computed in Dsyev.
type EVJob byte

const (
	EVCompute EVJob = 'V' // Compute eigenvectors.
	EVNone    EVJob = 'N' // Do not compute eigenvectors.
)

// LeftEVJob specifies whether left eigenvectors are computed in Dgeev.
type LeftEVJob byte

const (
	LeftEVCompute LeftEVJob = 'V' // Compute left eigenvectors.
	LeftEVNone    LeftEVJob = 'N' // Do not compute left eigenvectors.
)

// RightEVJob specifies whether right eigenvectors are computed in Dgeev.
type RightEVJob byte

const (
	RightEVCompute RightEVJob = 'V' // Compute right eigenvectors.
	RightEVNone    RightEVJob = 'N' // Do not compute right eigenvectors.
)

// BalanceJob specifies matrix balancing operation.
type BalanceJob byte

const (
	Permute      BalanceJob = 'P' // Permute the matrix only.
	Scale        BalanceJob = 'S' // Scale the matrix only.
	PermuteScale BalanceJob = 'B' // Both permute and scale.
	BalanceNone  BalanceJob = 'N' // Do not balance.
)

// SchurJob specifies whether the Schur form is computed in Dhseqr.
type SchurJob byte

const (
	EigenvaluesOnly     SchurJob = 'E' // Compute eigenvalues only.
	EigenvaluesAndSchur SchurJob = 'S' // Compute eigenvalues and the Schur form.
)

// SchurComp specifies whether and how the Schur vectors are computed in Dhseqr.
type SchurComp byte

const (
	SchurOrig SchurComp = 'V' // Compute Schur vectors of the original matrix.
	SchurHess SchurComp = 'I' // Compute Schur vectors of the upper Hessenberg matrix.
	SchurNone SchurComp = 'N' // Do not compute Schur vectors.
)

// UpdateSchurComp specifies whether the matrix of Schur vectors is updated in Dtrexc.
type UpdateSchurComp byte

const (
	UpdateSchur     UpdateSchurComp = 'V' // Update the matrix of Schur vectors.
	UpdateSchurNone UpdateSchurComp = 'N' // Do not update the matrix of Schur vectors.
)

// EVSide specifies what eigenvectors are computed in Dtrevc3.
type EVSide byte

const (
	EVRight EVSide = 'R' // Compute only right eigenvectors.
	EVLeft  EVSide = 'L' // Compute only left eigenvectors.
	EVBoth  EVSide = 'B' // Compute both right and left eigenvectors.
)

// EVHowMany specifies which eigenvectors are computed in Dtrevc3 and how.
type EVHowMany byte

const (
	EVAll      EVHowMany = 'A' // Compute all right and/or left eigenvectors.
	EVAllMulQ  EVHowMany = 'B' // Compute all right and/or left eigenvectors multiplied by an input matrix.
	EVSelected EVHowMany = 'S' // Compute selected right and/or left eigenvectors.
)
Please open up an issue +// if there is a specific function you need and/or are willing to implement. +package lapack64 // import "gonum.org/v1/gonum/lapack/lapack64" diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go b/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go new file mode 100644 index 0000000000..208ee1f43f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go @@ -0,0 +1,581 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lapack64 + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/gonum" +) + +var lapack64 lapack.Float64 = gonum.Implementation{} + +// Use sets the LAPACK float64 implementation to be used by subsequent BLAS calls. +// The default implementation is native.Implementation. +func Use(l lapack.Float64) { + lapack64 = l +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// Potrf computes the Cholesky factorization of a. +// The factorization has the form +// A = U^T * U if a.Uplo == blas.Upper, or +// A = L * L^T if a.Uplo == blas.Lower, +// where U is an upper triangular matrix and L is lower triangular. +// The triangular matrix is returned in t, and the underlying data between +// a and t is shared. The returned bool indicates whether a is positive +// definite and the factorization could be finished. +func Potrf(a blas64.Symmetric) (t blas64.Triangular, ok bool) { + ok = lapack64.Dpotrf(a.Uplo, a.N, a.Data, max(1, a.Stride)) + t.Uplo = a.Uplo + t.N = a.N + t.Data = a.Data + t.Stride = a.Stride + t.Diag = blas.NonUnit + return +} + +// Potri computes the inverse of a real symmetric positive definite matrix A +// using its Cholesky factorization. 
+// +// On entry, t contains the triangular factor U or L from the Cholesky +// factorization A = U^T*U or A = L*L^T, as computed by Potrf. +// +// On return, the upper or lower triangle of the (symmetric) inverse of A is +// stored in t, overwriting the input factor U or L, and also returned in a. The +// underlying data between a and t is shared. +// +// The returned bool indicates whether the inverse was computed successfully. +func Potri(t blas64.Triangular) (a blas64.Symmetric, ok bool) { + ok = lapack64.Dpotri(t.Uplo, t.N, t.Data, max(1, t.Stride)) + a.Uplo = t.Uplo + a.N = t.N + a.Data = t.Data + a.Stride = t.Stride + return +} + +// Potrs solves a system of n linear equations A*X = B where A is an n×n +// symmetric positive definite matrix and B is an n×nrhs matrix, using the +// Cholesky factorization A = U^T*U or A = L*L^T. t contains the corresponding +// triangular factor as returned by Potrf. On entry, B contains the right-hand +// side matrix B, on return it contains the solution matrix X. +func Potrs(t blas64.Triangular, b blas64.General) { + lapack64.Dpotrs(t.Uplo, t.N, b.Cols, t.Data, max(1, t.Stride), b.Data, max(1, b.Stride)) +} + +// Gecon estimates the reciprocal of the condition number of the n×n matrix A +// given the LU decomposition of the matrix. The condition number computed may +// be based on the 1-norm or the ∞-norm. +// +// a contains the result of the LU decomposition of A as computed by Getrf. +// +// anorm is the corresponding 1-norm or ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 4*n and Gecon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Gecon will panic otherwise. 
+func Gecon(norm lapack.MatrixNorm, a blas64.General, anorm float64, work []float64, iwork []int) float64 { + return lapack64.Dgecon(norm, a.Cols, a.Data, max(1, a.Stride), anorm, work, iwork) +} + +// Gels finds a minimum-norm solution based on the matrices A and B using the +// QR or LQ factorization. Gels returns false if the matrix +// A is singular, and true if this solution was successfully found. +// +// The minimization problem solved depends on the input parameters. +// +// 1. If m >= n and trans == blas.NoTrans, Gels finds X such that || A*X - B||_2 +// is minimized. +// 2. If m < n and trans == blas.NoTrans, Gels finds the minimum norm solution of +// A * X = B. +// 3. If m >= n and trans == blas.Trans, Gels finds the minimum norm solution of +// A^T * X = B. +// 4. If m < n and trans == blas.Trans, Gels finds X such that || A*X - B||_2 +// is minimized. +// Note that the least-squares solutions (cases 1 and 3) perform the minimization +// per column of B. This is not the same as finding the minimum-norm matrix. +// +// The matrix A is a general matrix of size m×n and is modified during this call. +// The input matrix B is of size max(m,n)×nrhs, and serves two purposes. On entry, +// the elements of b specify the input matrix B. B has size m×nrhs if +// trans == blas.NoTrans, and n×nrhs if trans == blas.Trans. On exit, the +// leading submatrix of b contains the solution vectors X. If trans == blas.NoTrans, +// this submatrix is of size n×nrhs, and of size m×nrhs otherwise. +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= max(m,n) + max(m,n,nrhs), and this function will panic +// otherwise. A longer work will enable blocked algorithms to be called. +// In the special case that lwork == -1, work[0] will be set to the optimal working +// length. 
+func Gels(trans blas.Transpose, a blas64.General, b blas64.General, work []float64, lwork int) bool { + return lapack64.Dgels(trans, a.Rows, a.Cols, b.Cols, a.Data, max(1, a.Stride), b.Data, max(1, b.Stride), work, lwork) +} + +// Geqrf computes the QR factorization of the m×n matrix A using a blocked +// algorithm. A is modified to contain the information to construct Q and R. +// The upper triangle of a contains the matrix R. The lower triangular elements +// (not including the diagonal) contain the elementary reflectors. tau is modified +// to contain the reflector scales. tau must have length at least min(m,n), and +// this function will panic otherwise. +// +// The ith elementary reflector can be explicitly constructed by first extracting +// the +// v[j] = 0 j < i +// v[j] = 1 j == i +// v[j] = a[j*lda+i] j > i +// and computing H_i = I - tau[i] * v * v^T. +// +// The orthonormal matrix Q can be constucted from a product of these elementary +// reflectors, Q = H_0 * H_1 * ... * H_{k-1}, where k = min(m,n). +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m and this function will panic otherwise. +// Geqrf is a blocked QR factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Geqrf, +// the optimal work length will be stored into work[0]. +func Geqrf(a blas64.General, tau, work []float64, lwork int) { + lapack64.Dgeqrf(a.Rows, a.Cols, a.Data, max(1, a.Stride), tau, work, lwork) +} + +// Gelqf computes the LQ factorization of the m×n matrix A using a blocked +// algorithm. A is modified to contain the information to construct L and Q. The +// lower triangle of a contains the matrix L. The elements above the diagonal +// and the slice tau represent the matrix Q. tau is modified to contain the +// reflector scales. tau must have length at least min(m,n), and this function +// will panic otherwise. 
+// +// See Geqrf for a description of the elementary reflectors and orthonormal +// matrix Q. Q is constructed as a product of these elementary reflectors, +// Q = H_{k-1} * ... * H_1 * H_0. +// +// Work is temporary storage, and lwork specifies the usable memory length. +// At minimum, lwork >= m and this function will panic otherwise. +// Gelqf is a blocked LQ factorization, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Gelqf, +// the optimal work length will be stored into work[0]. +func Gelqf(a blas64.General, tau, work []float64, lwork int) { + lapack64.Dgelqf(a.Rows, a.Cols, a.Data, max(1, a.Stride), tau, work, lwork) +} + +// Gesvd computes the singular value decomposition of the input matrix A. +// +// The singular value decomposition is +// A = U * Sigma * V^T +// where Sigma is an m×n diagonal matrix containing the singular values of A, +// U is an m×m orthogonal matrix and V is an n×n orthogonal matrix. The first +// min(m,n) columns of U and V are the left and right singular vectors of A +// respectively. +// +// jobU and jobVT are options for computing the singular vectors. The behavior +// is as follows +// jobU == lapack.SVDAll All m columns of U are returned in u +// jobU == lapack.SVDStore The first min(m,n) columns are returned in u +// jobU == lapack.SVDOverwrite The first min(m,n) columns of U are written into a +// jobU == lapack.SVDNone The columns of U are not computed. +// The behavior is the same for jobVT and the rows of V^T. At most one of jobU +// and jobVT can equal lapack.SVDOverwrite, and Gesvd will panic otherwise. +// +// On entry, a contains the data for the m×n matrix A. During the call to Gesvd +// the data is overwritten. On exit, A contains the appropriate singular vectors +// if either job is lapack.SVDOverwrite. +// +// s is a slice of length at least min(m,n) and on exit contains the singular +// values in decreasing order. 
+//
+// u contains the left singular vectors on exit, stored columnwise. If
+// jobU == lapack.SVDAll, u is of size m×m. If jobU == lapack.SVDStore u is
+// of size m×min(m,n). If jobU == lapack.SVDOverwrite or lapack.SVDNone, u is
+// not used.
+//
+// vt contains the right singular vectors on exit, stored rowwise. If
+// jobVT == lapack.SVDAll, vt is of size n×n. If jobVT == lapack.SVDStore vt is
+// of size min(m,n)×n. If jobVT == lapack.SVDOverwrite or lapack.SVDNone, vt is
+// not used.
+//
+// work is a slice for storing temporary memory, and lwork is the usable size of
+// the slice. lwork must be at least max(5*min(m,n), 3*min(m,n)+max(m,n)).
+// If lwork == -1, instead of performing Gesvd, the optimal work length will be
+// stored into work[0]. Gesvd will panic if the working memory has insufficient
+// storage.
+//
+// Gesvd returns whether the decomposition successfully completed.
+func Gesvd(jobU, jobVT lapack.SVDJob, a, u, vt blas64.General, s, work []float64, lwork int) (ok bool) {
+	return lapack64.Dgesvd(jobU, jobVT, a.Rows, a.Cols, a.Data, max(1, a.Stride), s, u.Data, max(1, u.Stride), vt.Data, max(1, vt.Stride), work, lwork)
+}
+
+// Getrf computes the LU decomposition of the m×n matrix A.
+// The LU decomposition is a factorization of A into
+//  A = P * L * U
+// where P is a permutation matrix, L is a unit lower triangular matrix, and
+// U is a (usually) non-unit upper triangular matrix. On exit, L and U are stored
+// in place into a.
+//
+// ipiv is a permutation vector. It indicates that row i of the matrix was
+// changed with ipiv[i]. ipiv must have length at least min(m,n), and will panic
+// otherwise. ipiv is zero-indexed.
+//
+// Getrf is the blocked version of the algorithm.
+//
+// Getrf returns whether the matrix A is singular. The LU decomposition will
+// be computed regardless of the singularity of A, but division by zero
+// will occur if the false is returned and the result is used to solve a
+// system of equations.
+func Getrf(a blas64.General, ipiv []int) bool {
+	return lapack64.Dgetrf(a.Rows, a.Cols, a.Data, max(1, a.Stride), ipiv)
+}
+
+// Getri computes the inverse of the matrix A using the LU factorization computed
+// by Getrf. On entry, a contains the PLU decomposition of A as computed by
+// Getrf and on exit contains the reciprocal of the original matrix.
+//
+// Getri will not perform the inversion if the matrix is singular, and returns
+// a boolean indicating whether the inversion was successful.
+//
+// Work is temporary storage, and lwork specifies the usable memory length.
+// At minimum, lwork >= n and this function will panic otherwise.
+// Getri is a blocked inversion, but the block size is limited
+// by the temporary space available. If lwork == -1, instead of performing Getri,
+// the optimal work length will be stored into work[0].
+func Getri(a blas64.General, ipiv []int, work []float64, lwork int) (ok bool) {
+	return lapack64.Dgetri(a.Cols, a.Data, max(1, a.Stride), ipiv, work, lwork)
+}
+
+// Getrs solves a system of equations using an LU factorization.
+// The system of equations solved is
+//  A * X = B   if trans == blas.NoTrans
+//  A^T * X = B if trans == blas.Trans
+// A is a general n×n matrix with stride lda. B is a general matrix of size n×nrhs.
+//
+// On entry b contains the elements of the matrix B. On exit, b contains the
+// elements of X, the solution to the system of equations.
+//
+// a and ipiv contain the LU factorization of A and the permutation indices as
+// computed by Getrf. ipiv is zero-indexed.
+func Getrs(trans blas.Transpose, a blas64.General, b blas64.General, ipiv []int) {
+	lapack64.Dgetrs(trans, a.Cols, b.Cols, a.Data, max(1, a.Stride), ipiv, b.Data, max(1, b.Stride))
+}
+
+// Ggsvd3 computes the generalized singular value decomposition (GSVD)
+// of an m×n matrix A and p×n matrix B:
+//  U^T*A*Q = D1*[ 0 R ]
+
+//  V^T*B*Q = D2*[ 0 R ]
+// where U, V and Q are orthogonal matrices.
+// +// Ggsvd3 returns k and l, the dimensions of the sub-blocks. k+l +// is the effective numerical rank of the (m+p)×n matrix [ A^T B^T ]^T. +// R is a (k+l)×(k+l) nonsingular upper triangular matrix, D1 and +// D2 are m×(k+l) and p×(k+l) diagonal matrices and of the following +// structures, respectively: +// +// If m-k-l >= 0, +// +// k l +// D1 = k [ I 0 ] +// l [ 0 C ] +// m-k-l [ 0 0 ] +// +// k l +// D2 = l [ 0 S ] +// p-l [ 0 0 ] +// +// n-k-l k l +// [ 0 R ] = k [ 0 R11 R12 ] k +// l [ 0 0 R22 ] l +// +// where +// +// C = diag( alpha_k, ... , alpha_{k+l} ), +// S = diag( beta_k, ... , beta_{k+l} ), +// C^2 + S^2 = I. +// +// R is stored in +// A[0:k+l, n-k-l:n] +// on exit. +// +// If m-k-l < 0, +// +// k m-k k+l-m +// D1 = k [ I 0 0 ] +// m-k [ 0 C 0 ] +// +// k m-k k+l-m +// D2 = m-k [ 0 S 0 ] +// k+l-m [ 0 0 I ] +// p-l [ 0 0 0 ] +// +// n-k-l k m-k k+l-m +// [ 0 R ] = k [ 0 R11 R12 R13 ] +// m-k [ 0 0 R22 R23 ] +// k+l-m [ 0 0 0 R33 ] +// +// where +// C = diag( alpha_k, ... , alpha_m ), +// S = diag( beta_k, ... , beta_m ), +// C^2 + S^2 = I. +// +// R = [ R11 R12 R13 ] is stored in A[1:m, n-k-l+1:n] +// [ 0 R22 R23 ] +// and R33 is stored in +// B[m-k:l, n+m-k-l:n] on exit. +// +// Ggsvd3 computes C, S, R, and optionally the orthogonal transformation +// matrices U, V and Q. +// +// jobU, jobV and jobQ are options for computing the orthogonal matrices. The behavior +// is as follows +// jobU == lapack.GSVDU Compute orthogonal matrix U +// jobU == lapack.GSVDNone Do not compute orthogonal matrix. +// The behavior is the same for jobV and jobQ with the exception that instead of +// lapack.GSVDU these accept lapack.GSVDV and lapack.GSVDQ respectively. +// The matrices U, V and Q must be m×m, p×p and n×n respectively unless the +// relevant job parameter is lapack.GSVDNone. +// +// alpha and beta must have length n or Ggsvd3 will panic. 
On exit, alpha and +// beta contain the generalized singular value pairs of A and B +// alpha[0:k] = 1, +// beta[0:k] = 0, +// if m-k-l >= 0, +// alpha[k:k+l] = diag(C), +// beta[k:k+l] = diag(S), +// if m-k-l < 0, +// alpha[k:m]= C, alpha[m:k+l]= 0 +// beta[k:m] = S, beta[m:k+l] = 1. +// if k+l < n, +// alpha[k+l:n] = 0 and +// beta[k+l:n] = 0. +// +// On exit, iwork contains the permutation required to sort alpha descending. +// +// iwork must have length n, work must have length at least max(1, lwork), and +// lwork must be -1 or greater than n, otherwise Ggsvd3 will panic. If +// lwork is -1, work[0] holds the optimal lwork on return, but Ggsvd3 does +// not perform the GSVD. +func Ggsvd3(jobU, jobV, jobQ lapack.GSVDJob, a, b blas64.General, alpha, beta []float64, u, v, q blas64.General, work []float64, lwork int, iwork []int) (k, l int, ok bool) { + return lapack64.Dggsvd3(jobU, jobV, jobQ, a.Rows, a.Cols, b.Rows, a.Data, max(1, a.Stride), b.Data, max(1, b.Stride), alpha, beta, u.Data, max(1, u.Stride), v.Data, max(1, v.Stride), q.Data, max(1, q.Stride), work, lwork, iwork) +} + +// Lange computes the matrix norm of the general m×n matrix A. The input norm +// specifies the norm computed. +// lapack.MaxAbs: the maximum absolute value of an element. +// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. +// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. +// lapack.Frobenius: the square root of the sum of the squares of the entries. +// If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise. +// There are no restrictions on work for the other matrix norms. +func Lange(norm lapack.MatrixNorm, a blas64.General, work []float64) float64 { + return lapack64.Dlange(norm, a.Rows, a.Cols, a.Data, max(1, a.Stride), work) +} + +// Lansy computes the specified norm of an n×n symmetric matrix. 
If
+// norm == lapack.MaxColumnSum or norm == lapack.MaxRowSum work must have length
+// at least n and this function will panic otherwise.
+// There are no restrictions on work for the other matrix norms.
+func Lansy(norm lapack.MatrixNorm, a blas64.Symmetric, work []float64) float64 {
+	return lapack64.Dlansy(norm, a.Uplo, a.N, a.Data, max(1, a.Stride), work)
+}
+
+// Lantr computes the specified norm of an m×n trapezoidal matrix A. If
+// norm == lapack.MaxColumnSum work must have length at least n and this function
+// will panic otherwise. There are no restrictions on work for the other matrix norms.
+func Lantr(norm lapack.MatrixNorm, a blas64.Triangular, work []float64) float64 {
+	return lapack64.Dlantr(norm, a.Uplo, a.Diag, a.N, a.N, a.Data, max(1, a.Stride), work)
+}
+
+// Lapmt rearranges the columns of the m×n matrix X as specified by the
+// permutation k_0, k_1, ..., k_{n-1} of the integers 0, ..., n-1.
+//
+// If forward is true a forward permutation is performed:
+//
+//  X[0:m, k[j]] is moved to X[0:m, j] for j = 0, 1, ..., n-1.
+//
+// otherwise a backward permutation is performed:
+//
+//  X[0:m, j] is moved to X[0:m, k[j]] for j = 0, 1, ..., n-1.
+//
+// k must have length n, otherwise Lapmt will panic. k is zero-indexed.
+func Lapmt(forward bool, x blas64.General, k []int) {
+	lapack64.Dlapmt(forward, x.Rows, x.Cols, x.Data, max(1, x.Stride), k)
+}
+
+// Ormlq multiplies the matrix C by the orthogonal matrix Q defined by
+// A and tau. A and tau are as returned from Gelqf.
+//  C = Q * C   if side == blas.Left and trans == blas.NoTrans
+//  C = Q^T * C if side == blas.Left and trans == blas.Trans
+//  C = C * Q   if side == blas.Right and trans == blas.NoTrans
+//  C = C * Q^T if side == blas.Right and trans == blas.Trans
+// If side == blas.Left, A is a matrix of size k×m, and if side == blas.Right
+// A is of size k×n. This uses a blocked algorithm.
+//
+// Work is temporary storage, and lwork specifies the usable memory length.
+// At minimum, lwork >= m if side == blas.Left and lwork >= n if side == blas.Right, +// and this function will panic otherwise. +// Ormlq uses a block algorithm, but the block size is limited +// by the temporary space available. If lwork == -1, instead of performing Ormlq, +// the optimal work length will be stored into work[0]. +// +// Tau contains the Householder scales and must have length at least k, and +// this function will panic otherwise. +func Ormlq(side blas.Side, trans blas.Transpose, a blas64.General, tau []float64, c blas64.General, work []float64, lwork int) { + lapack64.Dormlq(side, trans, c.Rows, c.Cols, a.Rows, a.Data, max(1, a.Stride), tau, c.Data, max(1, c.Stride), work, lwork) +} + +// Ormqr multiplies an m×n matrix C by an orthogonal matrix Q as +// C = Q * C, if side == blas.Left and trans == blas.NoTrans, +// C = Q^T * C, if side == blas.Left and trans == blas.Trans, +// C = C * Q, if side == blas.Right and trans == blas.NoTrans, +// C = C * Q^T, if side == blas.Right and trans == blas.Trans, +// where Q is defined as the product of k elementary reflectors +// Q = H_0 * H_1 * ... * H_{k-1}. +// +// If side == blas.Left, A is an m×k matrix and 0 <= k <= m. +// If side == blas.Right, A is an n×k matrix and 0 <= k <= n. +// The ith column of A contains the vector which defines the elementary +// reflector H_i and tau[i] contains its scalar factor. tau must have length k +// and Ormqr will panic otherwise. Geqrf returns A and tau in the required +// form. +// +// work must have length at least max(1,lwork), and lwork must be at least n if +// side == blas.Left and at least m if side == blas.Right, otherwise Ormqr will +// panic. +// +// work is temporary storage, and lwork specifies the usable memory length. At +// minimum, lwork >= m if side == blas.Left and lwork >= n if side == +// blas.Right, and this function will panic otherwise. Larger values of lwork +// will generally give better performance. 
On return, work[0] will contain the +// optimal value of lwork. +// +// If lwork is -1, instead of performing Ormqr, the optimal workspace size will +// be stored into work[0]. +func Ormqr(side blas.Side, trans blas.Transpose, a blas64.General, tau []float64, c blas64.General, work []float64, lwork int) { + lapack64.Dormqr(side, trans, c.Rows, c.Cols, a.Cols, a.Data, max(1, a.Stride), tau, c.Data, max(1, c.Stride), work, lwork) +} + +// Pocon estimates the reciprocal of the condition number of a positive-definite +// matrix A given the Cholesky decmposition of A. The condition number computed +// is based on the 1-norm and the ∞-norm. +// +// anorm is the 1-norm and the ∞-norm of the original matrix A. +// +// work is a temporary data slice of length at least 3*n and Pocon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Pocon will panic otherwise. +func Pocon(a blas64.Symmetric, anorm float64, work []float64, iwork []int) float64 { + return lapack64.Dpocon(a.Uplo, a.N, a.Data, max(1, a.Stride), anorm, work, iwork) +} + +// Syev computes all eigenvalues and, optionally, the eigenvectors of a real +// symmetric matrix A. +// +// w contains the eigenvalues in ascending order upon return. w must have length +// at least n, and Syev will panic otherwise. +// +// On entry, a contains the elements of the symmetric matrix A in the triangular +// portion specified by uplo. If jobz == lapack.EVCompute, a contains the +// orthonormal eigenvectors of A on exit, otherwise jobz must be lapack.EVNone +// and on exit the specified triangular region is overwritten. +// +// Work is temporary storage, and lwork specifies the usable memory length. At minimum, +// lwork >= 3*n-1, and Syev will panic otherwise. The amount of blocking is +// limited by the usable length. If lwork == -1, instead of computing Syev the +// optimal work length is stored into work[0]. 
+func Syev(jobz lapack.EVJob, a blas64.Symmetric, w, work []float64, lwork int) (ok bool) { + return lapack64.Dsyev(jobz, a.Uplo, a.N, a.Data, max(1, a.Stride), w, work, lwork) +} + +// Trcon estimates the reciprocal of the condition number of a triangular matrix A. +// The condition number computed may be based on the 1-norm or the ∞-norm. +// +// work is a temporary data slice of length at least 3*n and Trcon will panic otherwise. +// +// iwork is a temporary data slice of length at least n and Trcon will panic otherwise. +func Trcon(norm lapack.MatrixNorm, a blas64.Triangular, work []float64, iwork []int) float64 { + return lapack64.Dtrcon(norm, a.Uplo, a.Diag, a.N, a.Data, max(1, a.Stride), work, iwork) +} + +// Trtri computes the inverse of a triangular matrix, storing the result in place +// into a. +// +// Trtri will not perform the inversion if the matrix is singular, and returns +// a boolean indicating whether the inversion was successful. +func Trtri(a blas64.Triangular) (ok bool) { + return lapack64.Dtrtri(a.Uplo, a.Diag, a.N, a.Data, max(1, a.Stride)) +} + +// Trtrs solves a triangular system of the form A * X = B or A^T * X = B. Trtrs +// returns whether the solve completed successfully. If A is singular, no solve is performed. +func Trtrs(trans blas.Transpose, a blas64.Triangular, b blas64.General) (ok bool) { + return lapack64.Dtrtrs(a.Uplo, trans, a.Diag, a.N, b.Cols, a.Data, max(1, a.Stride), b.Data, max(1, b.Stride)) +} + +// Geev computes the eigenvalues and, optionally, the left and/or right +// eigenvectors for an n×n real nonsymmetric matrix A. +// +// The right eigenvector v_j of A corresponding to an eigenvalue λ_j +// is defined by +// A v_j = λ_j v_j, +// and the left eigenvector u_j corresponding to an eigenvalue λ_j is defined by +// u_j^H A = λ_j u_j^H, +// where u_j^H is the conjugate transpose of u_j. 
+// +// On return, A will be overwritten and the left and right eigenvectors will be +// stored, respectively, in the columns of the n×n matrices VL and VR in the +// same order as their eigenvalues. If the j-th eigenvalue is real, then +// u_j = VL[:,j], +// v_j = VR[:,j], +// and if it is not real, then j and j+1 form a complex conjugate pair and the +// eigenvectors can be recovered as +// u_j = VL[:,j] + i*VL[:,j+1], +// u_{j+1} = VL[:,j] - i*VL[:,j+1], +// v_j = VR[:,j] + i*VR[:,j+1], +// v_{j+1} = VR[:,j] - i*VR[:,j+1], +// where i is the imaginary unit. The computed eigenvectors are normalized to +// have Euclidean norm equal to 1 and largest component real. +// +// Left eigenvectors will be computed only if jobvl == lapack.LeftEVCompute, +// otherwise jobvl must be lapack.LeftEVNone. +// Right eigenvectors will be computed only if jobvr == lapack.RightEVCompute, +// otherwise jobvr must be lapack.RightEVNone. +// For other values of jobvl and jobvr Geev will panic. +// +// On return, wr and wi will contain the real and imaginary parts, respectively, +// of the computed eigenvalues. Complex conjugate pairs of eigenvalues appear +// consecutively with the eigenvalue having the positive imaginary part first. +// wr and wi must have length n, and Geev will panic otherwise. +// +// work must have length at least lwork and lwork must be at least max(1,4*n) if +// the left or right eigenvectors are computed, and at least max(1,3*n) if no +// eigenvectors are computed. For good performance, lwork must generally be +// larger. On return, optimal value of lwork will be stored in work[0]. +// +// If lwork == -1, instead of performing Geev, the function only calculates the +// optimal vaule of lwork and stores it into work[0]. +// +// On return, first will be the index of the first valid eigenvalue. +// If first == 0, all eigenvalues and eigenvectors have been computed. 
+// If first is positive, Geev failed to compute all the eigenvalues, no +// eigenvectors have been computed and wr[first:] and wi[first:] contain those +// eigenvalues which have converged. +func Geev(jobvl lapack.LeftEVJob, jobvr lapack.RightEVJob, a blas64.General, wr, wi []float64, vl, vr blas64.General, work []float64, lwork int) (first int) { + n := a.Rows + if a.Cols != n { + panic("lapack64: matrix not square") + } + if jobvl == lapack.LeftEVCompute && (vl.Rows != n || vl.Cols != n) { + panic("lapack64: bad size of VL") + } + if jobvr == lapack.RightEVCompute && (vr.Rows != n || vr.Cols != n) { + panic("lapack64: bad size of VR") + } + return lapack64.Dgeev(jobvl, jobvr, n, a.Data, max(1, a.Stride), wr, wi, vl.Data, max(1, vl.Stride), vr.Data, max(1, vr.Stride), work, lwork) +} diff --git a/vendor/gonum.org/v1/gonum/mat/README.md b/vendor/gonum.org/v1/gonum/mat/README.md new file mode 100644 index 0000000000..0f77e470ed --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/README.md @@ -0,0 +1,3 @@ +# Gonum matrix [![GoDoc](https://godoc.org/gonum.org/v1/gonum/mat?status.svg)](https://godoc.org/gonum.org/v1/gonum/mat) + +Package mat is a matrix package for the Go language. diff --git a/vendor/gonum.org/v1/gonum/mat/band.go b/vendor/gonum.org/v1/gonum/mat/band.go new file mode 100644 index 0000000000..72ebefddd1 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/band.go @@ -0,0 +1,263 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + bandDense *BandDense + _ Matrix = bandDense + _ Banded = bandDense + _ RawBander = bandDense + + _ NonZeroDoer = bandDense + _ RowNonZeroDoer = bandDense + _ ColNonZeroDoer = bandDense +) + +// BandDense represents a band matrix in dense storage format. 
+type BandDense struct { + mat blas64.Band +} + +// Banded is a band matrix representation. +type Banded interface { + Matrix + // Bandwidth returns the lower and upper bandwidth values for + // the matrix. The total bandwidth of the matrix is kl+ku+1. + Bandwidth() (kl, ku int) + + // TBand is the equivalent of the T() method in the Matrix + // interface but guarantees the transpose is of banded type. + TBand() Banded +} + +// A RawBander can return a blas64.Band representation of the receiver. +// Changes to the blas64.Band.Data slice will be reflected in the original +// matrix, changes to the Rows, Cols, KL, KU and Stride fields will not. +type RawBander interface { + RawBand() blas64.Band +} + +// A MutableBanded can set elements of a band matrix. +type MutableBanded interface { + Banded + SetBand(i, j int, v float64) +} + +var ( + _ Matrix = TransposeBand{} + _ Banded = TransposeBand{} + _ UntransposeBander = TransposeBand{} +) + +// TransposeBand is a type for performing an implicit transpose of a band +// matrix. It implements the Banded interface, returning values from the +// transpose of the matrix within. +type TransposeBand struct { + Banded Banded +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Banded field. +func (t TransposeBand) At(i, j int) float64 { + return t.Banded.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. +func (t TransposeBand) Dims() (r, c int) { + c, r = t.Banded.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Banded field. +func (t TransposeBand) T() Matrix { + return t.Banded +} + +// Bandwidth returns the lower and upper bandwidth values for +// the transposed matrix. +func (t TransposeBand) Bandwidth() (kl, ku int) { + kl, ku = t.Banded.Bandwidth() + return ku, kl +} + +// TBand performs an implicit transpose by returning the Banded field. 
+func (t TransposeBand) TBand() Banded { + return t.Banded +} + +// Untranspose returns the Banded field. +func (t TransposeBand) Untranspose() Matrix { + return t.Banded +} + +// UntransposeBand returns the Banded field. +func (t TransposeBand) UntransposeBand() Banded { + return t.Banded +} + +// NewBandDense creates a new Band matrix with r rows and c columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == min(r, c+kl)*(kl+ku+1), +// data is used as the backing slice, and changes to the elements of the returned +// BandDense will be reflected in data. If neither of these is true, NewBandDense +// will panic. kl must be at least zero and less r, and ku must be at least zero and +// less than c, otherwise NewBandDense will panic. +// NewBandDense will panic if either r or c is zero. +// +// The data must be arranged in row-major order constructed by removing the zeros +// from the rows outside the band and aligning the diagonals. For example, the matrix +// 1 2 3 0 0 0 +// 4 5 6 7 0 0 +// 0 8 9 10 11 0 +// 0 0 12 13 14 15 +// 0 0 0 16 17 18 +// 0 0 0 0 19 20 +// becomes (* entries are never accessed) +// * 1 2 3 +// 4 5 6 7 +// 8 9 10 11 +// 12 13 14 15 +// 16 17 18 * +// 19 20 * * +// which is passed to NewBandDense as []float64{*, 1, 2, 3, 4, ...} with kl=1 and ku=2. +// Only the values in the band portion of the matrix are used. 
+func NewBandDense(r, c, kl, ku int, data []float64) *BandDense { + if r <= 0 || c <= 0 || kl < 0 || ku < 0 { + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if kl+1 > r || ku+1 > c { + panic("mat: band out of range") + } + bc := kl + ku + 1 + if data != nil && len(data) != min(r, c+kl)*bc { + panic(ErrShape) + } + if data == nil { + data = make([]float64, min(r, c+kl)*bc) + } + return &BandDense{ + mat: blas64.Band{ + Rows: r, + Cols: c, + KL: kl, + KU: ku, + Stride: bc, + Data: data, + }, + } +} + +// NewDiagonalRect is a convenience function that returns a diagonal matrix represented by a +// BandDense. The length of data must be min(r, c) otherwise NewDiagonalRect will panic. +func NewDiagonalRect(r, c int, data []float64) *BandDense { + return NewBandDense(r, c, 0, 0, data) +} + +// Dims returns the number of rows and columns in the matrix. +func (b *BandDense) Dims() (r, c int) { + return b.mat.Rows, b.mat.Cols +} + +// Bandwidth returns the upper and lower bandwidths of the matrix. +func (b *BandDense) Bandwidth() (kl, ku int) { + return b.mat.KL, b.mat.KU +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (b *BandDense) T() Matrix { + return Transpose{b} +} + +// TBand performs an implicit transpose by returning the receiver inside a TransposeBand. +func (b *BandDense) TBand() Banded { + return TransposeBand{b} +} + +// RawBand returns the underlying blas64.Band used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.Band. +func (b *BandDense) RawBand() blas64.Band { + return b.mat +} + +// SetRawBand sets the underlying blas64.Band used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +func (b *BandDense) SetRawBand(mat blas64.Band) { + b.mat = mat +} + +// DiagView returns the diagonal as a matrix backed by the original data. 
+func (b *BandDense) DiagView() Diagonal { + n := min(b.mat.Rows, b.mat.Cols) + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: b.mat.Stride, + Data: b.mat.Data[b.mat.KL : (n-1)*b.mat.Stride+b.mat.KL+1], + }, + } +} + +// DoNonZero calls the function fn for each of the non-zero elements of b. The function fn +// takes a row/column index and the element value of b at (i, j). +func (b *BandDense) DoNonZero(fn func(i, j int, v float64)) { + for i := 0; i < min(b.mat.Rows, b.mat.Cols+b.mat.KL); i++ { + for j := max(0, i-b.mat.KL); j < min(b.mat.Cols, i+b.mat.KU+1); j++ { + v := b.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// DoRowNonZero calls the function fn for each of the non-zero elements of row i of b. The function fn +// takes a row/column index and the element value of b at (i, j). +func (b *BandDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { + if i < 0 || b.mat.Rows <= i { + panic(ErrRowAccess) + } + for j := max(0, i-b.mat.KL); j < min(b.mat.Cols, i+b.mat.KU+1); j++ { + v := b.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} + +// DoColNonZero calls the function fn for each of the non-zero elements of column j of b. The function fn +// takes a row/column index and the element value of b at (i, j). +func (b *BandDense) DoColNonZero(j int, fn func(i, j int, v float64)) { + if j < 0 || b.mat.Cols <= j { + panic(ErrColAccess) + } + for i := 0; i < min(b.mat.Rows, b.mat.Cols+b.mat.KL); i++ { + if i-b.mat.KL <= j && j < i+b.mat.KU+1 { + v := b.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// Zero sets all of the matrix elements to zero. 
+func (b *BandDense) Zero() {
+	m := b.mat.Rows
+	kL := b.mat.KL
+	nCol := b.mat.KU + 1 + kL
+	for i := 0; i < m; i++ {
+		l := max(0, kL-i)
+		u := min(nCol, m+kL-i)
+		zero(b.mat.Data[i*b.mat.Stride+l : i*b.mat.Stride+u])
+	}
+}
diff --git a/vendor/gonum.org/v1/gonum/mat/cdense.go b/vendor/gonum.org/v1/gonum/mat/cdense.go
new file mode 100644
index 0000000000..9c29d1afd1
--- /dev/null
+++ b/vendor/gonum.org/v1/gonum/mat/cdense.go
@@ -0,0 +1,168 @@
+// Copyright ©2019 The Gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mat
+
+import "gonum.org/v1/gonum/blas/cblas128"
+
+// CDense is a dense matrix representation with complex data.
+type CDense struct {
+	mat cblas128.General
+
+	capRows, capCols int
+}
+
+// Dims returns the number of rows and columns in the matrix.
+func (m *CDense) Dims() (r, c int) {
+	return m.mat.Rows, m.mat.Cols
+}
+
+// H performs an implicit conjugate transpose by returning the receiver inside a
+// Conjugate.
+func (m *CDense) H() CMatrix {
+	return Conjugate{m}
+}
+
+// NewCDense creates a new complex Dense matrix with r rows and c columns.
+// If data == nil, a new slice is allocated for the backing slice.
+// If len(data) == r*c, data is used as the backing slice, and changes to the
+// elements of the returned CDense will be reflected in data.
+// If neither of these is true, NewCDense will panic.
+// NewCDense will panic if either r or c is zero.
+//
+// The data must be arranged in row-major order, i.e. the (i*c + j)-th
+// element in the data slice is the {i, j}-th element in the matrix.
+func NewCDense(r, c int, data []complex128) *CDense { + if r <= 0 || c <= 0 { + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data != nil && r*c != len(data) { + panic(ErrShape) + } + if data == nil { + data = make([]complex128, r*c) + } + return &CDense{ + mat: cblas128.General{ + Rows: r, + Cols: c, + Stride: c, + Data: data, + }, + capRows: r, + capCols: c, + } +} + +// reuseAs resizes an empty matrix to a r×c matrix, +// or checks that a non-empty matrix is r×c. +// +// reuseAs must be kept in sync with reuseAsZeroed. +func (m *CDense) reuseAs(r, c int) { + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. + panic("mat: caps not correctly set") + } + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + if m.IsZero() { + m.mat = cblas128.General{ + Rows: r, + Cols: c, + Stride: c, + Data: useC(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + return + } + if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } +} + +func (m *CDense) reuseAsZeroed(r, c int) { + // This must be kept in-sync with reuseAs. + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. + panic("mat: caps not correctly set") + } + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + if m.IsZero() { + m.mat = cblas128.General{ + Rows: r, + Cols: c, + Stride: c, + Data: useZeroedC(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + return + } + if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } + m.Zero() +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (m *CDense) Reset() { + // Row, Cols and Stride must be zeroed in unison. + m.mat.Rows, m.mat.Cols, m.mat.Stride = 0, 0, 0 + m.capRows, m.capCols = 0, 0 + m.mat.Data = m.mat.Data[:0] +} + +// IsZero returns whether the receiver is zero-sized. 
Zero-sized matrices can be the +// receiver for size-restricted operations. CDense matrices can be zeroed using Reset. +func (m *CDense) IsZero() bool { + // It must be the case that m.Dims() returns + // zeros in this case. See comment in Reset(). + return m.mat.Stride == 0 +} + +// Zero sets all of the matrix elements to zero. +func (m *CDense) Zero() { + r := m.mat.Rows + c := m.mat.Cols + for i := 0; i < r; i++ { + zeroC(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + } +} + +// Copy makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two matrices and +// returns the number of rows and columns it copied. If a aliases the receiver +// and is a transposed Dense or VecDense, with a non-unitary increment, Copy will +// panic. +// +// See the Copier interface for more information. +func (m *CDense) Copy(a CMatrix) (r, c int) { + r, c = a.Dims() + if a == m { + return r, c + } + r = min(r, m.mat.Rows) + c = min(c, m.mat.Cols) + if r == 0 || c == 0 { + return 0, 0 + } + // TODO(btracey): Check for overlap when complex version exists. + // TODO(btracey): Add fast-paths. + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + m.set(i, j, a.At(i, j)) + } + } + return r, c +} diff --git a/vendor/gonum.org/v1/gonum/mat/cholesky.go b/vendor/gonum.org/v1/gonum/mat/cholesky.go new file mode 100644 index 0000000000..bee438538f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/cholesky.go @@ -0,0 +1,673 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const ( + badTriangle = "mat: invalid triangle" + badCholesky = "mat: invalid Cholesky factorization" +) + +var ( + _ Matrix = (*Cholesky)(nil) + _ Symmetric = (*Cholesky)(nil) +) + +// Cholesky is a symmetric positive definite matrix represented by its +// Cholesky decomposition. +// +// The decomposition can be constructed using the Factorize method. The +// factorization itself can be extracted using the UTo or LTo methods, and the +// original symmetric matrix can be recovered with ToSym. +// +// Note that this matrix representation is useful for certain operations, in +// particular finding solutions to linear equations. It is very inefficient +// at other operations, in particular At is slow. +// +// Cholesky methods may only be called on a value that has been successfully +// initialized by a call to Factorize that has returned true. Calls to methods +// of an unsuccessful Cholesky factorization will panic. +type Cholesky struct { + // The chol pointer must never be retained as a pointer outside the Cholesky + // struct, either by returning chol outside the struct or by setting it to + // a pointer coming from outside. The same prohibition applies to the data + // slice within chol. + chol *TriDense + cond float64 +} + +// updateCond updates the condition number of the Cholesky decomposition. If +// norm > 0, then that norm is used as the norm of the original matrix A, otherwise +// the norm is estimated from the decomposition. +func (c *Cholesky) updateCond(norm float64) { + n := c.chol.mat.N + work := getFloats(3*n, false) + defer putFloats(work) + if norm < 0 { + // This is an approximation. By the definition of a norm, + // |AB| <= |A| |B|. 
+ // Since A = U^T*U, we get for the condition number κ that + // κ(A) := |A| |A^-1| = |U^T*U| |A^-1| <= |U^T| |U| |A^-1|, + // so this will overestimate the condition number somewhat. + // The norm of the original factorized matrix cannot be stored + // because of update possibilities. + unorm := lapack64.Lantr(CondNorm, c.chol.mat, work) + lnorm := lapack64.Lantr(CondNormTrans, c.chol.mat, work) + norm = unorm * lnorm + } + sym := c.chol.asSymBlas() + iwork := getInts(n, false) + v := lapack64.Pocon(sym, norm, work, iwork) + putInts(iwork) + c.cond = 1 / v +} + +// Dims returns the dimensions of the matrix. +func (ch *Cholesky) Dims() (r, c int) { + if !ch.valid() { + panic(badCholesky) + } + r, c = ch.chol.Dims() + return r, c +} + +// At returns the element at row i, column j. +func (c *Cholesky) At(i, j int) float64 { + if !c.valid() { + panic(badCholesky) + } + n := c.Symmetric() + if uint(i) >= uint(n) { + panic(ErrRowAccess) + } + if uint(j) >= uint(n) { + panic(ErrColAccess) + } + + var val float64 + for k := 0; k <= min(i, j); k++ { + val += c.chol.at(k, i) * c.chol.at(k, j) + } + return val +} + +// T returns the the receiver, the transpose of a symmetric matrix. +func (c *Cholesky) T() Matrix { + return c +} + +// Symmetric implements the Symmetric interface and returns the number of rows +// in the matrix (this is also the number of columns). +func (c *Cholesky) Symmetric() int { + r, _ := c.chol.Dims() + return r +} + +// Cond returns the condition number of the factorized matrix. +func (c *Cholesky) Cond() float64 { + if !c.valid() { + panic(badCholesky) + } + return c.cond +} + +// Factorize calculates the Cholesky decomposition of the matrix A and returns +// whether the matrix is positive definite. If Factorize returns false, the +// factorization must not be used. 
+func (c *Cholesky) Factorize(a Symmetric) (ok bool) { + n := a.Symmetric() + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else { + c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) + } + copySymIntoTriangle(c.chol, a) + + sym := c.chol.asSymBlas() + work := getFloats(c.chol.mat.N, false) + norm := lapack64.Lansy(CondNorm, sym, work) + putFloats(work) + _, ok = lapack64.Potrf(sym) + if ok { + c.updateCond(norm) + } else { + c.Reset() + } + return ok +} + +// Reset resets the factorization so that it can be reused as the receiver of a +// dimensionally restricted operation. +func (c *Cholesky) Reset() { + if c.chol != nil { + c.chol.Reset() + } + c.cond = math.Inf(1) +} + +// SetFromU sets the Cholesky decomposition from the given triangular matrix. +// SetFromU panics if t is not upper triangular. Note that t is copied into, +// not stored inside, the receiver. +func (c *Cholesky) SetFromU(t *TriDense) { + n, kind := t.Triangle() + if kind != Upper { + panic("cholesky: matrix must be upper triangular") + } + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else { + c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) + } + c.chol.Copy(t) + c.updateCond(-1) +} + +// Clone makes a copy of the input Cholesky into the receiver, overwriting the +// previous value of the receiver. Clone does not place any restrictions on receiver +// shape. Clone panics if the input Cholesky is not the result of a valid decomposition. +func (c *Cholesky) Clone(chol *Cholesky) { + if !chol.valid() { + panic(badCholesky) + } + n := chol.Symmetric() + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else { + c.chol = NewTriDense(n, Upper, use(c.chol.mat.Data, n*n)) + } + c.chol.Copy(chol.chol) + c.cond = chol.cond +} + +// Det returns the determinant of the matrix that has been factorized. 
+func (c *Cholesky) Det() float64 { + if !c.valid() { + panic(badCholesky) + } + return math.Exp(c.LogDet()) +} + +// LogDet returns the log of the determinant of the matrix that has been factorized. +func (c *Cholesky) LogDet() float64 { + if !c.valid() { + panic(badCholesky) + } + var det float64 + for i := 0; i < c.chol.mat.N; i++ { + det += 2 * math.Log(c.chol.mat.Data[i*c.chol.mat.Stride+i]) + } + return det +} + +// SolveTo finds the matrix X that solves A * X = B where A is represented +// by the Cholesky decomposition. The result is stored in-place into dst. +func (c *Cholesky) SolveTo(dst *Dense, b Matrix) error { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + bm, bn := b.Dims() + if n != bm { + panic(ErrShape) + } + + dst.reuseAs(bm, bn) + if b != dst { + dst.Copy(b) + } + lapack64.Potrs(c.chol.mat, dst.mat) + if c.cond > ConditionTolerance { + return Condition(c.cond) + } + return nil +} + +// SolveCholTo finds the matrix X that solves A * X = B where A and B are represented +// by their Cholesky decompositions a and b. The result is stored in-place into +// dst. +func (a *Cholesky) SolveCholTo(dst *Dense, b *Cholesky) error { + if !a.valid() || !b.valid() { + panic(badCholesky) + } + bn := b.chol.mat.N + if a.chol.mat.N != bn { + panic(ErrShape) + } + + dst.reuseAsZeroed(bn, bn) + dst.Copy(b.chol.T()) + blas64.Trsm(blas.Left, blas.Trans, 1, a.chol.mat, dst.mat) + blas64.Trsm(blas.Left, blas.NoTrans, 1, a.chol.mat, dst.mat) + blas64.Trmm(blas.Right, blas.NoTrans, 1, b.chol.mat, dst.mat) + if a.cond > ConditionTolerance { + return Condition(a.cond) + } + return nil +} + +// SolveVecTo finds the vector X that solves A * x = b where A is represented +// by the Cholesky decomposition. The result is stored in-place into +// dst. 
+func (c *Cholesky) SolveVecTo(dst *VecDense, b Vector) error { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if br, bc := b.Dims(); br != n || bc != 1 { + panic(ErrShape) + } + switch rv := b.(type) { + default: + dst.reuseAs(n) + return c.SolveTo(dst.asDense(), b) + case RawVectorer: + bmat := rv.RawVector() + if dst != b { + dst.checkOverlap(bmat) + } + dst.reuseAs(n) + if dst != b { + dst.CopyVec(b) + } + lapack64.Potrs(c.chol.mat, dst.asGeneral()) + if c.cond > ConditionTolerance { + return Condition(c.cond) + } + return nil + } +} + +// RawU returns the Triangular matrix used to store the Cholesky decomposition of +// the original matrix A. The returned matrix should not be modified. If it is +// modified, the decomposition is invalid and should not be used. +func (c *Cholesky) RawU() Triangular { + return c.chol +} + +// UTo extracts the n×n upper triangular matrix U from a Cholesky +// decomposition into dst and returns the result. If dst is nil a new +// TriDense is allocated. +// A = U^T * U. +func (c *Cholesky) UTo(dst *TriDense) *TriDense { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if dst == nil { + dst = NewTriDense(n, Upper, make([]float64, n*n)) + } else { + dst.reuseAs(n, Upper) + } + dst.Copy(c.chol) + return dst +} + +// LTo extracts the n×n lower triangular matrix L from a Cholesky +// decomposition into dst and returns the result. If dst is nil a new +// TriDense is allocated. +// A = L * L^T. +func (c *Cholesky) LTo(dst *TriDense) *TriDense { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if dst == nil { + dst = NewTriDense(n, Lower, make([]float64, n*n)) + } else { + dst.reuseAs(n, Lower) + } + dst.Copy(c.chol.TTri()) + return dst +} + +// ToSym reconstructs the original positive definite matrix given its +// Cholesky decomposition into dst and returns the result. If dst is nil +// a new SymDense is allocated. 
+func (c *Cholesky) ToSym(dst *SymDense) *SymDense { + if !c.valid() { + panic(badCholesky) + } + n := c.chol.mat.N + if dst == nil { + dst = NewSymDense(n, nil) + } else { + dst.reuseAs(n) + } + // Create a TriDense representing the Cholesky factor U with dst's + // backing slice. + // Operations on u are reflected in s. + u := &TriDense{ + mat: blas64.Triangular{ + Uplo: blas.Upper, + Diag: blas.NonUnit, + N: n, + Data: dst.mat.Data, + Stride: dst.mat.Stride, + }, + cap: n, + } + u.Copy(c.chol) + // Compute the product U^T*U using the algorithm from LAPACK/TESTING/LIN/dpot01.f + a := u.mat.Data + lda := u.mat.Stride + bi := blas64.Implementation() + for k := n - 1; k >= 0; k-- { + a[k*lda+k] = bi.Ddot(k+1, a[k:], lda, a[k:], lda) + if k > 0 { + bi.Dtrmv(blas.Upper, blas.Trans, blas.NonUnit, k, a, lda, a[k:], lda) + } + } + return dst +} + +// InverseTo computes the inverse of the matrix represented by its Cholesky +// factorization and stores the result into s. If the factorized +// matrix is ill-conditioned, a Condition error will be returned. +// Note that matrix inversion is numerically unstable, and should generally be +// avoided where possible, for example by using the Solve routines. +func (c *Cholesky) InverseTo(s *SymDense) error { + if !c.valid() { + panic(badCholesky) + } + s.reuseAs(c.chol.mat.N) + // Create a TriDense representing the Cholesky factor U with the backing + // slice from s. + // Operations on u are reflected in s. + u := &TriDense{ + mat: blas64.Triangular{ + Uplo: blas.Upper, + Diag: blas.NonUnit, + N: s.mat.N, + Data: s.mat.Data, + Stride: s.mat.Stride, + }, + cap: s.mat.N, + } + u.Copy(c.chol) + + _, ok := lapack64.Potri(u.mat) + if !ok { + return Condition(math.Inf(1)) + } + if c.cond > ConditionTolerance { + return Condition(c.cond) + } + return nil +} + +// Scale multiplies the original matrix A by a positive constant using +// its Cholesky decomposition, storing the result in-place into the receiver. 
+// That is, if the original Cholesky factorization is +// U^T * U = A +// the updated factorization is +// U'^T * U' = f A = A' +// Scale panics if the constant is non-positive, or if the receiver is non-zero +// and is of a different size from the input. +func (c *Cholesky) Scale(f float64, orig *Cholesky) { + if !orig.valid() { + panic(badCholesky) + } + if f <= 0 { + panic("cholesky: scaling by a non-positive constant") + } + n := orig.Symmetric() + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else if c.chol.mat.N != n { + panic(ErrShape) + } + c.chol.ScaleTri(math.Sqrt(f), orig.chol) + c.cond = orig.cond // Scaling by a positive constant does not change the condition number. +} + +// ExtendVecSym computes the Cholesky decomposition of the original matrix A, +// whose Cholesky decomposition is in a, extended by a the n×1 vector v according to +// [A w] +// [w' k] +// where k = v[n-1] and w = v[:n-1]. The result is stored into the receiver. +// In order for the updated matrix to be positive definite, it must be the case +// that k > w' A^-1 w. If this condition does not hold then ExtendVecSym will +// return false and the receiver will not be updated. +// +// ExtendVecSym will panic if v.Len() != a.Symmetric()+1 or if a does not contain +// a valid decomposition. 
+func (c *Cholesky) ExtendVecSym(a *Cholesky, v Vector) (ok bool) { + n := a.Symmetric() + + if v.Len() != n+1 { + panic(badSliceLength) + } + if !a.valid() { + panic(badCholesky) + } + + // The algorithm is commented here, but see also + // https://math.stackexchange.com/questions/955874/cholesky-factor-when-adding-a-row-and-column-to-already-factorized-matrix + // We have A and want to compute the Cholesky of + // [A w] + // [w' k] + // We want + // [U c] + // [0 d] + // to be the updated Cholesky, and so it must be that + // [A w] = [U' 0] [U c] + // [w' k] [c' d] [0 d] + // Thus, we need + // 1) A = U'U (true by the original decomposition being valid), + // 2) U' * c = w => c = U'^-1 w + // 3) c'*c + d'*d = k => d = sqrt(k-c'*c) + + // First, compute c = U'^-1 a + // TODO(btracey): Replace this with CopyVec when issue 167 is fixed. + w := NewVecDense(n, nil) + for i := 0; i < n; i++ { + w.SetVec(i, v.At(i, 0)) + } + k := v.At(n, 0) + + var t VecDense + t.SolveVec(a.chol.T(), w) + + dot := Dot(&t, &t) + if dot >= k { + return false + } + d := math.Sqrt(k - dot) + + newU := NewTriDense(n+1, Upper, nil) + newU.Copy(a.chol) + for i := 0; i < n; i++ { + newU.SetTri(i, n, t.At(i, 0)) + } + newU.SetTri(n, n, d) + c.chol = newU + c.updateCond(-1) + return true +} + +// SymRankOne performs a rank-1 update of the original matrix A and refactorizes +// its Cholesky factorization, storing the result into the receiver. That is, if +// in the original Cholesky factorization +// U^T * U = A, +// in the updated factorization +// U'^T * U' = A + alpha * x * x^T = A'. +// +// Note that when alpha is negative, the updating problem may be ill-conditioned +// and the results may be inaccurate, or the updated matrix A' may not be +// positive definite and not have a Cholesky factorization. SymRankOne returns +// whether the updated matrix A' is positive definite. +// +// SymRankOne updates a Cholesky factorization in O(n²) time. 
The Cholesky +// factorization computation from scratch is O(n³). +func (c *Cholesky) SymRankOne(orig *Cholesky, alpha float64, x Vector) (ok bool) { + if !orig.valid() { + panic(badCholesky) + } + n := orig.Symmetric() + if r, c := x.Dims(); r != n || c != 1 { + panic(ErrShape) + } + if orig != c { + if c.chol == nil { + c.chol = NewTriDense(n, Upper, nil) + } else if c.chol.mat.N != n { + panic(ErrShape) + } + c.chol.Copy(orig.chol) + } + + if alpha == 0 { + return true + } + + // Algorithms for updating and downdating the Cholesky factorization are + // described, for example, in + // - J. J. Dongarra, J. R. Bunch, C. B. Moler, G. W. Stewart: LINPACK + // Users' Guide. SIAM (1979), pages 10.10--10.14 + // or + // - P. E. Gill, G. H. Golub, W. Murray, and M. A. Saunders: Methods for + // modifying matrix factorizations. Mathematics of Computation 28(126) + // (1974), Method C3 on page 521 + // + // The implementation is based on LINPACK code + // http://www.netlib.org/linpack/dchud.f + // http://www.netlib.org/linpack/dchdd.f + // and + // https://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=2646 + // + // According to http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00301.html + // LINPACK is released under BSD license. + // + // See also: + // - M. A. Saunders: Large-scale Linear Programming Using the Cholesky + // Factorization. Technical Report Stanford University (1972) + // http://i.stanford.edu/pub/cstr/reports/cs/tr/72/252/CS-TR-72-252.pdf + // - Matthias Seeger: Low rank updates for the Cholesky decomposition. + // EPFL Technical Report 161468 (2004) + // http://infoscience.epfl.ch/record/161468 + + work := getFloats(n, false) + defer putFloats(work) + var xmat blas64.Vector + if rv, ok := x.(RawVectorer); ok { + xmat = rv.RawVector() + } else { + var tmp *VecDense + tmp.CopyVec(x) + xmat = tmp.RawVector() + } + blas64.Copy(xmat, blas64.Vector{N: n, Data: work, Inc: 1}) + + if alpha > 0 { + // Compute rank-1 update. 
+ if alpha != 1 { + blas64.Scal(math.Sqrt(alpha), blas64.Vector{N: n, Data: work, Inc: 1}) + } + umat := c.chol.mat + stride := umat.Stride + for i := 0; i < n; i++ { + // Compute parameters of the Givens matrix that zeroes + // the i-th element of x. + c, s, r, _ := blas64.Rotg(umat.Data[i*stride+i], work[i]) + if r < 0 { + // Multiply by -1 to have positive diagonal + // elemnts. + r *= -1 + c *= -1 + s *= -1 + } + umat.Data[i*stride+i] = r + if i < n-1 { + // Multiply the extended factorization matrix by + // the Givens matrix from the left. Only + // the i-th row and x are modified. + blas64.Rot( + blas64.Vector{N: n - i - 1, Data: umat.Data[i*stride+i+1 : i*stride+n], Inc: 1}, + blas64.Vector{N: n - i - 1, Data: work[i+1 : n], Inc: 1}, + c, s) + } + } + c.updateCond(-1) + return true + } + + // Compute rank-1 downdate. + alpha = math.Sqrt(-alpha) + if alpha != 1 { + blas64.Scal(alpha, blas64.Vector{N: n, Data: work, Inc: 1}) + } + // Solve U^T * p = x storing the result into work. + ok = lapack64.Trtrs(blas.Trans, c.chol.RawTriangular(), blas64.General{ + Rows: n, + Cols: 1, + Stride: 1, + Data: work, + }) + if !ok { + // The original matrix is singular. Should not happen, because + // the factorization is valid. + panic(badCholesky) + } + norm := blas64.Nrm2(blas64.Vector{N: n, Data: work, Inc: 1}) + if norm >= 1 { + // The updated matrix is not positive definite. + return false + } + norm = math.Sqrt((1 + norm) * (1 - norm)) + cos := getFloats(n, false) + defer putFloats(cos) + sin := getFloats(n, false) + defer putFloats(sin) + for i := n - 1; i >= 0; i-- { + // Compute parameters of Givens matrices that zero elements of p + // backwards. + cos[i], sin[i], norm, _ = blas64.Rotg(norm, work[i]) + if norm < 0 { + norm *= -1 + cos[i] *= -1 + sin[i] *= -1 + } + } + umat := c.chol.mat + stride := umat.Stride + for i := n - 1; i >= 0; i-- { + work[i] = 0 + // Apply Givens matrices to U. 
+ // TODO(vladimir-ch): Use workspace to avoid modifying the + // receiver in case an invalid factorization is created. + blas64.Rot( + blas64.Vector{N: n - i, Data: work[i:n], Inc: 1}, + blas64.Vector{N: n - i, Data: umat.Data[i*stride+i : i*stride+n], Inc: 1}, + cos[i], sin[i]) + if umat.Data[i*stride+i] == 0 { + // The matrix is singular (may rarely happen due to + // floating-point effects?). + ok = false + } else if umat.Data[i*stride+i] < 0 { + // Diagonal elements should be positive. If it happens + // that on the i-th row the diagonal is negative, + // multiply U from the left by an identity matrix that + // has -1 on the i-th row. + blas64.Scal(-1, blas64.Vector{N: n - i, Data: umat.Data[i*stride+i : i*stride+n], Inc: 1}) + } + } + if ok { + c.updateCond(-1) + } else { + c.Reset() + } + return ok +} + +func (c *Cholesky) valid() bool { + return c.chol != nil && !c.chol.IsZero() +} diff --git a/vendor/gonum.org/v1/gonum/mat/cmatrix.go b/vendor/gonum.org/v1/gonum/mat/cmatrix.go new file mode 100644 index 0000000000..6219c28aaa --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/cmatrix.go @@ -0,0 +1,210 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + "math/cmplx" + + "gonum.org/v1/gonum/floats" +) + +// CMatrix is the basic matrix interface type for complex matrices. +type CMatrix interface { + // Dims returns the dimensions of a Matrix. + Dims() (r, c int) + + // At returns the value of a matrix element at row i, column j. + // It will panic if i or j are out of bounds for the matrix. + At(i, j int) complex128 + + // H returns the conjugate transpose of the Matrix. Whether H + // returns a copy of the underlying data is implementation dependent. + // This method may be implemented using the Conjugate type, which + // provides an implicit matrix conjugate transpose. 
+ H() CMatrix +} + +var ( + _ CMatrix = Conjugate{} + _ Unconjugator = Conjugate{} +) + +// Conjugate is a type for performing an implicit matrix conjugate transpose. +// It implements the Matrix interface, returning values from the conjugate +// transpose of the matrix within. +type Conjugate struct { + CMatrix CMatrix +} + +// At returns the value of the element at row i and column j of the conjugate +// transposed matrix, that is, row j and column i of the Matrix field. +func (t Conjugate) At(i, j int) complex128 { + z := t.CMatrix.At(j, i) + return cmplx.Conj(z) +} + +// Dims returns the dimensions of the transposed matrix. The number of rows returned +// is the number of columns in the Matrix field, and the number of columns is +// the number of rows in the Matrix field. +func (t Conjugate) Dims() (r, c int) { + c, r = t.CMatrix.Dims() + return r, c +} + +// H performs an implicit conjugate transpose by returning the Matrix field. +func (t Conjugate) H() CMatrix { + return t.CMatrix +} + +// Unconjugate returns the Matrix field. +func (t Conjugate) Unconjugate() CMatrix { + return t.CMatrix +} + +// Unconjugator is a type that can undo an implicit conjugate transpose. +type Unconjugator interface { + // Note: This interface is needed to unify all of the Conjugate types. In + // the cmat128 methods, we need to test if the Matrix has been implicitly + // transposed. If this is checked by testing for the specific Conjugate type + // then the behavior will be different if the user uses H() or HTri() for a + // triangular matrix. + + // Unconjugate returns the underlying Matrix stored for the implicit + // conjugate transpose. + Unconjugate() CMatrix +} + +// useC returns a complex128 slice with l elements, using c if it +// has the necessary capacity, otherwise creating a new slice. 
+func useC(c []complex128, l int) []complex128 { + if l <= cap(c) { + return c[:l] + } + return make([]complex128, l) +} + +// useZeroedC returns a complex128 slice with l elements, using c if it +// has the necessary capacity, otherwise creating a new slice. The +// elements of the returned slice are guaranteed to be zero. +func useZeroedC(c []complex128, l int) []complex128 { + if l <= cap(c) { + c = c[:l] + zeroC(c) + return c + } + return make([]complex128, l) +} + +// zeroC zeros the given slice's elements. +func zeroC(c []complex128) { + for i := range c { + c[i] = 0 + } +} + +// unconjugate unconjugates a matrix if applicable. If a is an Unconjugator, then +// unconjugate returns the underlying matrix and true. If it is not, then it returns +// the input matrix and false. +func unconjugate(a CMatrix) (CMatrix, bool) { + if ut, ok := a.(Unconjugator); ok { + return ut.Unconjugate(), true + } + return a, false +} + +// CEqual returns whether the matrices a and b have the same size +// and are element-wise equal. +func CEqual(a, b CMatrix) bool { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + // TODO(btracey): Add in fast-paths. + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if a.At(i, j) != b.At(i, j) { + return false + } + } + } + return true +} + +// CEqualApprox returns whether the matrices a and b have the same size and contain all equal +// elements with tolerance for element-wise equality specified by epsilon. Matrices +// with non-equal shapes are not equal. +func CEqualApprox(a, b CMatrix, epsilon float64) bool { + // TODO(btracey): + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if !cEqualWithinAbsOrRel(a.At(i, j), b.At(i, j), epsilon, epsilon) { + return false + } + } + } + return true +} + +// TODO(btracey): Move these into a cmplxs if/when we have one. 
+ +func cEqualWithinAbsOrRel(a, b complex128, absTol, relTol float64) bool { + if cEqualWithinAbs(a, b, absTol) { + return true + } + return cEqualWithinRel(a, b, relTol) +} + +// cEqualWithinAbs returns true if a and b have an absolute +// difference of less than tol. +func cEqualWithinAbs(a, b complex128, tol float64) bool { + return a == b || cmplx.Abs(a-b) <= tol +} + +const minNormalFloat64 = 2.2250738585072014e-308 + +// cEqualWithinRel returns true if the difference between a and b +// is not greater than tol times the greater value. +func cEqualWithinRel(a, b complex128, tol float64) bool { + if a == b { + return true + } + if cmplx.IsNaN(a) || cmplx.IsNaN(b) { + return false + } + // Cannot play the same trick as in floats because there are multiple + // possible infinities. + if cmplx.IsInf(a) { + if !cmplx.IsInf(b) { + return false + } + ra := real(a) + if math.IsInf(ra, 0) { + if ra == real(b) { + return floats.EqualWithinRel(imag(a), imag(b), tol) + } + return false + } + if imag(a) == imag(b) { + return floats.EqualWithinRel(ra, real(b), tol) + } + return false + } + if cmplx.IsInf(b) { + return false + } + + delta := cmplx.Abs(a - b) + if delta <= minNormalFloat64 { + return delta <= tol*minNormalFloat64 + } + return delta/math.Max(cmplx.Abs(a), cmplx.Abs(b)) <= tol +} diff --git a/vendor/gonum.org/v1/gonum/mat/consts.go b/vendor/gonum.org/v1/gonum/mat/consts.go new file mode 100644 index 0000000000..3de3f5bf47 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/consts.go @@ -0,0 +1,15 @@ +// Copyright ©2016 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +// TriKind represents the triangularity of the matrix. +type TriKind bool + +const ( + // Upper specifies an upper triangular matrix. + Upper TriKind = true + // Lower specifies a lower triangular matrix. 
+ Lower TriKind = false +) diff --git a/vendor/gonum.org/v1/gonum/mat/dense.go b/vendor/gonum.org/v1/gonum/mat/dense.go new file mode 100644 index 0000000000..87b1105cad --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/dense.go @@ -0,0 +1,558 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + dense *Dense + + _ Matrix = dense + _ Mutable = dense + + _ Cloner = dense + _ RowViewer = dense + _ ColViewer = dense + _ RawRowViewer = dense + _ Grower = dense + + _ RawMatrixSetter = dense + _ RawMatrixer = dense + + _ Reseter = dense +) + +// Dense is a dense matrix representation. +type Dense struct { + mat blas64.General + + capRows, capCols int +} + +// NewDense creates a new Dense matrix with r rows and c columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == r*c, data is +// used as the backing slice, and changes to the elements of the returned Dense +// will be reflected in data. If neither of these is true, NewDense will panic. +// NewDense will panic if either r or c is zero. +// +// The data must be arranged in row-major order, i.e. the (i*c + j)-th +// element in the data slice is the {i, j}-th element in the matrix. +func NewDense(r, c int, data []float64) *Dense { + if r <= 0 || c <= 0 { + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data != nil && r*c != len(data) { + panic(ErrShape) + } + if data == nil { + data = make([]float64, r*c) + } + return &Dense{ + mat: blas64.General{ + Rows: r, + Cols: c, + Stride: c, + Data: data, + }, + capRows: r, + capCols: c, + } +} + +// reuseAs resizes an empty matrix to a r×c matrix, +// or checks that a non-empty matrix is r×c. +// +// reuseAs must be kept in sync with reuseAsZeroed. 
+func (m *Dense) reuseAs(r, c int) { + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. + panic("mat: caps not correctly set") + } + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + if m.IsZero() { + m.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + Data: use(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + return + } + if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } +} + +// reuseAsZeroed resizes an empty matrix to a r×c matrix, +// or checks that a non-empty matrix is r×c. It zeroes +// all the elements of the matrix. +// +// reuseAsZeroed must be kept in sync with reuseAs. +func (m *Dense) reuseAsZeroed(r, c int) { + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. + panic("mat: caps not correctly set") + } + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + if m.IsZero() { + m.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + Data: useZeroed(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + return + } + if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } + m.Zero() +} + +// Zero sets all of the matrix elements to zero. +func (m *Dense) Zero() { + r := m.mat.Rows + c := m.mat.Cols + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + } +} + +// isolatedWorkspace returns a new dense matrix w with the size of a and +// returns a callback to defer which performs cleanup at the return of the call. +// This should be used when a method receiver is the same pointer as an input argument. +func (m *Dense) isolatedWorkspace(a Matrix) (w *Dense, restore func()) { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrZeroLength) + } + w = getWorkspace(r, c, false) + return w, func() { + m.Copy(w) + putWorkspace(w) + } +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. 
+// +// See the Reseter interface for more information. +func (m *Dense) Reset() { + // Row, Cols and Stride must be zeroed in unison. + m.mat.Rows, m.mat.Cols, m.mat.Stride = 0, 0, 0 + m.capRows, m.capCols = 0, 0 + m.mat.Data = m.mat.Data[:0] +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. Dense matrices can be zeroed using Reset. +func (m *Dense) IsZero() bool { + // It must be the case that m.Dims() returns + // zeros in this case. See comment in Reset(). + return m.mat.Stride == 0 +} + +// asTriDense returns a TriDense with the given size and side. The backing data +// of the TriDense is the same as the receiver. +func (m *Dense) asTriDense(n int, diag blas.Diag, uplo blas.Uplo) *TriDense { + return &TriDense{ + mat: blas64.Triangular{ + N: n, + Stride: m.mat.Stride, + Data: m.mat.Data, + Uplo: uplo, + Diag: diag, + }, + cap: n, + } +} + +// DenseCopyOf returns a newly allocated copy of the elements of a. +func DenseCopyOf(a Matrix) *Dense { + d := &Dense{} + d.Clone(a) + return d +} + +// SetRawMatrix sets the underlying blas64.General used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in b. +func (m *Dense) SetRawMatrix(b blas64.General) { + m.capRows, m.capCols = b.Rows, b.Cols + m.mat = b +} + +// RawMatrix returns the underlying blas64.General used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.General. +func (m *Dense) RawMatrix() blas64.General { return m.mat } + +// Dims returns the number of rows and columns in the matrix. +func (m *Dense) Dims() (r, c int) { return m.mat.Rows, m.mat.Cols } + +// Caps returns the number of rows and columns in the backing matrix. +func (m *Dense) Caps() (r, c int) { return m.capRows, m.capCols } + +// T performs an implicit transpose by returning the receiver inside a Transpose. 
+func (m *Dense) T() Matrix { + return Transpose{m} +} + +// ColView returns a Vector reflecting the column j, backed by the matrix data. +// +// See ColViewer for more information. +func (m *Dense) ColView(j int) Vector { + var v VecDense + v.ColViewOf(m, j) + return &v +} + +// SetCol sets the values in the specified column of the matrix to the values +// in src. len(src) must equal the number of rows in the receiver. +func (m *Dense) SetCol(j int, src []float64) { + if j >= m.mat.Cols || j < 0 { + panic(ErrColAccess) + } + if len(src) != m.mat.Rows { + panic(ErrColLength) + } + + blas64.Copy( + blas64.Vector{N: m.mat.Rows, Inc: 1, Data: src}, + blas64.Vector{N: m.mat.Rows, Inc: m.mat.Stride, Data: m.mat.Data[j:]}, + ) +} + +// SetRow sets the values in the specified rows of the matrix to the values +// in src. len(src) must equal the number of columns in the receiver. +func (m *Dense) SetRow(i int, src []float64) { + if i >= m.mat.Rows || i < 0 { + panic(ErrRowAccess) + } + if len(src) != m.mat.Cols { + panic(ErrRowLength) + } + + copy(m.rawRowView(i), src) +} + +// RowView returns row i of the matrix data represented as a column vector, +// backed by the matrix data. +// +// See RowViewer for more information. +func (m *Dense) RowView(i int) Vector { + var v VecDense + v.RowViewOf(m, i) + return &v +} + +// RawRowView returns a slice backed by the same array as backing the +// receiver. +func (m *Dense) RawRowView(i int) []float64 { + if i >= m.mat.Rows || i < 0 { + panic(ErrRowAccess) + } + return m.rawRowView(i) +} + +func (m *Dense) rawRowView(i int) []float64 { + return m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+m.mat.Cols] +} + +// DiagView returns the diagonal as a matrix backed by the original data. 
+func (m *Dense) DiagView() Diagonal { + n := min(m.mat.Rows, m.mat.Cols) + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: m.mat.Stride + 1, + Data: m.mat.Data[:(n-1)*m.mat.Stride+n], + }, + } +} + +// Slice returns a new Matrix that shares backing data with the receiver. +// The returned matrix starts at {i,j} of the receiver and extends k-i rows +// and l-j columns. The final row in the resulting matrix is k-1 and the +// final column is l-1. +// Slice panics with ErrIndexOutOfRange if the slice is outside the capacity +// of the receiver. +func (m *Dense) Slice(i, k, j, l int) Matrix { + mr, mc := m.Caps() + if i < 0 || mr <= i || j < 0 || mc <= j || k < i || mr < k || l < j || mc < l { + if i == k || j == l { + panic(ErrZeroLength) + } + panic(ErrIndexOutOfRange) + } + t := *m + t.mat.Data = t.mat.Data[i*t.mat.Stride+j : (k-1)*t.mat.Stride+l] + t.mat.Rows = k - i + t.mat.Cols = l - j + t.capRows -= i + t.capCols -= j + return &t +} + +// Grow returns the receiver expanded by r rows and c columns. If the dimensions +// of the expanded matrix are outside the capacities of the receiver a new +// allocation is made, otherwise not. Note the receiver itself is not modified +// during the call to Grow. +func (m *Dense) Grow(r, c int) Matrix { + if r < 0 || c < 0 { + panic(ErrIndexOutOfRange) + } + if r == 0 && c == 0 { + return m + } + + r += m.mat.Rows + c += m.mat.Cols + + var t Dense + switch { + case m.mat.Rows == 0 || m.mat.Cols == 0: + t.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + // We zero because we don't know how the matrix will be used. + // In other places, the mat is immediately filled with a result; + // this is not the case here. 
+ Data: useZeroed(m.mat.Data, r*c), + } + case r > m.capRows || c > m.capCols: + cr := max(r, m.capRows) + cc := max(c, m.capCols) + t.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: cc, + Data: make([]float64, cr*cc), + } + t.capRows = cr + t.capCols = cc + // Copy the complete matrix over to the new matrix. + // Including elements not currently visible. Use a temporary structure + // to avoid modifying the receiver. + var tmp Dense + tmp.mat = blas64.General{ + Rows: m.mat.Rows, + Cols: m.mat.Cols, + Stride: m.mat.Stride, + Data: m.mat.Data, + } + tmp.capRows = m.capRows + tmp.capCols = m.capCols + t.Copy(&tmp) + return &t + default: + t.mat = blas64.General{ + Data: m.mat.Data[:(r-1)*m.mat.Stride+c], + Rows: r, + Cols: c, + Stride: m.mat.Stride, + } + } + t.capRows = r + t.capCols = c + return &t +} + +// Clone makes a copy of a into the receiver, overwriting the previous value of +// the receiver. The clone operation does not make any restriction on shape and +// will not cause shadowing. +// +// See the Cloner interface for more information. 
+func (m *Dense) Clone(a Matrix) { + r, c := a.Dims() + mat := blas64.General{ + Rows: r, + Cols: c, + Stride: c, + } + m.capRows, m.capCols = r, c + + aU, trans := untranspose(a) + switch aU := aU.(type) { + case RawMatrixer: + amat := aU.RawMatrix() + mat.Data = make([]float64, r*c) + if trans { + for i := 0; i < r; i++ { + blas64.Copy(blas64.Vector{N: c, Inc: amat.Stride, Data: amat.Data[i : i+(c-1)*amat.Stride+1]}, + blas64.Vector{N: c, Inc: 1, Data: mat.Data[i*c : (i+1)*c]}) + } + } else { + for i := 0; i < r; i++ { + copy(mat.Data[i*c:(i+1)*c], amat.Data[i*amat.Stride:i*amat.Stride+c]) + } + } + case *VecDense: + amat := aU.mat + mat.Data = make([]float64, aU.mat.N) + blas64.Copy(blas64.Vector{N: aU.mat.N, Inc: amat.Inc, Data: amat.Data}, + blas64.Vector{N: aU.mat.N, Inc: 1, Data: mat.Data}) + default: + mat.Data = make([]float64, r*c) + w := *m + w.mat = mat + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + w.set(i, j, a.At(i, j)) + } + } + *m = w + return + } + m.mat = mat +} + +// Copy makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two matrices and +// returns the number of rows and columns it copied. If a aliases the receiver +// and is a transposed Dense or VecDense, with a non-unitary increment, Copy will +// panic. +// +// See the Copier interface for more information. 
+func (m *Dense) Copy(a Matrix) (r, c int) { + r, c = a.Dims() + if a == m { + return r, c + } + r = min(r, m.mat.Rows) + c = min(c, m.mat.Cols) + if r == 0 || c == 0 { + return 0, 0 + } + + aU, trans := untranspose(a) + switch aU := aU.(type) { + case RawMatrixer: + amat := aU.RawMatrix() + if trans { + if amat.Stride != 1 { + m.checkOverlap(amat) + } + for i := 0; i < r; i++ { + blas64.Copy(blas64.Vector{N: c, Inc: amat.Stride, Data: amat.Data[i : i+(c-1)*amat.Stride+1]}, + blas64.Vector{N: c, Inc: 1, Data: m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]}) + } + } else { + switch o := offset(m.mat.Data, amat.Data); { + case o < 0: + for i := r - 1; i >= 0; i-- { + copy(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], amat.Data[i*amat.Stride:i*amat.Stride+c]) + } + case o > 0: + for i := 0; i < r; i++ { + copy(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], amat.Data[i*amat.Stride:i*amat.Stride+c]) + } + default: + // Nothing to do. + } + } + case *VecDense: + var n, stride int + amat := aU.mat + if trans { + if amat.Inc != 1 { + m.checkOverlap(aU.asGeneral()) + } + n = c + stride = 1 + } else { + n = r + stride = m.mat.Stride + } + if amat.Inc == 1 && stride == 1 { + copy(m.mat.Data, amat.Data[:n]) + break + } + switch o := offset(m.mat.Data, amat.Data); { + case o < 0: + blas64.Copy(blas64.Vector{N: n, Inc: -amat.Inc, Data: amat.Data}, + blas64.Vector{N: n, Inc: -stride, Data: m.mat.Data}) + case o > 0: + blas64.Copy(blas64.Vector{N: n, Inc: amat.Inc, Data: amat.Data}, + blas64.Vector{N: n, Inc: stride, Data: m.mat.Data}) + default: + // Nothing to do. + } + default: + m.checkOverlapMatrix(aU) + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + m.set(i, j, a.At(i, j)) + } + } + } + + return r, c +} + +// Stack appends the rows of b onto the rows of a, placing the result into the +// receiver with b placed in the greater indexed rows. 
Stack will panic if the +// two input matrices do not have the same number of columns or the constructed +// stacked matrix is not the same shape as the receiver. +func (m *Dense) Stack(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ac != bc || m == a || m == b { + panic(ErrShape) + } + + m.reuseAs(ar+br, ac) + + m.Copy(a) + w := m.Slice(ar, ar+br, 0, bc).(*Dense) + w.Copy(b) +} + +// Augment creates the augmented matrix of a and b, where b is placed in the +// greater indexed columns. Augment will panic if the two input matrices do +// not have the same number of rows or the constructed augmented matrix is +// not the same shape as the receiver. +func (m *Dense) Augment(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || m == a || m == b { + panic(ErrShape) + } + + m.reuseAs(ar, ac+bc) + + m.Copy(a) + w := m.Slice(0, br, ac, ac+bc).(*Dense) + w.Copy(b) +} + +// Trace returns the trace of the matrix. The matrix must be square or Trace +// will panic. +func (m *Dense) Trace() float64 { + if m.mat.Rows != m.mat.Cols { + panic(ErrSquare) + } + // TODO(btracey): could use internal asm sum routine. + var v float64 + for i := 0; i < m.mat.Rows; i++ { + v += m.mat.Data[i*m.mat.Stride+i] + } + return v +} diff --git a/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go b/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go new file mode 100644 index 0000000000..dd4526f634 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/dense_arithmetic.go @@ -0,0 +1,886 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// Add adds a and b element-wise, placing the result in the receiver. Add +// will panic if the two matrices do not have the same shape. 
+func (m *Dense) Add(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v + bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)+b.At(r, c)) + } + } +} + +// Sub subtracts the matrix b from a, placing the result in the receiver. Sub +// will panic if the two matrices do not have the same shape. 
+func (m *Dense) Sub(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v - bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)-b.At(r, c)) + } + } +} + +// MulElem performs element-wise multiplication of a and b, placing the result +// in the receiver. MulElem will panic if the two matrices do not have the same +// shape. 
+func (m *Dense) MulElem(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v * bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)*b.At(r, c)) + } + } +} + +// DivElem performs element-wise division of a by b, placing the result +// in the receiver. DivElem will panic if the two matrices do not have the same +// shape. 
+func (m *Dense) DivElem(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + panic(ErrShape) + } + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + m.reuseAs(ar, ac) + + if arm, ok := a.(RawMatrixer); ok { + if brm, ok := b.(RawMatrixer); ok { + amat, bmat := arm.RawMatrix(), brm.RawMatrix() + if m != aU { + m.checkOverlap(amat) + } + if m != bU { + m.checkOverlap(bmat) + } + for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v / bmat.Data[i+jb] + } + } + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, a.At(r, c)/b.At(r, c)) + } + } +} + +// Inverse computes the inverse of the matrix a, storing the result into the +// receiver. If a is ill-conditioned, a Condition error will be returned. +// Note that matrix inversion is numerically unstable, and should generally +// be avoided where possible, for example by using the Solve routines. +func (m *Dense) Inverse(a Matrix) error { + // TODO(btracey): Special case for RawTriangular, etc. + r, c := a.Dims() + if r != c { + panic(ErrSquare) + } + m.reuseAs(a.Dims()) + aU, aTrans := untranspose(a) + switch rm := aU.(type) { + case RawMatrixer: + if m != aU || aTrans { + if m == aU || m.checkOverlap(rm.RawMatrix()) { + tmp := getWorkspace(r, c, false) + tmp.Copy(a) + m.Copy(tmp) + putWorkspace(tmp) + break + } + m.Copy(a) + } + default: + m.Copy(a) + } + ipiv := getInts(r, false) + defer putInts(ipiv) + ok := lapack64.Getrf(m.mat, ipiv) + if !ok { + return Condition(math.Inf(1)) + } + work := getFloats(4*r, false) // must be at least 4*r for cond. 
+ lapack64.Getri(m.mat, ipiv, work, -1) + if int(work[0]) > 4*r { + l := int(work[0]) + putFloats(work) + work = getFloats(l, false) + } else { + work = work[:4*r] + } + defer putFloats(work) + lapack64.Getri(m.mat, ipiv, work, len(work)) + norm := lapack64.Lange(CondNorm, m.mat, work) + rcond := lapack64.Gecon(CondNorm, m.mat, norm, work, ipiv) // reuse ipiv + if rcond == 0 { + return Condition(math.Inf(1)) + } + cond := 1 / rcond + if cond > ConditionTolerance { + return Condition(cond) + } + return nil +} + +// Mul takes the matrix product of a and b, placing the result in the receiver. +// If the number of columns in a does not equal the number of rows in b, Mul will panic. +func (m *Dense) Mul(a, b Matrix) { + ar, ac := a.Dims() + br, bc := b.Dims() + + if ac != br { + panic(ErrShape) + } + + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + m.reuseAs(ar, bc) + var restore func() + if m == aU { + m, restore = m.isolatedWorkspace(aU) + defer restore() + } else if m == bU { + m, restore = m.isolatedWorkspace(bU) + defer restore() + } + aT := blas.NoTrans + if aTrans { + aT = blas.Trans + } + bT := blas.NoTrans + if bTrans { + bT = blas.Trans + } + + // Some of the cases do not have a transpose option, so create + // temporary memory. + // C = A^T * B = (B^T * A)^T + // C^T = B^T * A. + if aUrm, ok := aU.(RawMatrixer); ok { + amat := aUrm.RawMatrix() + if restore == nil { + m.checkOverlap(amat) + } + if bUrm, ok := bU.(RawMatrixer); ok { + bmat := bUrm.RawMatrix() + if restore == nil { + m.checkOverlap(bmat) + } + blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) + return + } + if bU, ok := bU.(RawSymmetricer); ok { + bmat := bU.RawSymmetric() + if aTrans { + c := getWorkspace(ac, ar, false) + blas64.Symm(blas.Left, 1, bmat, amat, 0, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + blas64.Symm(blas.Right, 1, bmat, amat, 0, m.mat) + return + } + if bU, ok := bU.(RawTriangular); ok { + // Trmm updates in place, so copy aU first. 
+ bmat := bU.RawTriangular() + if aTrans { + c := getWorkspace(ac, ar, false) + var tmp Dense + tmp.SetRawMatrix(amat) + c.Copy(&tmp) + bT := blas.Trans + if bTrans { + bT = blas.NoTrans + } + blas64.Trmm(blas.Left, bT, 1, bmat, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + m.Copy(a) + blas64.Trmm(blas.Right, bT, 1, bmat, m.mat) + return + } + if bU, ok := bU.(*VecDense); ok { + m.checkOverlap(bU.asGeneral()) + bvec := bU.RawVector() + if bTrans { + // {ar,1} x {1,bc}, which is not a vector. + // Instead, construct B as a General. + bmat := blas64.General{ + Rows: bc, + Cols: 1, + Stride: bvec.Inc, + Data: bvec.Data, + } + blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) + return + } + cvec := blas64.Vector{ + Inc: m.mat.Stride, + Data: m.mat.Data, + } + blas64.Gemv(aT, 1, amat, bvec, 0, cvec) + return + } + } + if bUrm, ok := bU.(RawMatrixer); ok { + bmat := bUrm.RawMatrix() + if restore == nil { + m.checkOverlap(bmat) + } + if aU, ok := aU.(RawSymmetricer); ok { + amat := aU.RawSymmetric() + if bTrans { + c := getWorkspace(bc, br, false) + blas64.Symm(blas.Right, 1, amat, bmat, 0, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + blas64.Symm(blas.Left, 1, amat, bmat, 0, m.mat) + return + } + if aU, ok := aU.(RawTriangular); ok { + // Trmm updates in place, so copy bU first. + amat := aU.RawTriangular() + if bTrans { + c := getWorkspace(bc, br, false) + var tmp Dense + tmp.SetRawMatrix(bmat) + c.Copy(&tmp) + aT := blas.Trans + if aTrans { + aT = blas.NoTrans + } + blas64.Trmm(blas.Right, aT, 1, amat, c.mat) + strictCopy(m, c.T()) + putWorkspace(c) + return + } + m.Copy(b) + blas64.Trmm(blas.Left, aT, 1, amat, m.mat) + return + } + if aU, ok := aU.(*VecDense); ok { + m.checkOverlap(aU.asGeneral()) + avec := aU.RawVector() + if aTrans { + // {1,ac} x {ac, bc} + // Transpose B so that the vector is on the right. 
+ cvec := blas64.Vector{ + Inc: 1, + Data: m.mat.Data, + } + bT := blas.Trans + if bTrans { + bT = blas.NoTrans + } + blas64.Gemv(bT, 1, bmat, avec, 0, cvec) + return + } + // {ar,1} x {1,bc} which is not a vector result. + // Instead, construct A as a General. + amat := blas64.General{ + Rows: ar, + Cols: 1, + Stride: avec.Inc, + Data: avec.Data, + } + blas64.Gemm(aT, bT, 1, amat, bmat, 0, m.mat) + return + } + } + + m.checkOverlapMatrix(aU) + m.checkOverlapMatrix(bU) + row := getFloats(ac, false) + defer putFloats(row) + for r := 0; r < ar; r++ { + for i := range row { + row[i] = a.At(r, i) + } + for c := 0; c < bc; c++ { + var v float64 + for i, e := range row { + v += e * b.At(i, c) + } + m.mat.Data[r*m.mat.Stride+c] = v + } + } +} + +// strictCopy copies a into m panicking if the shape of a and m differ. +func strictCopy(m *Dense, a Matrix) { + r, c := m.Copy(a) + if r != m.mat.Rows || c != m.mat.Cols { + // Panic with a string since this + // is not a user-facing panic. + panic(ErrShape.Error()) + } +} + +// Exp calculates the exponential of the matrix a, e^a, placing the result +// in the receiver. Exp will panic with matrix.ErrShape if a is not square. +func (m *Dense) Exp(a Matrix) { + // The implementation used here is from Functions of Matrices: Theory and Computation + // Chapter 10, Algorithm 10.20. 
https://doi.org/10.1137/1.9780898717778.ch10 + + r, c := a.Dims() + if r != c { + panic(ErrShape) + } + + m.reuseAs(r, r) + if r == 1 { + m.mat.Data[0] = math.Exp(a.At(0, 0)) + return + } + + pade := []struct { + theta float64 + b []float64 + }{ + {theta: 0.015, b: []float64{ + 120, 60, 12, 1, + }}, + {theta: 0.25, b: []float64{ + 30240, 15120, 3360, 420, 30, 1, + }}, + {theta: 0.95, b: []float64{ + 17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1, + }}, + {theta: 2.1, b: []float64{ + 17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1, + }}, + } + + a1 := m + a1.Copy(a) + v := getWorkspace(r, r, true) + vraw := v.RawMatrix() + n := r * r + vvec := blas64.Vector{N: n, Inc: 1, Data: vraw.Data} + defer putWorkspace(v) + + u := getWorkspace(r, r, true) + uraw := u.RawMatrix() + uvec := blas64.Vector{N: n, Inc: 1, Data: uraw.Data} + defer putWorkspace(u) + + a2 := getWorkspace(r, r, false) + defer putWorkspace(a2) + + n1 := Norm(a, 1) + for i, t := range pade { + if n1 > t.theta { + continue + } + + // This loop only executes once, so + // this is not as horrible as it looks. + p := getWorkspace(r, r, true) + praw := p.RawMatrix() + pvec := blas64.Vector{N: n, Inc: 1, Data: praw.Data} + defer putWorkspace(p) + + for k := 0; k < r; k++ { + p.set(k, k, 1) + v.set(k, k, t.b[0]) + u.set(k, k, t.b[1]) + } + + a2.Mul(a1, a1) + for j := 0; j <= i; j++ { + p.Mul(p, a2) + blas64.Axpy(t.b[2*j+2], pvec, vvec) + blas64.Axpy(t.b[2*j+3], pvec, uvec) + } + u.Mul(a1, u) + + // Use p as a workspace here and + // rename u for the second call's + // receiver. + vmu, vpu := u, p + vpu.Add(v, u) + vmu.Sub(v, u) + + m.Solve(vmu, vpu) + return + } + + // Remaining Padé table line. 
+ const theta13 = 5.4 + b := [...]float64{ + 64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, + 129060195264000, 10559470521600, 670442572800, 33522128640, + 1323241920, 40840800, 960960, 16380, 182, 1, + } + + s := math.Log2(n1 / theta13) + if s >= 0 { + s = math.Ceil(s) + a1.Scale(1/math.Pow(2, s), a1) + } + a2.Mul(a1, a1) + + i := getWorkspace(r, r, true) + for j := 0; j < r; j++ { + i.set(j, j, 1) + } + iraw := i.RawMatrix() + ivec := blas64.Vector{N: n, Inc: 1, Data: iraw.Data} + defer putWorkspace(i) + + a2raw := a2.RawMatrix() + a2vec := blas64.Vector{N: n, Inc: 1, Data: a2raw.Data} + + a4 := getWorkspace(r, r, false) + a4raw := a4.RawMatrix() + a4vec := blas64.Vector{N: n, Inc: 1, Data: a4raw.Data} + defer putWorkspace(a4) + a4.Mul(a2, a2) + + a6 := getWorkspace(r, r, false) + a6raw := a6.RawMatrix() + a6vec := blas64.Vector{N: n, Inc: 1, Data: a6raw.Data} + defer putWorkspace(a6) + a6.Mul(a2, a4) + + // V = A_6(b_12*A_6 + b_10*A_4 + b_8*A_2) + b_6*A_6 + b_4*A_4 + b_2*A_2 +b_0*I + blas64.Axpy(b[12], a6vec, vvec) + blas64.Axpy(b[10], a4vec, vvec) + blas64.Axpy(b[8], a2vec, vvec) + v.Mul(v, a6) + blas64.Axpy(b[6], a6vec, vvec) + blas64.Axpy(b[4], a4vec, vvec) + blas64.Axpy(b[2], a2vec, vvec) + blas64.Axpy(b[0], ivec, vvec) + + // U = A(A_6(b_13*A_6 + b_11*A_4 + b_9*A_2) + b_7*A_6 + b_5*A_4 + b_2*A_3 +b_1*I) + blas64.Axpy(b[13], a6vec, uvec) + blas64.Axpy(b[11], a4vec, uvec) + blas64.Axpy(b[9], a2vec, uvec) + u.Mul(u, a6) + blas64.Axpy(b[7], a6vec, uvec) + blas64.Axpy(b[5], a4vec, uvec) + blas64.Axpy(b[3], a2vec, uvec) + blas64.Axpy(b[1], ivec, uvec) + u.Mul(u, a1) + + // Use i as a workspace here and + // rename u for the second call's + // receiver. + vmu, vpu := u, i + vpu.Add(v, u) + vmu.Sub(v, u) + + m.Solve(vmu, vpu) + + for ; s > 0; s-- { + m.Mul(m, m) + } +} + +// Pow calculates the integral power of the matrix a to n, placing the result +// in the receiver. Pow will panic if n is negative or if a is not square. 
+func (m *Dense) Pow(a Matrix, n int) { + if n < 0 { + panic("matrix: illegal power") + } + r, c := a.Dims() + if r != c { + panic(ErrShape) + } + + m.reuseAs(r, c) + + // Take possible fast paths. + switch n { + case 0: + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + m.mat.Data[i*m.mat.Stride+i] = 1 + } + return + case 1: + m.Copy(a) + return + case 2: + m.Mul(a, a) + return + } + + // Perform iterative exponentiation by squaring in work space. + w := getWorkspace(r, r, false) + w.Copy(a) + s := getWorkspace(r, r, false) + s.Copy(a) + x := getWorkspace(r, r, false) + for n--; n > 0; n >>= 1 { + if n&1 != 0 { + x.Mul(w, s) + w, x = x, w + } + if n != 1 { + x.Mul(s, s) + s, x = x, s + } + } + m.Copy(w) + putWorkspace(w) + putWorkspace(s) + putWorkspace(x) +} + +// Scale multiplies the elements of a by f, placing the result in the receiver. +// +// See the Scaler interface for more information. +func (m *Dense) Scale(f float64, a Matrix) { + ar, ac := a.Dims() + + m.reuseAs(ar, ac) + + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + amat := rm.RawMatrix() + if m == aU || m.checkOverlap(amat) { + var restore func() + m, restore = m.isolatedWorkspace(a) + defer restore() + } + if !aTrans { + for ja, jm := 0, 0; ja < ar*amat.Stride; ja, jm = ja+amat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = v * f + } + } + } else { + for ja, jm := 0, 0; ja < ac*amat.Stride; ja, jm = ja+amat.Stride, jm+1 { + for i, v := range amat.Data[ja : ja+ar] { + m.mat.Data[i*m.mat.Stride+jm] = v * f + } + } + } + return + } + + m.checkOverlapMatrix(a) + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, f*a.At(r, c)) + } + } +} + +// Apply applies the function fn to each of the elements of a, placing the +// resulting matrix in the receiver. The function fn takes a row/column +// index and element value and returns some function of that tuple. 
+func (m *Dense) Apply(fn func(i, j int, v float64) float64, a Matrix) { + ar, ac := a.Dims() + + m.reuseAs(ar, ac) + + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + amat := rm.RawMatrix() + if m == aU || m.checkOverlap(amat) { + var restore func() + m, restore = m.isolatedWorkspace(a) + defer restore() + } + if !aTrans { + for j, ja, jm := 0, 0, 0; ja < ar*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+m.mat.Stride { + for i, v := range amat.Data[ja : ja+ac] { + m.mat.Data[i+jm] = fn(j, i, v) + } + } + } else { + for j, ja, jm := 0, 0, 0; ja < ac*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+1 { + for i, v := range amat.Data[ja : ja+ar] { + m.mat.Data[i*m.mat.Stride+jm] = fn(i, j, v) + } + } + } + return + } + + m.checkOverlapMatrix(a) + for r := 0; r < ar; r++ { + for c := 0; c < ac; c++ { + m.set(r, c, fn(r, c, a.At(r, c))) + } + } +} + +// RankOne performs a rank-one update to the matrix a and stores the result +// in the receiver. If a is zero, see Outer. 
+// m = a + alpha * x * y' +func (m *Dense) RankOne(a Matrix, alpha float64, x, y Vector) { + ar, ac := a.Dims() + xr, xc := x.Dims() + if xr != ar || xc != 1 { + panic(ErrShape) + } + yr, yc := y.Dims() + if yr != ac || yc != 1 { + panic(ErrShape) + } + + if a != m { + aU, _ := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + m.checkOverlap(rm.RawMatrix()) + } + } + + var xmat, ymat blas64.Vector + fast := true + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat = rv.RawVector() + m.checkOverlap((&VecDense{mat: xmat}).asGeneral()) + } else { + fast = false + } + yU, _ := untranspose(y) + if rv, ok := yU.(RawVectorer); ok { + ymat = rv.RawVector() + m.checkOverlap((&VecDense{mat: ymat}).asGeneral()) + } else { + fast = false + } + + if fast { + if m != a { + m.reuseAs(ar, ac) + m.Copy(a) + } + blas64.Ger(alpha, xmat, ymat, m.mat) + return + } + + m.reuseAs(ar, ac) + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + m.set(i, j, a.At(i, j)+alpha*x.AtVec(i)*y.AtVec(j)) + } + } +} + +// Outer calculates the outer product of the column vectors x and y, +// and stores the result in the receiver. +// m = alpha * x * y' +// In order to update an existing matrix, see RankOne. +func (m *Dense) Outer(alpha float64, x, y Vector) { + xr, xc := x.Dims() + if xc != 1 { + panic(ErrShape) + } + yr, yc := y.Dims() + if yc != 1 { + panic(ErrShape) + } + + r := xr + c := yr + + // Copied from reuseAs with use replaced by useZeroed + // and a final zero of the matrix elements if we pass + // the shape checks. + // TODO(kortschak): Factor out into reuseZeroedAs if + // we find another case that needs it. + if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { + // Panic as a string, not a mat.Error. 
+ panic("mat: caps not correctly set") + } + if m.IsZero() { + m.mat = blas64.General{ + Rows: r, + Cols: c, + Stride: c, + Data: useZeroed(m.mat.Data, r*c), + } + m.capRows = r + m.capCols = c + } else if r != m.mat.Rows || c != m.mat.Cols { + panic(ErrShape) + } + + var xmat, ymat blas64.Vector + fast := true + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat = rv.RawVector() + m.checkOverlap((&VecDense{mat: xmat}).asGeneral()) + + } else { + fast = false + } + yU, _ := untranspose(y) + if rv, ok := yU.(RawVectorer); ok { + ymat = rv.RawVector() + m.checkOverlap((&VecDense{mat: ymat}).asGeneral()) + } else { + fast = false + } + + if fast { + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) + } + blas64.Ger(alpha, xmat, ymat, m.mat) + return + } + + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + m.set(i, j, alpha*x.AtVec(i)*y.AtVec(j)) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/diagonal.go b/vendor/gonum.org/v1/gonum/mat/diagonal.go new file mode 100644 index 0000000000..e9f074a7fb --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/diagonal.go @@ -0,0 +1,311 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + diagDense *DiagDense + _ Matrix = diagDense + _ Diagonal = diagDense + _ MutableDiagonal = diagDense + _ Triangular = diagDense + _ TriBanded = diagDense + _ Symmetric = diagDense + _ SymBanded = diagDense + _ Banded = diagDense + _ RawBander = diagDense + _ RawSymBander = diagDense + + diag Diagonal + _ Matrix = diag + _ Diagonal = diag + _ Triangular = diag + _ TriBanded = diag + _ Symmetric = diag + _ SymBanded = diag + _ Banded = diag +) + +// Diagonal represents a diagonal matrix, that is a square matrix that only +// has non-zero terms on the diagonal. 
+type Diagonal interface { + Matrix + // Diag returns the number of rows/columns in the matrix. + Diag() int + + // Bandwidth and TBand are included in the Diagonal interface + // to allow the use of Diagonal types in banded functions. + // Bandwidth will always return (0, 0). + Bandwidth() (kl, ku int) + TBand() Banded + + // Triangle and TTri are included in the Diagonal interface + // to allow the use of Diagonal types in triangular functions. + Triangle() (int, TriKind) + TTri() Triangular + + // Symmetric and SymBand are included in the Diagonal interface + // to allow the use of Diagonal types in symmetric and banded symmetric + // functions respectively. + Symmetric() int + SymBand() (n, k int) + + // TriBand and TTriBand are included in the Diagonal interface + // to allow the use of Diagonal types in triangular banded functions. + TriBand() (n, k int, kind TriKind) + TTriBand() TriBanded +} + +// MutableDiagonal is a Diagonal matrix whose elements can be set. +type MutableDiagonal interface { + Diagonal + SetDiag(i int, v float64) +} + +// DiagDense represents a diagonal matrix in dense storage format. +type DiagDense struct { + mat blas64.Vector +} + +// NewDiagDense creates a new Diagonal matrix with n rows and n columns. +// The length of data must be n or data must be nil, otherwise NewDiagDense +// will panic. NewDiagDense will panic if n is zero. +func NewDiagDense(n int, data []float64) *DiagDense { + if n <= 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data == nil { + data = make([]float64, n) + } + if len(data) != n { + panic(ErrShape) + } + return &DiagDense{ + mat: blas64.Vector{N: n, Data: data, Inc: 1}, + } +} + +// Diag returns the dimension of the receiver. +func (d *DiagDense) Diag() int { + return d.mat.N +} + +// Dims returns the dimensions of the matrix. +func (d *DiagDense) Dims() (r, c int) { + return d.mat.N, d.mat.N +} + +// T returns the transpose of the matrix. 
+func (d *DiagDense) T() Matrix { + return d +} + +// TTri returns the transpose of the matrix. Note that Diagonal matrices are +// Upper by default. +func (d *DiagDense) TTri() Triangular { + return TransposeTri{d} +} + +// TBand performs an implicit transpose by returning the receiver inside a +// TransposeBand. +func (d *DiagDense) TBand() Banded { + return TransposeBand{d} +} + +// TTriBand performs an implicit transpose by returning the receiver inside a +// TransposeTriBand. Note that Diagonal matrices are Upper by default. +func (d *DiagDense) TTriBand() TriBanded { + return TransposeTriBand{d} +} + +// Bandwidth returns the upper and lower bandwidths of the matrix. +// These values are always zero for diagonal matrices. +func (d *DiagDense) Bandwidth() (kl, ku int) { + return 0, 0 +} + +// Symmetric implements the Symmetric interface. +func (d *DiagDense) Symmetric() int { + return d.mat.N +} + +// SymBand returns the number of rows/columns in the matrix, and the size of +// the bandwidth. +func (d *DiagDense) SymBand() (n, k int) { + return d.mat.N, 0 +} + +// Triangle implements the Triangular interface. +func (d *DiagDense) Triangle() (int, TriKind) { + return d.mat.N, Upper +} + +// TriBand returns the number of rows/columns in the matrix, the +// size of the bandwidth, and the orientation. Note that Diagonal matrices are +// Upper by default. +func (d *DiagDense) TriBand() (n, k int, kind TriKind) { + return d.mat.N, 0, Upper +} + +// Reset zeros the length of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (d *DiagDense) Reset() { + // No change of Inc or n to 0 may be + // made unless both are set to 0. + d.mat.Inc = 0 + d.mat.N = 0 + d.mat.Data = d.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. 
+func (d *DiagDense) Zero() { + for i := 0; i < d.mat.N; i++ { + d.mat.Data[d.mat.Inc*i] = 0 + } +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (d *DiagDense) DiagView() Diagonal { + return d +} + +// DiagFrom copies the diagonal of m into the receiver. The receiver must +// be min(r, c) long or zero. Otherwise DiagFrom will panic. +func (d *DiagDense) DiagFrom(m Matrix) { + n := min(m.Dims()) + d.reuseAs(n) + + var vec blas64.Vector + switch r := m.(type) { + case *DiagDense: + vec = r.mat + case RawBander: + mat := r.RawBand() + vec = blas64.Vector{ + N: n, + Inc: mat.Stride, + Data: mat.Data[mat.KL : (n-1)*mat.Stride+mat.KL+1], + } + case RawMatrixer: + mat := r.RawMatrix() + vec = blas64.Vector{ + N: n, + Inc: mat.Stride + 1, + Data: mat.Data[:(n-1)*mat.Stride+n], + } + case RawSymBander: + mat := r.RawSymBand() + vec = blas64.Vector{ + N: n, + Inc: mat.Stride, + Data: mat.Data[:(n-1)*mat.Stride+1], + } + case RawSymmetricer: + mat := r.RawSymmetric() + vec = blas64.Vector{ + N: n, + Inc: mat.Stride + 1, + Data: mat.Data[:(n-1)*mat.Stride+n], + } + case RawTriBander: + mat := r.RawTriBand() + data := mat.Data + if mat.Uplo == blas.Lower { + data = data[mat.K:] + } + vec = blas64.Vector{ + N: n, + Inc: mat.Stride, + Data: data[:(n-1)*mat.Stride+1], + } + case RawTriangular: + mat := r.RawTriangular() + if mat.Diag == blas.Unit { + for i := 0; i < n; i += d.mat.Inc { + d.mat.Data[i] = 1 + } + return + } + vec = blas64.Vector{ + N: n, + Inc: mat.Stride + 1, + Data: mat.Data[:(n-1)*mat.Stride+n], + } + case RawVectorer: + d.mat.Data[0] = r.RawVector().Data[0] + return + default: + for i := 0; i < n; i++ { + d.setDiag(i, m.At(i, i)) + } + return + } + blas64.Copy(vec, d.mat) +} + +// RawBand returns the underlying data used by the receiver represented +// as a blas64.Band. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.Band. 
+func (d *DiagDense) RawBand() blas64.Band { + return blas64.Band{ + Rows: d.mat.N, + Cols: d.mat.N, + KL: 0, + KU: 0, + Stride: d.mat.Inc, + Data: d.mat.Data, + } +} + +// RawSymBand returns the underlying data used by the receiver represented +// as a blas64.SymmetricBand. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.Band. +func (d *DiagDense) RawSymBand() blas64.SymmetricBand { + return blas64.SymmetricBand{ + N: d.mat.N, + K: 0, + Stride: d.mat.Inc, + Uplo: blas.Upper, + Data: d.mat.Data, + } +} + +// reuseAs resizes an empty diagonal to a r×r diagonal, +// or checks that a non-empty matrix is r×r. +func (d *DiagDense) reuseAs(r int) { + if r == 0 { + panic(ErrZeroLength) + } + if d.IsZero() { + d.mat = blas64.Vector{ + Inc: 1, + Data: use(d.mat.Data, r), + } + d.mat.N = r + return + } + if r != d.mat.N { + panic(ErrShape) + } +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized vectors can be the +// receiver for size-restricted operations. DiagDenses can be zeroed using Reset. +func (d *DiagDense) IsZero() bool { + // It must be the case that d.Dims() returns + // zeros in this case. See comment in Reset(). + return d.mat.Inc == 0 +} diff --git a/vendor/gonum.org/v1/gonum/mat/doc.go b/vendor/gonum.org/v1/gonum/mat/doc.go new file mode 100644 index 0000000000..2cc9100159 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/doc.go @@ -0,0 +1,169 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mat provides implementations of float64 and complex128 matrix +// structures and linear algebra operations on them. +// +// Overview +// +// This section provides a quick overview of the mat package. The following +// sections provide more in depth commentary. 
+// +// mat provides: +// - Interfaces for Matrix classes (Matrix, Symmetric, Triangular) +// - Concrete implementations (Dense, SymDense, TriDense) +// - Methods and functions for using matrix data (Add, Trace, SymRankOne) +// - Types for constructing and using matrix factorizations (QR, LU) +// - The complementary types for complex matrices, CMatrix, CSymDense, etc. +// +// A matrix may be constructed through the corresponding New function. If no +// backing array is provided the matrix will be initialized to all zeros. +// // Allocate a zeroed real matrix of size 3×5 +// zero := mat.NewDense(3, 5, nil) +// If a backing data slice is provided, the matrix will have those elements. +// Matrices are all stored in row-major format. +// // Generate a 6×6 matrix of random values. +// data := make([]float64, 36) +// for i := range data { +// data[i] = rand.NormFloat64() +// } +// a := mat.NewDense(6, 6, data) +// Operations involving matrix data are implemented as functions when the values +// of the matrix remain unchanged +// tr := mat.Trace(a) +// and are implemented as methods when the operation modifies the receiver. +// zero.Copy(a) +// +// Receivers must be the correct size for the matrix operations, otherwise the +// operation will panic. As a special case for convenience, a zero-value matrix +// will be modified to have the correct size, allocating data if necessary. +// var c mat.Dense // construct a new zero-sized matrix +// c.Mul(a, a) // c is automatically adjusted to be 6×6 +// +// Zero-value of a matrix +// +// A zero-value matrix is either the Go language definition of a zero-value or +// is a zero-sized matrix with zero-length stride. Matrix implementations may have +// a Reset method to revert the receiver into a zero-valued matrix and an IsZero +// method that returns whether the matrix is zero-valued. +// So the following will all result in a zero-value matrix. 
+// - var a mat.Dense +// - a := NewDense(0, 0, make([]float64, 0, 100)) +// - a.Reset() +// A zero-value matrix can not be sliced even if it does have an adequately sized +// backing data slice, but can be expanded using its Grow method if it exists. +// +// The Matrix Interfaces +// +// The Matrix interface is the common link between the concrete types of real +// matrices, The Matrix interface is defined by three functions: Dims, which +// returns the dimensions of the Matrix, At, which returns the element in the +// specified location, and T for returning a Transpose (discussed later). All of +// the concrete types can perform these behaviors and so implement the interface. +// Methods and functions are designed to use this interface, so in particular the method +// func (m *Dense) Mul(a, b Matrix) +// constructs a *Dense from the result of a multiplication with any Matrix types, +// not just *Dense. Where more restrictive requirements must be met, there are also the +// Symmetric and Triangular interfaces. For example, in +// func (s *SymDense) AddSym(a, b Symmetric) +// the Symmetric interface guarantees a symmetric result. +// +// The CMatrix interface plays the same role for complex matrices. The difference +// is that the CMatrix type has the H method instead T, for returning the conjugate +// transpose. +// +// (Conjugate) Transposes +// +// The T method is used for transposition on real matrices, and H is used for +// conjugate transposition on complex matrices. For example, c.Mul(a.T(), b) computes +// c = a^T * b. The mat types implement this method implicitly — +// see the Transpose and Conjugate types for more details. Note that some +// operations have a transpose as part of their definition, as in *SymDense.SymOuterK. +// +// Matrix Factorization +// +// Matrix factorizations, such as the LU decomposition, typically have their own +// specific data storage, and so are each implemented as a specific type. 
The +// factorization can be computed through a call to Factorize +// var lu mat.LU +// lu.Factorize(a) +// The elements of the factorization can be extracted through methods on the +// factorized type, i.e. *LU.UTo. The factorization types can also be used directly, +// as in *Dense.SolveCholesky. Some factorizations can be updated directly, +// without needing to update the original matrix and refactorize, +// as in *LU.RankOne. +// +// BLAS and LAPACK +// +// BLAS and LAPACK are the standard APIs for linear algebra routines. Many +// operations in mat are implemented using calls to the wrapper functions +// in gonum/blas/blas64 and gonum/lapack/lapack64 and their complex equivalents. +// By default, blas64 and lapack64 call the native Go implementations of the +// routines. Alternatively, it is possible to use C-based implementations of the +// APIs through the respective cgo packages and "Use" functions. The Go +// implementation of LAPACK (used by default) makes calls +// through blas64, so if a cgo BLAS implementation is registered, the lapack64 +// calls will be partially executed in Go and partially executed in C. +// +// Type Switching +// +// The Matrix abstraction enables efficiency as well as interoperability. Go's +// type reflection capabilities are used to choose the most efficient routine +// given the specific concrete types. For example, in +// c.Mul(a, b) +// if a and b both implement RawMatrixer, that is, they can be represented as a +// blas64.General, blas64.Gemm (general matrix multiplication) is called, while +// instead if b is a RawSymmetricer blas64.Symm is used (general-symmetric +// multiplication), and if b is a *VecDense blas64.Gemv is used. +// +// There are many possible type combinations and special cases. No specific guarantees +// are made about the performance of any method, and in particular, note that an +// abstract matrix type may be copied into a concrete type of the corresponding +// value. 
If there are specific special cases that are needed, please submit a +// pull-request or file an issue. +// +// Invariants +// +// Matrix input arguments to functions are never directly modified. If an operation +// changes Matrix data, the mutated matrix will be the receiver of a function. +// +// For convenience, a matrix may be used as both a receiver and as an input, e.g. +// a.Pow(a, 6) +// v.SolveVec(a.T(), v) +// though in many cases this will cause an allocation (see Element Aliasing). +// An exception to this rule is Copy, which does not allow a.Copy(a.T()). +// +// Element Aliasing +// +// Most methods in mat modify receiver data. It is forbidden for the modified +// data region of the receiver to overlap the used data area of the input +// arguments. The exception to this rule is when the method receiver is equal to one +// of the input arguments, as in the a.Pow(a, 6) call above, or its implicit transpose. +// +// This prohibition is to help avoid subtle mistakes when the method needs to read +// from and write to the same data region. There are ways to make mistakes using the +// mat API, and mat functions will detect and complain about those. +// There are many ways to make mistakes by excursion from the mat API via +// interaction with raw matrix values. +// +// If you need to read the rest of this section to understand the behavior of +// your program, you are being clever. Don't be clever. If you must be clever, +// blas64 and lapack64 may be used to call the behavior directly. +// +// mat will use the following rules to detect overlap between the receiver and one +// of the inputs: +// - the input implements one of the Raw methods, and +// - the address ranges of the backing data slices overlap, and +// - the strides differ or there is an overlap in the used data elements. +// If such an overlap is detected, the method will panic. 
+// +// The following cases will not panic: +// - the data slices do not overlap, +// - there is pointer identity between the receiver and input values after +// the value has been untransposed if necessary. +// +// mat will not attempt to detect element overlap if the input does not implement a +// Raw method. Method behavior is undefined if there is undetected overlap. +// +package mat // import "gonum.org/v1/gonum/mat" diff --git a/vendor/gonum.org/v1/gonum/mat/eigen.go b/vendor/gonum.org/v1/gonum/mat/eigen.go new file mode 100644 index 0000000000..ee971e4ae4 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/eigen.go @@ -0,0 +1,350 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const ( + badFact = "mat: use without successful factorization" + badNoVect = "mat: eigenvectors not computed" +) + +// EigenSym is a type for creating and manipulating the Eigen decomposition of +// symmetric matrices. +type EigenSym struct { + vectorsComputed bool + + values []float64 + vectors *Dense +} + +// Factorize computes the eigenvalue decomposition of the symmetric matrix a. +// The Eigen decomposition is defined as +// A = P * D * P^-1 +// where D is a diagonal matrix containing the eigenvalues of the matrix, and +// P is a matrix of the eigenvectors of A. Factorize computes the eigenvalues +// in ascending order. If the vectors input argument is false, the eigenvectors +// are not computed. +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, methods that require a successful factorization will panic. 
+func (e *EigenSym) Factorize(a Symmetric, vectors bool) (ok bool) { + // kill previous decomposition + e.vectorsComputed = false + e.values = e.values[:] + + n := a.Symmetric() + sd := NewSymDense(n, nil) + sd.CopySym(a) + + jobz := lapack.EVNone + if vectors { + jobz = lapack.EVCompute + } + w := make([]float64, n) + work := []float64{0} + lapack64.Syev(jobz, sd.mat, w, work, -1) + + work = getFloats(int(work[0]), false) + ok = lapack64.Syev(jobz, sd.mat, w, work, len(work)) + putFloats(work) + if !ok { + e.vectorsComputed = false + e.values = nil + e.vectors = nil + return false + } + e.vectorsComputed = vectors + e.values = w + e.vectors = NewDense(n, n, sd.mat.Data) + return true +} + +// succFact returns whether the receiver contains a successful factorization. +func (e *EigenSym) succFact() bool { + return len(e.values) != 0 +} + +// Values extracts the eigenvalues of the factorized matrix. If dst is +// non-nil, the values are stored in-place into dst. In this case +// dst must have length n, otherwise Values will panic. If dst is +// nil, then a new slice will be allocated of the proper length and filled +// with the eigenvalues. +// +// Values panics if the Eigen decomposition was not successful. +func (e *EigenSym) Values(dst []float64) []float64 { + if !e.succFact() { + panic(badFact) + } + if dst == nil { + dst = make([]float64, len(e.values)) + } + if len(dst) != len(e.values) { + panic(ErrSliceLengthMismatch) + } + copy(dst, e.values) + return dst +} + +// VectorsTo returns the eigenvectors of the decomposition. VectorsTo +// will panic if the eigenvectors were not computed during the factorization, +// or if the factorization was not successful. +// +// If dst is not nil, the eigenvectors are stored in-place into dst, and dst +// must have size n×n and panics otherwise. If dst is nil, a new matrix +// is allocated and returned. 
+func (e *EigenSym) VectorsTo(dst *Dense) *Dense { + if !e.succFact() { + panic(badFact) + } + if !e.vectorsComputed { + panic(badNoVect) + } + r, c := e.vectors.Dims() + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + dst.Copy(e.vectors) + return dst +} + +// EigenKind specifies the computation of eigenvectors during factorization. +type EigenKind int + +const ( + // EigenNone specifies to not compute any eigenvectors. + EigenNone EigenKind = 0 + // EigenLeft specifies to compute the left eigenvectors. + EigenLeft EigenKind = 1 << iota + // EigenRight specifies to compute the right eigenvectors. + EigenRight + // EigenBoth is a convenience value for computing both eigenvectors. + EigenBoth EigenKind = EigenLeft | EigenRight +) + +// Eigen is a type for creating and using the eigenvalue decomposition of a dense matrix. +type Eigen struct { + n int // The size of the factorized matrix. + + kind EigenKind + + values []complex128 + rVectors *CDense + lVectors *CDense +} + +// succFact returns whether the receiver contains a successful factorization. +func (e *Eigen) succFact() bool { + return e.n != 0 +} + +// Factorize computes the eigenvalues of the square matrix a, and optionally +// the eigenvectors. +// +// A right eigenvalue/eigenvector combination is defined by +// A * x_r = λ * x_r +// where x_r is the column vector called an eigenvector, and λ is the corresponding +// eigenvalue. +// +// Similarly, a left eigenvalue/eigenvector combination is defined by +// x_l * A = λ * x_l +// The eigenvalues, but not the eigenvectors, are the same for both decompositions. +// +// Typically eigenvectors refer to right eigenvectors. +// +// In all cases, Factorize computes the eigenvalues of the matrix. kind +// specifies which of the eigenvectors, if any, to compute. See the EigenKind +// documentation for more information. +// Eigen panics if the input matrix is not square. +// +// Factorize returns whether the decomposition succeeded. 
If the decomposition +// failed, methods that require a successful factorization will panic. +func (e *Eigen) Factorize(a Matrix, kind EigenKind) (ok bool) { + // kill previous factorization. + e.n = 0 + e.kind = 0 + // Copy a because it is modified during the Lapack call. + r, c := a.Dims() + if r != c { + panic(ErrShape) + } + var sd Dense + sd.Clone(a) + + left := kind&EigenLeft != 0 + right := kind&EigenRight != 0 + + var vl, vr Dense + jobvl := lapack.LeftEVNone + jobvr := lapack.RightEVNone + if left { + vl = *NewDense(r, r, nil) + jobvl = lapack.LeftEVCompute + } + if right { + vr = *NewDense(c, c, nil) + jobvr = lapack.RightEVCompute + } + + wr := getFloats(c, false) + defer putFloats(wr) + wi := getFloats(c, false) + defer putFloats(wi) + + work := []float64{0} + lapack64.Geev(jobvl, jobvr, sd.mat, wr, wi, vl.mat, vr.mat, work, -1) + work = getFloats(int(work[0]), false) + first := lapack64.Geev(jobvl, jobvr, sd.mat, wr, wi, vl.mat, vr.mat, work, len(work)) + putFloats(work) + + if first != 0 { + e.values = nil + return false + } + e.n = r + e.kind = kind + + // Construct complex eigenvalues from float64 data. + values := make([]complex128, r) + for i, v := range wr { + values[i] = complex(v, wi[i]) + } + e.values = values + + // Construct complex eigenvectors from float64 data. + var cvl, cvr CDense + if left { + cvl = *NewCDense(r, r, nil) + e.complexEigenTo(&cvl, &vl) + e.lVectors = &cvl + } else { + e.lVectors = nil + } + if right { + cvr = *NewCDense(c, c, nil) + e.complexEigenTo(&cvr, &vr) + e.rVectors = &cvr + } else { + e.rVectors = nil + } + return true +} + +// Kind returns the EigenKind of the decomposition. If no decomposition has been +// computed, Kind returns -1. +func (e *Eigen) Kind() EigenKind { + if !e.succFact() { + return -1 + } + return e.kind +} + +// Values extracts the eigenvalues of the factorized matrix. If dst is +// non-nil, the values are stored in-place into dst. 
In this case +// dst must have length n, otherwise Values will panic. If dst is +// nil, then a new slice will be allocated of the proper length and +// filed with the eigenvalues. +// +// Values panics if the Eigen decomposition was not successful. +func (e *Eigen) Values(dst []complex128) []complex128 { + if !e.succFact() { + panic(badFact) + } + if dst == nil { + dst = make([]complex128, e.n) + } + if len(dst) != e.n { + panic(ErrSliceLengthMismatch) + } + copy(dst, e.values) + return dst +} + +// complexEigenTo extracts the complex eigenvectors from the real matrix d +// and stores them into the complex matrix dst. +// +// The columns of the returned n×n dense matrix contain the eigenvectors of the +// decomposition in the same order as the eigenvalues. +// If the j-th eigenvalue is real, then +// dst[:,j] = d[:,j], +// and if it is not real, then the elements of the j-th and (j+1)-th columns of d +// form complex conjugate pairs and the eigenvectors are recovered as +// dst[:,j] = d[:,j] + i*d[:,j+1], +// dst[:,j+1] = d[:,j] - i*d[:,j+1], +// where i is the imaginary unit. +func (e *Eigen) complexEigenTo(dst *CDense, d *Dense) { + r, c := d.Dims() + cr, cc := dst.Dims() + if r != cr { + panic("size mismatch") + } + if c != cc { + panic("size mismatch") + } + for j := 0; j < c; j++ { + if imag(e.values[j]) == 0 { + for i := 0; i < r; i++ { + dst.set(i, j, complex(d.at(i, j), 0)) + } + continue + } + for i := 0; i < r; i++ { + real := d.at(i, j) + imag := d.at(i, j+1) + dst.set(i, j, complex(real, imag)) + dst.set(i, j+1, complex(real, -imag)) + } + j++ + } +} + +// VectorsTo returns the right eigenvectors of the decomposition. VectorsTo +// will panic if the right eigenvectors were not computed during the factorization, +// or if the factorization was not successful. +// +// The computed eigenvectors are normalized to have Euclidean norm equal to 1 +// and largest component real. 
+func (e *Eigen) VectorsTo(dst *CDense) *CDense { + if !e.succFact() { + panic(badFact) + } + if e.kind&EigenRight == 0 { + panic(badNoVect) + } + if dst == nil { + dst = NewCDense(e.n, e.n, nil) + } else { + dst.reuseAs(e.n, e.n) + } + dst.Copy(e.rVectors) + return dst +} + +// LeftVectorsTo returns the left eigenvectors of the decomposition. LeftVectorsTo +// will panic if the left eigenvectors were not computed during the factorization, +// or if the factorization was not successful. +// +// The computed eigenvectors are normalized to have Euclidean norm equal to 1 +// and largest component real. +func (e *Eigen) LeftVectorsTo(dst *CDense) *CDense { + if !e.succFact() { + panic(badFact) + } + if e.kind&EigenLeft == 0 { + panic(badNoVect) + } + if dst == nil { + dst = NewCDense(e.n, e.n, nil) + } else { + dst.reuseAs(e.n, e.n) + } + dst.Copy(e.lVectors) + return dst +} diff --git a/vendor/gonum.org/v1/gonum/mat/errors.go b/vendor/gonum.org/v1/gonum/mat/errors.go new file mode 100644 index 0000000000..0430d126f2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/errors.go @@ -0,0 +1,149 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "fmt" + "runtime" + + "gonum.org/v1/gonum/lapack" +) + +// Condition is the condition number of a matrix. The condition +// number is defined as |A| * |A^-1|. +// +// One important use of Condition is during linear solve routines (finding x such +// that A * x = b). The condition number of A indicates the accuracy of +// the computed solution. A Condition error will be returned if the condition +// number of A is sufficiently large. If A is exactly singular to working precision, +// Condition == ∞, and the solve algorithm may have completed early. If Condition +// is large and finite the solve algorithm will be performed, but the computed +// solution may be innacurate. 
Due to the nature of finite precision arithmetic, +// the value of Condition is only an approximate test of singularity. +type Condition float64 + +func (c Condition) Error() string { + return fmt.Sprintf("matrix singular or near-singular with condition number %.4e", c) +} + +// ConditionTolerance is the tolerance limit of the condition number. If the +// condition number is above this value, the matrix is considered singular. +const ConditionTolerance = 1e16 + +const ( + // CondNorm is the matrix norm used for computing the condition number by routines + // in the matrix packages. + CondNorm = lapack.MaxRowSum + + // CondNormTrans is the norm used to compute on A^T to get the same result as + // computing CondNorm on A. + CondNormTrans = lapack.MaxColumnSum +) + +const stackTraceBufferSize = 1 << 20 + +// Maybe will recover a panic with a type mat.Error from fn, and return this error +// as the Err field of an ErrorStack. The stack trace for the panicking function will be +// recovered and placed in the StackTrace field. Any other error is re-panicked. +func Maybe(fn func()) (err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(Error); ok { + if e.string == "" { + panic("mat: invalid error") + } + buf := make([]byte, stackTraceBufferSize) + n := runtime.Stack(buf, false) + err = ErrorStack{Err: e, StackTrace: string(buf[:n])} + return + } + panic(r) + } + }() + fn() + return +} + +// MaybeFloat will recover a panic with a type mat.Error from fn, and return this error +// as the Err field of an ErrorStack. The stack trace for the panicking function will be +// recovered and placed in the StackTrace field. Any other error is re-panicked. 
+func MaybeFloat(fn func() float64) (f float64, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(Error); ok { + if e.string == "" { + panic("mat: invalid error") + } + buf := make([]byte, stackTraceBufferSize) + n := runtime.Stack(buf, false) + err = ErrorStack{Err: e, StackTrace: string(buf[:n])} + return + } + panic(r) + } + }() + return fn(), nil +} + +// MaybeComplex will recover a panic with a type mat.Error from fn, and return this error +// as the Err field of an ErrorStack. The stack trace for the panicking function will be +// recovered and placed in the StackTrace field. Any other error is re-panicked. +func MaybeComplex(fn func() complex128) (f complex128, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(Error); ok { + if e.string == "" { + panic("mat: invalid error") + } + buf := make([]byte, stackTraceBufferSize) + n := runtime.Stack(buf, false) + err = ErrorStack{Err: e, StackTrace: string(buf[:n])} + return + } + panic(r) + } + }() + return fn(), nil +} + +// Error represents matrix handling errors. These errors can be recovered by Maybe wrappers. 
+type Error struct{ string } + +func (err Error) Error() string { return err.string } + +var ( + ErrIndexOutOfRange = Error{"matrix: index out of range"} + ErrRowAccess = Error{"matrix: row index out of range"} + ErrColAccess = Error{"matrix: column index out of range"} + ErrVectorAccess = Error{"matrix: vector index out of range"} + ErrZeroLength = Error{"matrix: zero length in matrix dimension"} + ErrRowLength = Error{"matrix: row length mismatch"} + ErrColLength = Error{"matrix: col length mismatch"} + ErrSquare = Error{"matrix: expect square matrix"} + ErrNormOrder = Error{"matrix: invalid norm order for matrix"} + ErrSingular = Error{"matrix: matrix is singular"} + ErrShape = Error{"matrix: dimension mismatch"} + ErrIllegalStride = Error{"matrix: illegal stride"} + ErrPivot = Error{"matrix: malformed pivot list"} + ErrTriangle = Error{"matrix: triangular storage mismatch"} + ErrTriangleSet = Error{"matrix: triangular set out of bounds"} + ErrBandSet = Error{"matrix: band set out of bounds"} + ErrDiagSet = Error{"matrix: diagonal set out of bounds"} + ErrSliceLengthMismatch = Error{"matrix: input slice length mismatch"} + ErrNotPSD = Error{"matrix: input not positive symmetric definite"} + ErrFailedEigen = Error{"matrix: eigendecomposition not successful"} +) + +// ErrorStack represents matrix handling errors that have been recovered by Maybe wrappers. +type ErrorStack struct { + Err error + + // StackTrace is the stack trace + // recovered by Maybe, MaybeFloat + // or MaybeComplex. + StackTrace string +} + +func (err ErrorStack) Error() string { return err.Err.Error() } diff --git a/vendor/gonum.org/v1/gonum/mat/format.go b/vendor/gonum.org/v1/gonum/mat/format.go new file mode 100644 index 0000000000..9b60cb3186 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/format.go @@ -0,0 +1,238 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mat + +import ( + "fmt" + "strconv" +) + +// Formatted returns a fmt.Formatter for the matrix m using the given options. +func Formatted(m Matrix, options ...FormatOption) fmt.Formatter { + f := formatter{ + matrix: m, + dot: '.', + } + for _, o := range options { + o(&f) + } + return f +} + +type formatter struct { + matrix Matrix + prefix string + margin int + dot byte + squeeze bool +} + +// FormatOption is a functional option for matrix formatting. +type FormatOption func(*formatter) + +// Prefix sets the formatted prefix to the string p. Prefix is a string that is prepended to +// each line of output. +func Prefix(p string) FormatOption { + return func(f *formatter) { f.prefix = p } +} + +// Excerpt sets the maximum number of rows and columns to print at the margins of the matrix +// to m. If m is zero or less all elements are printed. +func Excerpt(m int) FormatOption { + return func(f *formatter) { f.margin = m } +} + +// DotByte sets the dot character to b. The dot character is used to replace zero elements +// if the result is printed with the fmt ' ' verb flag. Without a DotByte option, the default +// dot character is '.'. +func DotByte(b byte) FormatOption { + return func(f *formatter) { f.dot = b } +} + +// Squeeze sets the printing behaviour to minimise column width for each individual column. +func Squeeze() FormatOption { + return func(f *formatter) { f.squeeze = true } +} + +// Format satisfies the fmt.Formatter interface. +func (f formatter) Format(fs fmt.State, c rune) { + if c == 'v' && fs.Flag('#') { + fmt.Fprintf(fs, "%#v", f.matrix) + return + } + format(f.matrix, f.prefix, f.margin, f.dot, f.squeeze, fs, c) +} + +// format prints a pretty representation of m to the fs io.Writer. The format character c +// specifies the numerical representation of elements; valid values are those for float64 +// specified in the fmt package, with their associated flags. 
In addition to this, a space +// preceding a verb indicates that zero values should be represented by the dot character. +// The printed range of the matrix can be limited by specifying a positive value for margin; +// If margin is greater than zero, only the first and last margin rows/columns of the matrix +// are output. If squeeze is true, column widths are determined on a per-column basis. +// +// format will not provide Go syntax output. +func format(m Matrix, prefix string, margin int, dot byte, squeeze bool, fs fmt.State, c rune) { + rows, cols := m.Dims() + + var printed int + if margin <= 0 { + printed = rows + if cols > printed { + printed = cols + } + } else { + printed = margin + } + + prec, pOk := fs.Precision() + if !pOk { + prec = -1 + } + + var ( + maxWidth int + widths widther + buf, pad []byte + ) + if squeeze { + widths = make(columnWidth, cols) + } else { + widths = new(uniformWidth) + } + switch c { + case 'v', 'e', 'E', 'f', 'F', 'g', 'G': + if c == 'v' { + buf, maxWidth = maxCellWidth(m, 'g', printed, prec, widths) + } else { + buf, maxWidth = maxCellWidth(m, c, printed, prec, widths) + } + default: + fmt.Fprintf(fs, "%%!%c(%T=Dims(%d, %d))", c, m, rows, cols) + return + } + width, _ := fs.Width() + width = max(width, maxWidth) + pad = make([]byte, max(width, 2)) + for i := range pad { + pad[i] = ' ' + } + + first := true + if rows > 2*printed || cols > 2*printed { + first = false + fmt.Fprintf(fs, "Dims(%d, %d)\n", rows, cols) + } + + skipZero := fs.Flag(' ') + for i := 0; i < rows; i++ { + if !first { + fmt.Fprint(fs, prefix) + } + first = false + var el string + switch { + case rows == 1: + fmt.Fprint(fs, "[") + el = "]" + case i == 0: + fmt.Fprint(fs, "⎡") + el = "⎤\n" + case i < rows-1: + fmt.Fprint(fs, "⎢") + el = "⎥\n" + default: + fmt.Fprint(fs, "⎣") + el = "⎦" + } + + for j := 0; j < cols; j++ { + if j >= printed && j < cols-printed { + j = cols - printed - 1 + if i == 0 || i == rows-1 { + fmt.Fprint(fs, "... ... 
") + } else { + fmt.Fprint(fs, " ") + } + continue + } + + v := m.At(i, j) + if v == 0 && skipZero { + buf = buf[:1] + buf[0] = dot + } else { + if c == 'v' { + buf = strconv.AppendFloat(buf[:0], v, 'g', prec, 64) + } else { + buf = strconv.AppendFloat(buf[:0], v, byte(c), prec, 64) + } + } + if fs.Flag('-') { + fs.Write(buf) + fs.Write(pad[:widths.width(j)-len(buf)]) + } else { + fs.Write(pad[:widths.width(j)-len(buf)]) + fs.Write(buf) + } + + if j < cols-1 { + fs.Write(pad[:2]) + } + } + + fmt.Fprint(fs, el) + + if i >= printed-1 && i < rows-printed && 2*printed < rows { + i = rows - printed - 1 + fmt.Fprintf(fs, "%s .\n%[1]s .\n%[1]s .\n", prefix) + continue + } + } +} + +func maxCellWidth(m Matrix, c rune, printed, prec int, w widther) ([]byte, int) { + var ( + buf = make([]byte, 0, 64) + rows, cols = m.Dims() + max int + ) + for i := 0; i < rows; i++ { + if i >= printed-1 && i < rows-printed && 2*printed < rows { + i = rows - printed - 1 + continue + } + for j := 0; j < cols; j++ { + if j >= printed && j < cols-printed { + continue + } + + buf = strconv.AppendFloat(buf, m.At(i, j), byte(c), prec, 64) + if len(buf) > max { + max = len(buf) + } + if len(buf) > w.width(j) { + w.setWidth(j, len(buf)) + } + buf = buf[:0] + } + } + return buf, max +} + +type widther interface { + width(i int) int + setWidth(i, w int) +} + +type uniformWidth int + +func (u *uniformWidth) width(_ int) int { return int(*u) } +func (u *uniformWidth) setWidth(_, w int) { *u = uniformWidth(w) } + +type columnWidth []int + +func (c columnWidth) width(i int) int { return c[i] } +func (c columnWidth) setWidth(i, w int) { c[i] = w } diff --git a/vendor/gonum.org/v1/gonum/mat/gsvd.go b/vendor/gonum.org/v1/gonum/mat/gsvd.go new file mode 100644 index 0000000000..2de511a9fe --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/gsvd.go @@ -0,0 +1,415 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/floats" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// GSVDKind specifies the treatment of singular vectors during a GSVD +// factorization. +type GSVDKind int + +const ( + // GSVDNone specifies that no singular vectors should be computed during + // the decomposition. + GSVDNone GSVDKind = 0 + + // GSVDU specifies that the U singular vectors should be computed during + // the decomposition. + GSVDU GSVDKind = 1 << iota + // GSVDV specifies that the V singular vectors should be computed during + // the decomposition. + GSVDV + // GSVDQ specifies that the Q singular vectors should be computed during + // the decomposition. + GSVDQ + + // GSVDAll is a convenience value for computing all of the singular vectors. + GSVDAll = GSVDU | GSVDV | GSVDQ +) + +// GSVD is a type for creating and using the Generalized Singular Value Decomposition +// (GSVD) of a matrix. +// +// The factorization is a linear transformation of the data sets from the given +// variable×sample spaces to reduced and diagonalized "eigenvariable"×"eigensample" +// spaces. +type GSVD struct { + kind GSVDKind + + r, p, c, k, l int + s1, s2 []float64 + a, b, u, v, q blas64.General + + work []float64 + iwork []int +} + +// succFact returns whether the receiver contains a successful factorization. +func (gsvd *GSVD) succFact() bool { + return gsvd.r != 0 +} + +// Factorize computes the generalized singular value decomposition (GSVD) of the input +// the r×c matrix A and the p×c matrix B. The singular values of A and B are computed +// in all cases, while the singular vectors are optionally computed depending on the +// input kind. 
+// +// The full singular value decomposition (kind == GSVDAll) deconstructs A and B as +// A = U * Σ₁ * [ 0 R ] * Q^T +// +// B = V * Σ₂ * [ 0 R ] * Q^T +// where Σ₁ and Σ₂ are r×(k+l) and p×(k+l) diagonal matrices of singular values, and +// U, V and Q are r×r, p×p and c×c orthogonal matrices of singular vectors. k+l is the +// effective numerical rank of the matrix [ A^T B^T ]^T. +// +// It is frequently not necessary to compute the full GSVD. Computation time and +// storage costs can be reduced using the appropriate kind. Either only the singular +// values can be computed (kind == SVDNone), or in conjunction with specific singular +// vectors (kind bit set according to matrix.GSVDU, matrix.GSVDV and matrix.GSVDQ). +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, routines that require a successful factorization will panic. +func (gsvd *GSVD) Factorize(a, b Matrix, kind GSVDKind) (ok bool) { + // kill the previous decomposition + gsvd.r = 0 + gsvd.kind = 0 + + r, c := a.Dims() + gsvd.r, gsvd.c = r, c + p, c := b.Dims() + gsvd.p = p + if gsvd.c != c { + panic(ErrShape) + } + var jobU, jobV, jobQ lapack.GSVDJob + switch { + default: + panic("gsvd: bad input kind") + case kind == GSVDNone: + jobU = lapack.GSVDNone + jobV = lapack.GSVDNone + jobQ = lapack.GSVDNone + case GSVDAll&kind != 0: + if GSVDU&kind != 0 { + jobU = lapack.GSVDU + gsvd.u = blas64.General{ + Rows: r, + Cols: r, + Stride: r, + Data: use(gsvd.u.Data, r*r), + } + } + if GSVDV&kind != 0 { + jobV = lapack.GSVDV + gsvd.v = blas64.General{ + Rows: p, + Cols: p, + Stride: p, + Data: use(gsvd.v.Data, p*p), + } + } + if GSVDQ&kind != 0 { + jobQ = lapack.GSVDQ + gsvd.q = blas64.General{ + Rows: c, + Cols: c, + Stride: c, + Data: use(gsvd.q.Data, c*c), + } + } + } + + // A and B are destroyed on call, so copy the matrices. 
+ aCopy := DenseCopyOf(a) + bCopy := DenseCopyOf(b) + + gsvd.s1 = use(gsvd.s1, c) + gsvd.s2 = use(gsvd.s2, c) + + gsvd.iwork = useInt(gsvd.iwork, c) + + gsvd.work = use(gsvd.work, 1) + lapack64.Ggsvd3(jobU, jobV, jobQ, aCopy.mat, bCopy.mat, gsvd.s1, gsvd.s2, gsvd.u, gsvd.v, gsvd.q, gsvd.work, -1, gsvd.iwork) + gsvd.work = use(gsvd.work, int(gsvd.work[0])) + gsvd.k, gsvd.l, ok = lapack64.Ggsvd3(jobU, jobV, jobQ, aCopy.mat, bCopy.mat, gsvd.s1, gsvd.s2, gsvd.u, gsvd.v, gsvd.q, gsvd.work, len(gsvd.work), gsvd.iwork) + if ok { + gsvd.a = aCopy.mat + gsvd.b = bCopy.mat + gsvd.kind = kind + } + return ok +} + +// Kind returns the GSVDKind of the decomposition. If no decomposition has been +// computed, Kind returns -1. +func (gsvd *GSVD) Kind() GSVDKind { + if !gsvd.succFact() { + return -1 + } + return gsvd.kind +} + +// Rank returns the k and l terms of the rank of [ A^T B^T ]^T. +func (gsvd *GSVD) Rank() (k, l int) { + return gsvd.k, gsvd.l +} + +// GeneralizedValues returns the generalized singular values of the factorized matrices. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length min(r,c)-k, and GeneralizedValues will +// panic with matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// GeneralizedValues will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) GeneralizedValues(v []float64) []float64 { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + d := min(r, c) + if v == nil { + v = make([]float64, d-k) + } + if len(v) != d-k { + panic(ErrSliceLengthMismatch) + } + floats.DivTo(v, gsvd.s1[k:d], gsvd.s2[k:d]) + return v +} + +// ValuesA returns the singular values of the factorized A matrix. +// If the input slice is non-nil, the values will be stored in-place into the slice. 
+// In this case, the slice must have length min(r,c)-k, and ValuesA will panic with +// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// ValuesA will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) ValuesA(s []float64) []float64 { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + d := min(r, c) + if s == nil { + s = make([]float64, d-k) + } + if len(s) != d-k { + panic(ErrSliceLengthMismatch) + } + copy(s, gsvd.s1[k:min(r, c)]) + return s +} + +// ValuesB returns the singular values of the factorized B matrix. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length min(r,c)-k, and ValuesB will panic with +// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. +// +// ValuesB will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) ValuesB(s []float64) []float64 { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + d := min(r, c) + if s == nil { + s = make([]float64, d-k) + } + if len(s) != d-k { + panic(ErrSliceLengthMismatch) + } + copy(s, gsvd.s2[k:d]) + return s +} + +// ZeroRTo extracts the matrix [ 0 R ] from the singular value decomposition, storing +// the result in-place into dst. [ 0 R ] is size (k+l)×c. +// If dst is nil, a new matrix is allocated. The resulting ZeroR matrix is returned. +// +// ZeroRTo will panic if the receiver does not contain a successful factorization. 
+func (gsvd *GSVD) ZeroRTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + c := gsvd.c + k := gsvd.k + l := gsvd.l + h := min(k+l, r) + if dst == nil { + dst = NewDense(k+l, c, nil) + } else { + dst.reuseAsZeroed(k+l, c) + } + a := Dense{ + mat: gsvd.a, + capRows: r, + capCols: c, + } + dst.Slice(0, h, c-k-l, c).(*Dense). + Copy(a.Slice(0, h, c-k-l, c)) + if r < k+l { + b := Dense{ + mat: gsvd.b, + capRows: gsvd.p, + capCols: c, + } + dst.Slice(r, k+l, c+r-k-l, c).(*Dense). + Copy(b.Slice(r-k, l, c+r-k-l, c)) + } + return dst +} + +// SigmaATo extracts the matrix Σ₁ from the singular value decomposition, storing +// the result in-place into dst. Σ₁ is size r×(k+l). +// If dst is nil, a new matrix is allocated. The resulting SigmaA matrix is returned. +// +// SigmaATo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) SigmaATo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + k := gsvd.k + l := gsvd.l + if dst == nil { + dst = NewDense(r, k+l, nil) + } else { + dst.reuseAsZeroed(r, k+l) + } + for i := 0; i < k; i++ { + dst.set(i, i, 1) + } + for i := k; i < min(r, k+l); i++ { + dst.set(i, i, gsvd.s1[i]) + } + return dst +} + +// SigmaBTo extracts the matrix Σ₂ from the singular value decomposition, storing +// the result in-place into dst. Σ₂ is size p×(k+l). +// If dst is nil, a new matrix is allocated. The resulting SigmaB matrix is returned. +// +// SigmaBTo will panic if the receiver does not contain a successful factorization. 
+func (gsvd *GSVD) SigmaBTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + r := gsvd.r + p := gsvd.p + k := gsvd.k + l := gsvd.l + if dst == nil { + dst = NewDense(p, k+l, nil) + } else { + dst.reuseAsZeroed(p, k+l) + } + for i := 0; i < min(l, r-k); i++ { + dst.set(i, i+k, gsvd.s2[k+i]) + } + for i := r - k; i < l; i++ { + dst.set(i, i+k, 1) + } + return dst +} + +// UTo extracts the matrix U from the singular value decomposition, storing +// the result in-place into dst. U is size r×r. +// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. +// +// UTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) UTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if gsvd.kind&GSVDU == 0 { + panic("mat: improper GSVD kind") + } + r := gsvd.u.Rows + c := gsvd.u.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: gsvd.u, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + return dst +} + +// VTo extracts the matrix V from the singular value decomposition, storing +// the result in-place into dst. V is size p×p. +// If dst is nil, a new matrix is allocated. The resulting V matrix is returned. +// +// VTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) VTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if gsvd.kind&GSVDV == 0 { + panic("mat: improper GSVD kind") + } + r := gsvd.v.Rows + c := gsvd.v.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: gsvd.v, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + return dst +} + +// QTo extracts the matrix Q from the singular value decomposition, storing +// the result in-place into dst. Q is size c×c. +// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. 
+// +// QTo will panic if the receiver does not contain a successful factorization. +func (gsvd *GSVD) QTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if gsvd.kind&GSVDQ == 0 { + panic("mat: improper GSVD kind") + } + r := gsvd.q.Rows + c := gsvd.q.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: gsvd.q, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + return dst +} diff --git a/vendor/gonum.org/v1/gonum/mat/hogsvd.go b/vendor/gonum.org/v1/gonum/mat/hogsvd.go new file mode 100644 index 0000000000..bd843e6b36 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/hogsvd.go @@ -0,0 +1,233 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "errors" + + "gonum.org/v1/gonum/blas/blas64" +) + +// HOGSVD is a type for creating and using the Higher Order Generalized Singular Value +// Decomposition (HOGSVD) of a set of matrices. +// +// The factorization is a linear transformation of the data sets from the given +// variable×sample spaces to reduced and diagonalized "eigenvariable"×"eigensample" +// spaces. +type HOGSVD struct { + n int + v *Dense + b []Dense + + err error +} + +// succFact returns whether the receiver contains a successful factorization. +func (gsvd *HOGSVD) succFact() bool { + return gsvd.n != 0 +} + +// Factorize computes the higher order generalized singular value decomposition (HOGSVD) +// of the n input r_i×c column tall matrices in m. HOGSV extends the GSVD case from 2 to n +// input matrices. +// +// M_0 = U_0 * Σ_0 * V^T +// M_1 = U_1 * Σ_1 * V^T +// . +// . +// . +// M_{n-1} = U_{n-1} * Σ_{n-1} * V^T +// +// where U_i are r_i×c matrices of singular vectors, Σ are c×c matrices singular values, and V +// is a c×c matrix of singular vectors. +// +// Factorize returns whether the decomposition succeeded. 
If the decomposition +// failed, routines that require a successful factorization will panic. +func (gsvd *HOGSVD) Factorize(m ...Matrix) (ok bool) { + // Factorize performs the HOGSVD factorisation + // essentially as described by Ponnapalli et al. + // https://doi.org/10.1371/journal.pone.0028072 + + if len(m) < 2 { + panic("hogsvd: too few matrices") + } + gsvd.n = 0 + + r, c := m[0].Dims() + a := make([]Cholesky, len(m)) + var ts SymDense + for i, d := range m { + rd, cd := d.Dims() + if rd < cd { + gsvd.err = ErrShape + return false + } + if rd > r { + r = rd + } + if cd != c { + panic(ErrShape) + } + ts.Reset() + ts.SymOuterK(1, d.T()) + ok = a[i].Factorize(&ts) + if !ok { + gsvd.err = errors.New("hogsvd: cholesky decomposition failed") + return false + } + } + + s := getWorkspace(c, c, true) + defer putWorkspace(s) + sij := getWorkspace(c, c, false) + defer putWorkspace(sij) + for i, ai := range a { + for _, aj := range a[i+1:] { + gsvd.err = ai.SolveCholTo(sij, &aj) + if gsvd.err != nil { + return false + } + s.Add(s, sij) + + gsvd.err = aj.SolveCholTo(sij, &ai) + if gsvd.err != nil { + return false + } + s.Add(s, sij) + } + } + s.Scale(1/float64(len(m)*(len(m)-1)), s) + + var eig Eigen + ok = eig.Factorize(s.T(), EigenRight) + if !ok { + gsvd.err = errors.New("hogsvd: eigen decomposition failed") + return false + } + vc := eig.VectorsTo(nil) + // vc is guaranteed to have real eigenvalues. + rc, cc := vc.Dims() + v := NewDense(rc, cc, nil) + for i := 0; i < rc; i++ { + for j := 0; j < cc; j++ { + a := vc.At(i, j) + v.set(i, j, real(a)) + } + } + // Rescale the columns of v by their Frobenius norms. + // Work done in cv is reflected in v. 
+ var cv VecDense + for j := 0; j < c; j++ { + cv.ColViewOf(v, j) + cv.ScaleVec(1/blas64.Nrm2(cv.mat), &cv) + } + + b := make([]Dense, len(m)) + biT := getWorkspace(c, r, false) + defer putWorkspace(biT) + for i, d := range m { + // All calls to reset will leave a zeroed + // matrix with capacity to store the result + // without additional allocation. + biT.Reset() + gsvd.err = biT.Solve(v, d.T()) + if gsvd.err != nil { + return false + } + b[i].Clone(biT.T()) + } + + gsvd.n = len(m) + gsvd.v = v + gsvd.b = b + return true +} + +// Err returns the reason for a factorization failure. +func (gsvd *HOGSVD) Err() error { + return gsvd.err +} + +// Len returns the number of matrices that have been factorized. If Len returns +// zero, the factorization was not successful. +func (gsvd *HOGSVD) Len() int { + return gsvd.n +} + +// UTo extracts the matrix U_n from the singular value decomposition, storing +// the result in-place into dst. U_n is size r×c. +// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. +// +// UTo will panic if the receiver does not contain a successful factorization. +func (gsvd *HOGSVD) UTo(dst *Dense, n int) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if n < 0 || gsvd.n <= n { + panic("hogsvd: invalid index") + } + + if dst == nil { + r, c := gsvd.b[n].Dims() + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(gsvd.b[n].Dims()) + } + dst.Copy(&gsvd.b[n]) + var v VecDense + for j, f := range gsvd.Values(nil, n) { + v.ColViewOf(dst, j) + v.ScaleVec(1/f, &v) + } + return dst +} + +// Values returns the nth set of singular values of the factorized system. +// If the input slice is non-nil, the values will be stored in-place into the slice. +// In this case, the slice must have length c, and Values will panic with +// matrix.ErrSliceLengthMismatch otherwise. If the input slice is nil, +// a new slice of the appropriate length will be allocated and returned. 
+// +// Values will panic if the receiver does not contain a successful factorization. +func (gsvd *HOGSVD) Values(s []float64, n int) []float64 { + if !gsvd.succFact() { + panic(badFact) + } + if n < 0 || gsvd.n <= n { + panic("hogsvd: invalid index") + } + + _, c := gsvd.b[n].Dims() + if s == nil { + s = make([]float64, c) + } else if len(s) != c { + panic(ErrSliceLengthMismatch) + } + var v VecDense + for j := 0; j < c; j++ { + v.ColViewOf(&gsvd.b[n], j) + s[j] = blas64.Nrm2(v.mat) + } + return s +} + +// VTo extracts the matrix V from the singular value decomposition, storing +// the result in-place into dst. V is size c×c. +// If dst is nil, a new matrix is allocated. The resulting V matrix is returned. +// +// VTo will panic if the receiver does not contain a successful factorization. +func (gsvd *HOGSVD) VTo(dst *Dense) *Dense { + if !gsvd.succFact() { + panic(badFact) + } + if dst == nil { + r, c := gsvd.v.Dims() + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(gsvd.v.Dims()) + } + dst.Copy(gsvd.v) + return dst +} diff --git a/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go b/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go new file mode 100644 index 0000000000..59815a6768 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/index_bound_checks.go @@ -0,0 +1,348 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file must be kept in sync with index_no_bound_checks.go. + +// +build bounds + +package mat + +// At returns the element at row i, column j. +func (m *Dense) At(i, j int) float64 { + return m.at(i, j) +} + +func (m *Dense) at(i, j int) float64 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. 
+func (m *Dense) Set(i, j int, v float64) { + m.set(i, j, v) +} + +func (m *Dense) set(i, j int, v float64) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (m *CDense) At(i, j int) complex128 { + return m.at(i, j) +} + +func (m *CDense) at(i, j int) complex128 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. +func (m *CDense) Set(i, j int, v complex128) { + m.set(i, j, v) +} + +func (m *CDense) set(i, j int, v complex128) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i. +// It panics if i is out of bounds or if j is not zero. +func (v *VecDense) At(i, j int) float64 { + if j != 0 { + panic(ErrColAccess) + } + return v.at(i) +} + +// AtVec returns the element at row i. +// It panics if i is out of bounds. +func (v *VecDense) AtVec(i int) float64 { + return v.at(i) +} + +func (v *VecDense) at(i int) float64 { + if uint(i) >= uint(v.mat.N) { + panic(ErrRowAccess) + } + return v.mat.Data[i*v.mat.Inc] +} + +// SetVec sets the element at row i to the value val. +// It panics if i is out of bounds. +func (v *VecDense) SetVec(i int, val float64) { + v.setVec(i, val) +} + +func (v *VecDense) setVec(i int, val float64) { + if uint(i) >= uint(v.mat.N) { + panic(ErrVectorAccess) + } + v.mat.Data[i*v.mat.Inc] = val +} + +// At returns the element at row i and column j. 
+func (t *SymDense) At(i, j int) float64 { + return t.at(i, j) +} + +func (t *SymDense) at(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + return t.mat.Data[i*t.mat.Stride+j] +} + +// SetSym sets the elements at (i,j) and (j,i) to the value v. +func (t *SymDense) SetSym(i, j int, v float64) { + t.set(i, j, v) +} + +func (t *SymDense) set(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + t.mat.Data[i*t.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (t *TriDense) At(i, j int) float64 { + return t.at(i, j) +} + +func (t *TriDense) at(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + return t.mat.Data[i*t.mat.Stride+j] +} + +// SetTri sets the element of the triangular matrix at row i, column j to the value v. +// It panics if the location is outside the appropriate half of the matrix. +func (t *TriDense) SetTri(i, j int, v float64) { + t.set(i, j, v) +} + +func (t *TriDense) set(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + t.mat.Data[i*t.mat.Stride+j] = v +} + +// At returns the element at row i, column j. 
+func (b *BandDense) At(i, j int) float64 { + return b.at(i, j) +} + +func (b *BandDense) at(i, j int) float64 { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + return 0 + } + return b.mat.Data[i*b.mat.Stride+pj] +} + +// SetBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (b *BandDense) SetBand(i, j int, v float64) { + b.set(i, j, v) +} + +func (b *BandDense) set(i, j int, v float64) { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + panic(ErrBandSet) + } + b.mat.Data[i*b.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. +func (s *SymBandDense) At(i, j int) float64 { + return s.at(i, j) +} + +func (s *SymBandDense) at(i, j int) float64 { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + return 0 + } + return s.mat.Data[i*s.mat.Stride+pj] +} + +// SetSymBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (s *SymBandDense) SetSymBand(i, j int, v float64) { + s.set(i, j, v) +} + +func (s *SymBandDense) set(i, j int, v float64) { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + panic(ErrBandSet) + } + s.mat.Data[i*s.mat.Stride+pj] = v +} + +func (t *TriBandDense) At(i, j int) float64 { + return t.at(i, j) +} + +func (t *TriBandDense) at(i, j int) float64 { + // TODO(btracey): Support Diag field, see #692. 
+ if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + kl, ku := t.mat.K, 0 + if isUpper { + kl, ku = 0, t.mat.K + } + pj := j + kl - i + if pj < 0 || kl+ku+1 <= pj { + return 0 + } + return t.mat.Data[i*t.mat.Stride+pj] +} + +func (t *TriBandDense) SetTriBand(i, j int, v float64) { + t.setTriBand(i, j, v) +} + +func (t *TriBandDense) setTriBand(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + kl, ku := t.mat.K, 0 + if isUpper { + kl, ku = 0, t.mat.K + } + pj := j + kl - i + if pj < 0 || kl+ku+1 <= pj { + panic(ErrBandSet) + } + // TODO(btracey): Support Diag field, see #692. + t.mat.Data[i*t.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. +func (d *DiagDense) At(i, j int) float64 { + return d.at(i, j) +} + +func (d *DiagDense) at(i, j int) float64 { + if uint(i) >= uint(d.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(d.mat.N) { + panic(ErrColAccess) + } + if i != j { + return 0 + } + return d.mat.Data[i*d.mat.Inc] +} + +// SetDiag sets the element at row i, column i to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (d *DiagDense) SetDiag(i int, v float64) { + d.setDiag(i, v) +} + +func (d *DiagDense) setDiag(i int, v float64) { + if uint(i) >= uint(d.mat.N) { + panic(ErrRowAccess) + } + d.mat.Data[i*d.mat.Inc] = v +} diff --git a/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go b/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go new file mode 100644 index 0000000000..051f8437af --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/index_no_bound_checks.go @@ -0,0 +1,359 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file must be kept in sync with index_bound_checks.go. + +// +build !bounds + +package mat + +// At returns the element at row i, column j. +func (m *Dense) At(i, j int) float64 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.at(i, j) +} + +func (m *Dense) at(i, j int) float64 { + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. +func (m *Dense) Set(i, j int, v float64) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.set(i, j, v) +} + +func (m *Dense) set(i, j int, v float64) { + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (m *CDense) At(i, j int) complex128 { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + return m.at(i, j) +} + +func (m *CDense) at(i, j int) complex128 { + return m.mat.Data[i*m.mat.Stride+j] +} + +// Set sets the element at row i, column j to the value v. +func (m *CDense) Set(i, j int, v complex128) { + if uint(i) >= uint(m.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(m.mat.Cols) { + panic(ErrColAccess) + } + m.set(i, j, v) +} + +func (m *CDense) set(i, j int, v complex128) { + m.mat.Data[i*m.mat.Stride+j] = v +} + +// At returns the element at row i. +// It panics if i is out of bounds or if j is not zero. +func (v *VecDense) At(i, j int) float64 { + if uint(i) >= uint(v.mat.N) { + panic(ErrRowAccess) + } + if j != 0 { + panic(ErrColAccess) + } + return v.at(i) +} + +// AtVec returns the element at row i. +// It panics if i is out of bounds. 
+func (v *VecDense) AtVec(i int) float64 { + if uint(i) >= uint(v.mat.N) { + panic(ErrRowAccess) + } + return v.at(i) +} + +func (v *VecDense) at(i int) float64 { + return v.mat.Data[i*v.mat.Inc] +} + +// SetVec sets the element at row i to the value val. +// It panics if i is out of bounds. +func (v *VecDense) SetVec(i int, val float64) { + if uint(i) >= uint(v.mat.N) { + panic(ErrVectorAccess) + } + v.setVec(i, val) +} + +func (v *VecDense) setVec(i int, val float64) { + v.mat.Data[i*v.mat.Inc] = val +} + +// At returns the element at row i and column j. +func (s *SymDense) At(i, j int) float64 { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + return s.at(i, j) +} + +func (s *SymDense) at(i, j int) float64 { + if i > j { + i, j = j, i + } + return s.mat.Data[i*s.mat.Stride+j] +} + +// SetSym sets the elements at (i,j) and (j,i) to the value v. +func (s *SymDense) SetSym(i, j int, v float64) { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + s.set(i, j, v) +} + +func (s *SymDense) set(i, j int, v float64) { + if i > j { + i, j = j, i + } + s.mat.Data[i*s.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (t *TriDense) At(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + return t.at(i, j) +} + +func (t *TriDense) at(i, j int) float64 { + isUpper := t.triKind() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + return t.mat.Data[i*t.mat.Stride+j] +} + +// SetTri sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate half of the matrix. 
+func (t *TriDense) SetTri(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + t.set(i, j, v) +} + +func (t *TriDense) set(i, j int, v float64) { + t.mat.Data[i*t.mat.Stride+j] = v +} + +// At returns the element at row i, column j. +func (b *BandDense) At(i, j int) float64 { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + return b.at(i, j) +} + +func (b *BandDense) at(i, j int) float64 { + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + return 0 + } + return b.mat.Data[i*b.mat.Stride+pj] +} + +// SetBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (b *BandDense) SetBand(i, j int, v float64) { + if uint(i) >= uint(b.mat.Rows) { + panic(ErrRowAccess) + } + if uint(j) >= uint(b.mat.Cols) { + panic(ErrColAccess) + } + pj := j + b.mat.KL - i + if pj < 0 || b.mat.KL+b.mat.KU+1 <= pj { + panic(ErrBandSet) + } + b.set(i, j, v) +} + +func (b *BandDense) set(i, j int, v float64) { + pj := j + b.mat.KL - i + b.mat.Data[i*b.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. +func (s *SymBandDense) At(i, j int) float64 { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + return s.at(i, j) +} + +func (s *SymBandDense) at(i, j int) float64 { + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + return 0 + } + return s.mat.Data[i*s.mat.Stride+pj] +} + +// SetSymBand sets the element at row i, column j to the value v. +// It panics if the location is outside the appropriate region of the matrix. 
+func (s *SymBandDense) SetSymBand(i, j int, v float64) { + if uint(i) >= uint(s.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(s.mat.N) { + panic(ErrColAccess) + } + s.set(i, j, v) +} + +func (s *SymBandDense) set(i, j int, v float64) { + if i > j { + i, j = j, i + } + pj := j - i + if s.mat.K+1 <= pj { + panic(ErrBandSet) + } + s.mat.Data[i*s.mat.Stride+pj] = v +} + +func (t *TriBandDense) At(i, j int) float64 { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + return t.at(i, j) +} + +func (t *TriBandDense) at(i, j int) float64 { + // TODO(btracey): Support Diag field, see #692. + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + return 0 + } + kl := t.mat.K + ku := 0 + if isUpper { + ku = t.mat.K + kl = 0 + } + pj := j + kl - i + if pj < 0 || kl+ku+1 <= pj { + return 0 + } + return t.mat.Data[i*t.mat.Stride+pj] +} + +func (t *TriBandDense) SetTriBand(i, j int, v float64) { + if uint(i) >= uint(t.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(t.mat.N) { + panic(ErrColAccess) + } + isUpper := t.isUpper() + if (isUpper && i > j) || (!isUpper && i < j) { + panic(ErrTriangleSet) + } + kl, ku := t.mat.K, 0 + if isUpper { + kl, ku = 0, t.mat.K + } + pj := j + kl - i + if pj < 0 || kl+ku+1 <= pj { + panic(ErrBandSet) + } + // TODO(btracey): Support Diag field, see #692. + t.mat.Data[i*t.mat.Stride+pj] = v +} + +func (t *TriBandDense) setTriBand(i, j int, v float64) { + var kl int + if !t.isUpper() { + kl = t.mat.K + } + pj := j + kl - i + t.mat.Data[i*t.mat.Stride+pj] = v +} + +// At returns the element at row i, column j. 
+func (d *DiagDense) At(i, j int) float64 { + if uint(i) >= uint(d.mat.N) { + panic(ErrRowAccess) + } + if uint(j) >= uint(d.mat.N) { + panic(ErrColAccess) + } + return d.at(i, j) +} + +func (d *DiagDense) at(i, j int) float64 { + if i != j { + return 0 + } + return d.mat.Data[i*d.mat.Inc] +} + +// SetDiag sets the element at row i, column i to the value v. +// It panics if the location is outside the appropriate region of the matrix. +func (d *DiagDense) SetDiag(i int, v float64) { + if uint(i) >= uint(d.mat.N) { + panic(ErrRowAccess) + } + d.setDiag(i, v) +} + +func (d *DiagDense) setDiag(i int, v float64) { + d.mat.Data[i*d.mat.Inc] = v +} diff --git a/vendor/gonum.org/v1/gonum/mat/inner.go b/vendor/gonum.org/v1/gonum/mat/inner.go new file mode 100644 index 0000000000..fba3e0b046 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/inner.go @@ -0,0 +1,121 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/internal/asm/f64" +) + +// Inner computes the generalized inner product +// x^T A y +// between column vectors x and y with matrix A. This is only a true inner product if +// A is symmetric positive definite, though the operation works for any matrix A. +// +// Inner panics if x.Len != m or y.Len != n when A is an m x n matrix. +func Inner(x Vector, a Matrix, y Vector) float64 { + m, n := a.Dims() + if x.Len() != m { + panic(ErrShape) + } + if y.Len() != n { + panic(ErrShape) + } + if m == 0 || n == 0 { + return 0 + } + + var sum float64 + + switch a := a.(type) { + case RawSymmetricer: + amat := a.RawSymmetric() + if amat.Uplo != blas.Upper { + // Panic as a string not a mat.Error. 
+ panic(badSymTriangle) + } + var xmat, ymat blas64.Vector + if xrv, ok := x.(RawVectorer); ok { + xmat = xrv.RawVector() + } else { + break + } + if yrv, ok := y.(RawVectorer); ok { + ymat = yrv.RawVector() + } else { + break + } + for i := 0; i < x.Len(); i++ { + xi := x.AtVec(i) + if xi != 0 { + if ymat.Inc == 1 { + sum += xi * f64.DotUnitary( + amat.Data[i*amat.Stride+i:i*amat.Stride+n], + ymat.Data[i:], + ) + } else { + sum += xi * f64.DotInc( + amat.Data[i*amat.Stride+i:i*amat.Stride+n], + ymat.Data[i*ymat.Inc:], uintptr(n-i), + 1, uintptr(ymat.Inc), + 0, 0, + ) + } + } + yi := y.AtVec(i) + if i != n-1 && yi != 0 { + if xmat.Inc == 1 { + sum += yi * f64.DotUnitary( + amat.Data[i*amat.Stride+i+1:i*amat.Stride+n], + xmat.Data[i+1:], + ) + } else { + sum += yi * f64.DotInc( + amat.Data[i*amat.Stride+i+1:i*amat.Stride+n], + xmat.Data[(i+1)*xmat.Inc:], uintptr(n-i-1), + 1, uintptr(xmat.Inc), + 0, 0, + ) + } + } + } + return sum + case RawMatrixer: + amat := a.RawMatrix() + var ymat blas64.Vector + if yrv, ok := y.(RawVectorer); ok { + ymat = yrv.RawVector() + } else { + break + } + for i := 0; i < x.Len(); i++ { + xi := x.AtVec(i) + if xi != 0 { + if ymat.Inc == 1 { + sum += xi * f64.DotUnitary( + amat.Data[i*amat.Stride:i*amat.Stride+n], + ymat.Data, + ) + } else { + sum += xi * f64.DotInc( + amat.Data[i*amat.Stride:i*amat.Stride+n], + ymat.Data, uintptr(n), + 1, uintptr(ymat.Inc), + 0, 0, + ) + } + } + } + return sum + } + for i := 0; i < x.Len(); i++ { + xi := x.AtVec(i) + for j := 0; j < y.Len(); j++ { + sum += xi * a.At(i, j) * y.AtVec(j) + } + } + return sum +} diff --git a/vendor/gonum.org/v1/gonum/mat/io.go b/vendor/gonum.org/v1/gonum/mat/io.go new file mode 100644 index 0000000000..7f7ef0703e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/io.go @@ -0,0 +1,492 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mat + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +// version is the current on-disk codec version. +const version uint32 = 0x1 + +// maxLen is the biggest slice/array len one can create on a 32/64b platform. +const maxLen = int64(int(^uint(0) >> 1)) + +var ( + headerSize = binary.Size(storage{}) + sizeInt64 = binary.Size(int64(0)) + sizeFloat64 = binary.Size(float64(0)) + + errWrongType = errors.New("mat: wrong data type") + + errTooBig = errors.New("mat: resulting data slice too big") + errTooSmall = errors.New("mat: input slice too small") + errBadBuffer = errors.New("mat: data buffer size mismatch") + errBadSize = errors.New("mat: invalid dimension") +) + +// Type encoding scheme: +// +// Type Form Packing Uplo Unit Rows Columns kU kL +// uint8 [GST] uint8 [BPF] uint8 [AUL] bool int64 int64 int64 int64 +// General 'G' 'F' 'A' false r c 0 0 +// Band 'G' 'B' 'A' false r c kU kL +// Symmetric 'S' 'F' ul false n n 0 0 +// SymmetricBand 'S' 'B' ul false n n k k +// SymmetricPacked 'S' 'P' ul false n n 0 0 +// Triangular 'T' 'F' ul Diag==Unit n n 0 0 +// TriangularBand 'T' 'B' ul Diag==Unit n n k k +// TriangularPacked 'T' 'P' ul Diag==Unit n n 0 0 +// +// G - general, S - symmetric, T - triangular +// F - full, B - band, P - packed +// A - all, U - upper, L - lower + +// MarshalBinary encodes the receiver into a binary form and returns the result. +// +// Dense is little-endian encoded as follows: +// 0 - 3 Version = 1 (uint32) +// 4 'G' (byte) +// 5 'F' (byte) +// 6 'A' (byte) +// 7 0 (byte) +// 8 - 15 number of rows (int64) +// 16 - 23 number of columns (int64) +// 24 - 31 0 (int64) +// 32 - 39 0 (int64) +// 40 - .. matrix data elements (float64) +// [0,0] [0,1] ... [0,ncols-1] +// [1,0] [1,1] ... [1,ncols-1] +// ... +// [nrows-1,0] ... 
[nrows-1,ncols-1] +func (m Dense) MarshalBinary() ([]byte, error) { + bufLen := int64(headerSize) + int64(m.mat.Rows)*int64(m.mat.Cols)*int64(sizeFloat64) + if bufLen <= 0 { + // bufLen is too big and has wrapped around. + return nil, errTooBig + } + + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(m.mat.Rows), Cols: int64(m.mat.Cols), + Version: version, + } + buf := make([]byte, bufLen) + n, err := header.marshalBinaryTo(bytes.NewBuffer(buf[:0])) + if err != nil { + return buf[:n], err + } + + p := headerSize + r, c := m.Dims() + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + binary.LittleEndian.PutUint64(buf[p:p+sizeFloat64], math.Float64bits(m.at(i, j))) + p += sizeFloat64 + } + } + + return buf, nil +} + +// MarshalBinaryTo encodes the receiver into a binary form and writes it into w. +// MarshalBinaryTo returns the number of bytes written into w and an error, if any. +// +// See MarshalBinary for the on-disk layout. +func (m Dense) MarshalBinaryTo(w io.Writer) (int, error) { + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(m.mat.Rows), Cols: int64(m.mat.Cols), + Version: version, + } + n, err := header.marshalBinaryTo(w) + if err != nil { + return n, err + } + + r, c := m.Dims() + var b [8]byte + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + binary.LittleEndian.PutUint64(b[:], math.Float64bits(m.at(i, j))) + nn, err := w.Write(b[:]) + n += nn + if err != nil { + return n, err + } + } + } + + return n, nil +} + +// UnmarshalBinary decodes the binary form into the receiver. +// It panics if the receiver is a non-zero Dense matrix. +// +// See MarshalBinary for the on-disk layout. +// +// Limited checks on the validity of the binary input are performed: +// - matrix.ErrShape is returned if the number of rows or columns is negative, +// - an error is returned if the resulting Dense matrix is too +// big for the current architecture (e.g. 
a 16GB matrix written by a +// 64b application and read back from a 32b application.) +// UnmarshalBinary does not limit the size of the unmarshaled matrix, and so +// it should not be used on untrusted data. +func (m *Dense) UnmarshalBinary(data []byte) error { + if !m.IsZero() { + panic("mat: unmarshal into non-zero matrix") + } + + if len(data) < headerSize { + return errTooSmall + } + + var header storage + err := header.unmarshalBinary(data[:headerSize]) + if err != nil { + return err + } + rows := header.Rows + cols := header.Cols + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return errWrongType + } + if rows < 0 || cols < 0 { + return errBadSize + } + size := rows * cols + if size == 0 { + return ErrZeroLength + } + if int(size) < 0 || size > maxLen { + return errTooBig + } + if len(data) != headerSize+int(rows*cols)*sizeFloat64 { + return errBadBuffer + } + + p := headerSize + m.reuseAs(int(rows), int(cols)) + for i := range m.mat.Data { + m.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[p : p+sizeFloat64])) + p += sizeFloat64 + } + + return nil +} + +// UnmarshalBinaryFrom decodes the binary form into the receiver and returns +// the number of bytes read and an error if any. +// It panics if the receiver is a non-zero Dense matrix. +// +// See MarshalBinary for the on-disk layout. +// +// Limited checks on the validity of the binary input are performed: +// - matrix.ErrShape is returned if the number of rows or columns is negative, +// - an error is returned if the resulting Dense matrix is too +// big for the current architecture (e.g. a 16GB matrix written by a +// 64b application and read back from a 32b application.) +// UnmarshalBinary does not limit the size of the unmarshaled matrix, and so +// it should not be used on untrusted data. 
+func (m *Dense) UnmarshalBinaryFrom(r io.Reader) (int, error) { + if !m.IsZero() { + panic("mat: unmarshal into non-zero matrix") + } + + var header storage + n, err := header.unmarshalBinaryFrom(r) + if err != nil { + return n, err + } + rows := header.Rows + cols := header.Cols + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return n, errWrongType + } + if rows < 0 || cols < 0 { + return n, errBadSize + } + size := rows * cols + if size == 0 { + return n, ErrZeroLength + } + if int(size) < 0 || size > maxLen { + return n, errTooBig + } + + m.reuseAs(int(rows), int(cols)) + var b [8]byte + for i := range m.mat.Data { + nn, err := readFull(r, b[:]) + n += nn + if err != nil { + if err == io.EOF { + return n, io.ErrUnexpectedEOF + } + return n, err + } + m.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(b[:])) + } + + return n, nil +} + +// MarshalBinary encodes the receiver into a binary form and returns the result. +// +// VecDense is little-endian encoded as follows: +// +// 0 - 3 Version = 1 (uint32) +// 4 'G' (byte) +// 5 'F' (byte) +// 6 'A' (byte) +// 7 0 (byte) +// 8 - 15 number of elements (int64) +// 16 - 23 1 (int64) +// 24 - 31 0 (int64) +// 32 - 39 0 (int64) +// 40 - .. vector's data elements (float64) +func (v VecDense) MarshalBinary() ([]byte, error) { + bufLen := int64(headerSize) + int64(v.mat.N)*int64(sizeFloat64) + if bufLen <= 0 { + // bufLen is too big and has wrapped around. 
+ return nil, errTooBig + } + + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(v.mat.N), Cols: 1, + Version: version, + } + buf := make([]byte, bufLen) + n, err := header.marshalBinaryTo(bytes.NewBuffer(buf[:0])) + if err != nil { + return buf[:n], err + } + + p := headerSize + for i := 0; i < v.mat.N; i++ { + binary.LittleEndian.PutUint64(buf[p:p+sizeFloat64], math.Float64bits(v.at(i))) + p += sizeFloat64 + } + + return buf, nil +} + +// MarshalBinaryTo encodes the receiver into a binary form, writes it to w and +// returns the number of bytes written and an error if any. +// +// See MarshalBainry for the on-disk format. +func (v VecDense) MarshalBinaryTo(w io.Writer) (int, error) { + header := storage{ + Form: 'G', Packing: 'F', Uplo: 'A', + Rows: int64(v.mat.N), Cols: 1, + Version: version, + } + n, err := header.marshalBinaryTo(w) + if err != nil { + return n, err + } + + var buf [8]byte + for i := 0; i < v.mat.N; i++ { + binary.LittleEndian.PutUint64(buf[:], math.Float64bits(v.at(i))) + nn, err := w.Write(buf[:]) + n += nn + if err != nil { + return n, err + } + } + + return n, nil +} + +// UnmarshalBinary decodes the binary form into the receiver. +// It panics if the receiver is a non-zero VecDense. +// +// See MarshalBinary for the on-disk layout. +// +// Limited checks on the validity of the binary input are performed: +// - matrix.ErrShape is returned if the number of rows is negative, +// - an error is returned if the resulting VecDense is too +// big for the current architecture (e.g. a 16GB vector written by a +// 64b application and read back from a 32b application.) +// UnmarshalBinary does not limit the size of the unmarshaled vector, and so +// it should not be used on untrusted data. 
+func (v *VecDense) UnmarshalBinary(data []byte) error { + if !v.IsZero() { + panic("mat: unmarshal into non-zero vector") + } + + if len(data) < headerSize { + return errTooSmall + } + + var header storage + err := header.unmarshalBinary(data[:headerSize]) + if err != nil { + return err + } + if header.Cols != 1 { + return ErrShape + } + n := header.Rows + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return errWrongType + } + if n == 0 { + return ErrZeroLength + } + if n < 0 { + return errBadSize + } + if int64(maxLen) < n { + return errTooBig + } + if len(data) != headerSize+int(n)*sizeFloat64 { + return errBadBuffer + } + + p := headerSize + v.reuseAs(int(n)) + for i := range v.mat.Data { + v.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[p : p+sizeFloat64])) + p += sizeFloat64 + } + + return nil +} + +// UnmarshalBinaryFrom decodes the binary form into the receiver, from the +// io.Reader and returns the number of bytes read and an error if any. +// It panics if the receiver is a non-zero VecDense. +// +// See MarshalBinary for the on-disk layout. +// See UnmarshalBinary for the list of sanity checks performed on the input. 
+func (v *VecDense) UnmarshalBinaryFrom(r io.Reader) (int, error) { + if !v.IsZero() { + panic("mat: unmarshal into non-zero vector") + } + + var header storage + n, err := header.unmarshalBinaryFrom(r) + if err != nil { + return n, err + } + if header.Cols != 1 { + return n, ErrShape + } + l := header.Rows + header.Version = 0 + header.Rows = 0 + header.Cols = 0 + if (header != storage{Form: 'G', Packing: 'F', Uplo: 'A'}) { + return n, errWrongType + } + if l == 0 { + return n, ErrZeroLength + } + if l < 0 { + return n, errBadSize + } + if int64(maxLen) < l { + return n, errTooBig + } + + v.reuseAs(int(l)) + var b [8]byte + for i := range v.mat.Data { + nn, err := readFull(r, b[:]) + n += nn + if err != nil { + if err == io.EOF { + return n, io.ErrUnexpectedEOF + } + return n, err + } + v.mat.Data[i] = math.Float64frombits(binary.LittleEndian.Uint64(b[:])) + } + + return n, nil +} + +// storage is the internal representation of the storage format of a +// serialised matrix. +type storage struct { + Version uint32 // Keep this first. + Form byte // [GST] + Packing byte // [BPF] + Uplo byte // [AUL] + Unit bool + Rows int64 + Cols int64 + KU int64 + KL int64 +} + +// TODO(kortschak): Consider replacing these with calls to direct +// encoding/decoding of fields rather than to binary.Write/binary.Read. 
+ +func (s storage) marshalBinaryTo(w io.Writer) (int, error) { + buf := bytes.NewBuffer(make([]byte, 0, headerSize)) + err := binary.Write(buf, binary.LittleEndian, s) + if err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +func (s *storage) unmarshalBinary(buf []byte) error { + err := binary.Read(bytes.NewReader(buf), binary.LittleEndian, s) + if err != nil { + return err + } + if s.Version != version { + return fmt.Errorf("mat: incorrect version: %d", s.Version) + } + return nil +} + +func (s *storage) unmarshalBinaryFrom(r io.Reader) (int, error) { + buf := make([]byte, headerSize) + n, err := readFull(r, buf) + if err != nil { + return n, err + } + return n, s.unmarshalBinary(buf[:n]) +} + +// readFull reads from r into buf until it has read len(buf). +// It returns the number of bytes copied and an error if fewer bytes were read. +// If an EOF happens after reading fewer than len(buf) bytes, io.ErrUnexpectedEOF is returned. +func readFull(r io.Reader, buf []byte) (int, error) { + var n int + var err error + for n < len(buf) && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n == len(buf) { + return n, nil + } + if err == io.EOF { + return n, io.ErrUnexpectedEOF + } + return n, err +} diff --git a/vendor/gonum.org/v1/gonum/mat/lq.go b/vendor/gonum.org/v1/gonum/mat/lq.go new file mode 100644 index 0000000000..d788457d2b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/lq.go @@ -0,0 +1,262 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const badLQ = "mat: invalid LQ factorization" + +// LQ is a type for creating and using the LQ factorization of a matrix. 
+type LQ struct { + lq *Dense + tau []float64 + cond float64 +} + +func (lq *LQ) updateCond(norm lapack.MatrixNorm) { + // Since A = L*Q, and Q is orthogonal, we get for the condition number κ + // κ(A) := |A| |A^-1| = |L*Q| |(L*Q)^-1| = |L| |Q^T * L^-1| + // = |L| |L^-1| = κ(L), + // where we used that fact that Q^-1 = Q^T. However, this assumes that + // the matrix norm is invariant under orthogonal transformations which + // is not the case for CondNorm. Hopefully the error is negligible: κ + // is only a qualitative measure anyway. + m := lq.lq.mat.Rows + work := getFloats(3*m, false) + iwork := getInts(m, false) + l := lq.lq.asTriDense(m, blas.NonUnit, blas.Lower) + v := lapack64.Trcon(norm, l.mat, work, iwork) + lq.cond = 1 / v + putFloats(work) + putInts(iwork) +} + +// Factorize computes the LQ factorization of an m×n matrix a where n <= m. The LQ +// factorization always exists even if A is singular. +// +// The LQ decomposition is a factorization of the matrix A such that A = L * Q. +// The matrix Q is an orthonormal n×n matrix, and L is an m×n upper triangular matrix. +// L and Q can be extracted from the LTo and QTo methods. +func (lq *LQ) Factorize(a Matrix) { + lq.factorize(a, CondNorm) +} + +func (lq *LQ) factorize(a Matrix, norm lapack.MatrixNorm) { + m, n := a.Dims() + if m > n { + panic(ErrShape) + } + k := min(m, n) + if lq.lq == nil { + lq.lq = &Dense{} + } + lq.lq.Clone(a) + work := []float64{0} + lq.tau = make([]float64, k) + lapack64.Gelqf(lq.lq.mat, lq.tau, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Gelqf(lq.lq.mat, lq.tau, work, len(work)) + putFloats(work) + lq.updateCond(norm) +} + +// isValid returns whether the receiver contains a factorization. +func (lq *LQ) isValid() bool { + return lq.lq != nil && !lq.lq.IsZero() +} + +// Cond returns the condition number for the factorized matrix. +// Cond will panic if the receiver does not contain a factorization. 
+func (lq *LQ) Cond() float64 { + if !lq.isValid() { + panic(badLQ) + } + return lq.cond +} + +// TODO(btracey): Add in the "Reduced" forms for extracting the m×m orthogonal +// and upper triangular matrices. + +// LTo extracts the m×n lower trapezoidal matrix from a LQ decomposition. +// If dst is nil, a new matrix is allocated. The resulting L matrix is returned. +// LTo will panic if the receiver does not contain a factorization. +func (lq *LQ) LTo(dst *Dense) *Dense { + if !lq.isValid() { + panic(badLQ) + } + + r, c := lq.lq.Dims() + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + // Disguise the LQ as a lower triangular. + t := &TriDense{ + mat: blas64.Triangular{ + N: r, + Stride: lq.lq.mat.Stride, + Data: lq.lq.mat.Data, + Uplo: blas.Lower, + Diag: blas.NonUnit, + }, + cap: lq.lq.capCols, + } + dst.Copy(t) + + if r == c { + return dst + } + // Zero right of the triangular. + for i := 0; i < r; i++ { + zero(dst.mat.Data[i*dst.mat.Stride+r : i*dst.mat.Stride+c]) + } + + return dst +} + +// QTo extracts the n×n orthonormal matrix Q from an LQ decomposition. +// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. +// QTo will panic if the receiver does not contain a factorization. +func (lq *LQ) QTo(dst *Dense) *Dense { + if !lq.isValid() { + panic(badLQ) + } + + _, c := lq.lq.Dims() + if dst == nil { + dst = NewDense(c, c, nil) + } else { + dst.reuseAsZeroed(c, c) + } + q := dst.mat + + // Set Q = I. + ldq := q.Stride + for i := 0; i < c; i++ { + q.Data[i*ldq+i] = 1 + } + + // Construct Q from the elementary reflectors. 
+ work := []float64{0} + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, q, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, q, work, len(work)) + putFloats(work) + + return dst +} + +// SolveTo finds a minimum-norm solution to a system of linear equations defined +// by the matrices A and b, where A is an m×n matrix represented in its LQ factorized +// form. If A is singular or near-singular a Condition error is returned. +// See the documentation for Condition for more information. +// +// The minimization problem solved depends on the input parameters. +// If trans == false, find the minimum norm solution of A * X = B. +// If trans == true, find X such that ||A*X - B||_2 is minimized. +// The solution matrix, X, is stored in place into dst. +// SolveTo will panic if the receiver does not contain a factorization. +func (lq *LQ) SolveTo(dst *Dense, trans bool, b Matrix) error { + if !lq.isValid() { + panic(badLQ) + } + + r, c := lq.lq.Dims() + br, bc := b.Dims() + + // The LQ solve algorithm stores the result in-place into the right hand side. + // The storage for the answer must be large enough to hold both b and x. + // However, this method's receiver must be the size of x. Copy b, and then + // copy the result into x at the end. + if trans { + if c != br { + panic(ErrShape) + } + dst.reuseAs(r, bc) + } else { + if r != br { + panic(ErrShape) + } + dst.reuseAs(c, bc) + } + // Do not need to worry about overlap between x and b because w has its own + // independent storage. 
+ w := getWorkspace(max(r, c), bc, false) + w.Copy(b) + t := lq.lq.asTriDense(lq.lq.mat.Rows, blas.NonUnit, blas.Lower).mat + if trans { + work := []float64{0} + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormlq(blas.Left, blas.NoTrans, lq.lq.mat, lq.tau, w.mat, work, len(work)) + putFloats(work) + + ok := lapack64.Trtrs(blas.Trans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + } else { + ok := lapack64.Trtrs(blas.NoTrans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + for i := r; i < c; i++ { + zero(w.mat.Data[i*w.mat.Stride : i*w.mat.Stride+bc]) + } + work := []float64{0} + lapack64.Ormlq(blas.Left, blas.Trans, lq.lq.mat, lq.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormlq(blas.Left, blas.Trans, lq.lq.mat, lq.tau, w.mat, work, len(work)) + putFloats(work) + } + // x was set above to be the correct size for the result. + dst.Copy(w) + putWorkspace(w) + if lq.cond > ConditionTolerance { + return Condition(lq.cond) + } + return nil +} + +// SolveVecTo finds a minimum-norm solution to a system of linear equations. +// See LQ.SolveTo for the full documentation. +// SolveToVec will panic if the receiver does not contain a factorization. +func (lq *LQ) SolveVecTo(dst *VecDense, trans bool, b Vector) error { + if !lq.isValid() { + panic(badLQ) + } + + r, c := lq.lq.Dims() + if _, bc := b.Dims(); bc != 1 { + panic(ErrShape) + } + + // The Solve implementation is non-trivial, so rather than duplicate the code, + // instead recast the VecDenses as Dense and call the matrix code. 
+ bm := Matrix(b) + if rv, ok := b.(RawVectorer); ok { + bmat := rv.RawVector() + if dst != b { + dst.checkOverlap(bmat) + } + b := VecDense{mat: bmat} + bm = b.asDense() + } + if trans { + dst.reuseAs(r) + } else { + dst.reuseAs(c) + } + return lq.SolveTo(dst.asDense(), trans, bm) +} diff --git a/vendor/gonum.org/v1/gonum/mat/lu.go b/vendor/gonum.org/v1/gonum/mat/lu.go new file mode 100644 index 0000000000..e0437169be --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/lu.go @@ -0,0 +1,422 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/floats" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const ( + badSliceLength = "mat: improper slice length" + badLU = "mat: invalid LU factorization" +) + +// LU is a type for creating and using the LU factorization of a matrix. +type LU struct { + lu *Dense + pivot []int + cond float64 +} + +// updateCond updates the stored condition number of the matrix. anorm is the +// norm of the original matrix. If anorm is negative it will be estimated. +func (lu *LU) updateCond(anorm float64, norm lapack.MatrixNorm) { + n := lu.lu.mat.Cols + work := getFloats(4*n, false) + defer putFloats(work) + iwork := getInts(n, false) + defer putInts(iwork) + if anorm < 0 { + // This is an approximation. By the definition of a norm, + // |AB| <= |A| |B|. + // Since A = L*U, we get for the condition number κ that + // κ(A) := |A| |A^-1| = |L*U| |A^-1| <= |L| |U| |A^-1|, + // so this will overestimate the condition number somewhat. + // The norm of the original factorized matrix cannot be stored + // because of update possibilities. 
+ u := lu.lu.asTriDense(n, blas.NonUnit, blas.Upper) + l := lu.lu.asTriDense(n, blas.Unit, blas.Lower) + unorm := lapack64.Lantr(norm, u.mat, work) + lnorm := lapack64.Lantr(norm, l.mat, work) + anorm = unorm * lnorm + } + v := lapack64.Gecon(norm, lu.lu.mat, anorm, work, iwork) + lu.cond = 1 / v +} + +// Factorize computes the LU factorization of the square matrix a and stores the +// result. The LU decomposition will complete regardless of the singularity of a. +// +// The LU factorization is computed with pivoting, and so really the decomposition +// is a PLU decomposition where P is a permutation matrix. The individual matrix +// factors can be extracted from the factorization using the Permutation method +// on Dense, and the LU LTo and UTo methods. +func (lu *LU) Factorize(a Matrix) { + lu.factorize(a, CondNorm) +} + +func (lu *LU) factorize(a Matrix, norm lapack.MatrixNorm) { + r, c := a.Dims() + if r != c { + panic(ErrSquare) + } + if lu.lu == nil { + lu.lu = NewDense(r, r, nil) + } else { + lu.lu.Reset() + lu.lu.reuseAs(r, r) + } + lu.lu.Copy(a) + if cap(lu.pivot) < r { + lu.pivot = make([]int, r) + } + lu.pivot = lu.pivot[:r] + work := getFloats(r, false) + anorm := lapack64.Lange(norm, lu.lu.mat, work) + putFloats(work) + lapack64.Getrf(lu.lu.mat, lu.pivot) + lu.updateCond(anorm, norm) +} + +// isValid returns whether the receiver contains a factorization. +func (lu *LU) isValid() bool { + return lu.lu != nil && !lu.lu.IsZero() +} + +// Cond returns the condition number for the factorized matrix. +// Cond will panic if the receiver does not contain a factorization. +func (lu *LU) Cond() float64 { + if !lu.isValid() { + panic(badLU) + } + return lu.cond +} + +// Reset resets the factorization so that it can be reused as the receiver of a +// dimensionally restricted operation. 
+func (lu *LU) Reset() { + if lu.lu != nil { + lu.lu.Reset() + } + lu.pivot = lu.pivot[:0] +} + +func (lu *LU) isZero() bool { + return len(lu.pivot) == 0 +} + +// Det returns the determinant of the matrix that has been factorized. In many +// expressions, using LogDet will be more numerically stable. +// Det will panic if the receiver does not contain a factorization. +func (lu *LU) Det() float64 { + det, sign := lu.LogDet() + return math.Exp(det) * sign +} + +// LogDet returns the log of the determinant and the sign of the determinant +// for the matrix that has been factorized. Numerical stability in product and +// division expressions is generally improved by working in log space. +// LogDet will panic if the receiver does not contain a factorization. +func (lu *LU) LogDet() (det float64, sign float64) { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + logDiag := getFloats(n, false) + defer putFloats(logDiag) + sign = 1.0 + for i := 0; i < n; i++ { + v := lu.lu.at(i, i) + if v < 0 { + sign *= -1 + } + if lu.pivot[i] != i { + sign *= -1 + } + logDiag[i] = math.Log(math.Abs(v)) + } + return floats.Sum(logDiag), sign +} + +// Pivot returns pivot indices that enable the construction of the permutation +// matrix P (see Dense.Permutation). If swaps == nil, then new memory will be +// allocated, otherwise the length of the input must be equal to the size of the +// factorized matrix. +// Pivot will panic if the receiver does not contain a factorization. +func (lu *LU) Pivot(swaps []int) []int { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + if swaps == nil { + swaps = make([]int, n) + } + if len(swaps) != n { + panic(badSliceLength) + } + // Perform the inverse of the row swaps in order to find the final + // row swap position. 
+ for i := range swaps { + swaps[i] = i + } + for i := n - 1; i >= 0; i-- { + v := lu.pivot[i] + swaps[i], swaps[v] = swaps[v], swaps[i] + } + return swaps +} + +// RankOne updates an LU factorization as if a rank-one update had been applied to +// the original matrix A, storing the result into the receiver. That is, if in +// the original LU decomposition P * L * U = A, in the updated decomposition +// P * L * U = A + alpha * x * y^T. +// RankOne will panic if orig does not contain a factorization. +func (lu *LU) RankOne(orig *LU, alpha float64, x, y Vector) { + if !orig.isValid() { + panic(badLU) + } + + // RankOne uses algorithm a1 on page 28 of "Multiple-Rank Updates to Matrix + // Factorizations for Nonlinear Analysis and Circuit Design" by Linzhong Deng. + // http://web.stanford.edu/group/SOL/dissertations/Linzhong-Deng-thesis.pdf + _, n := orig.lu.Dims() + if r, c := x.Dims(); r != n || c != 1 { + panic(ErrShape) + } + if r, c := y.Dims(); r != n || c != 1 { + panic(ErrShape) + } + if orig != lu { + if lu.isZero() { + if cap(lu.pivot) < n { + lu.pivot = make([]int, n) + } + lu.pivot = lu.pivot[:n] + if lu.lu == nil { + lu.lu = NewDense(n, n, nil) + } else { + lu.lu.reuseAs(n, n) + } + } else if len(lu.pivot) != n { + panic(ErrShape) + } + copy(lu.pivot, orig.pivot) + lu.lu.Copy(orig.lu) + } + + xs := getFloats(n, false) + defer putFloats(xs) + ys := getFloats(n, false) + defer putFloats(ys) + for i := 0; i < n; i++ { + xs[i] = x.AtVec(i) + ys[i] = y.AtVec(i) + } + + // Adjust for the pivoting in the LU factorization + for i, v := range lu.pivot { + xs[i], xs[v] = xs[v], xs[i] + } + + lum := lu.lu.mat + omega := alpha + for j := 0; j < n; j++ { + ujj := lum.Data[j*lum.Stride+j] + ys[j] /= ujj + theta := 1 + xs[j]*ys[j]*omega + beta := omega * ys[j] / theta + gamma := omega * xs[j] + omega -= beta * gamma + lum.Data[j*lum.Stride+j] *= theta + for i := j + 1; i < n; i++ { + xs[i] -= lum.Data[i*lum.Stride+j] * xs[j] + tmp := ys[i] + ys[i] -= 
lum.Data[j*lum.Stride+i] * ys[j] + lum.Data[i*lum.Stride+j] += beta * xs[i] + lum.Data[j*lum.Stride+i] += gamma * tmp + } + } + lu.updateCond(-1, CondNorm) +} + +// LTo extracts the lower triangular matrix from an LU factorization. +// If dst is nil, a new matrix is allocated. The resulting L matrix is returned. +// LTo will panic if the receiver does not contain a factorization. +func (lu *LU) LTo(dst *TriDense) *TriDense { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + if dst == nil { + dst = NewTriDense(n, Lower, nil) + } else { + dst.reuseAs(n, Lower) + } + // Extract the lower triangular elements. + for i := 0; i < n; i++ { + for j := 0; j < i; j++ { + dst.mat.Data[i*dst.mat.Stride+j] = lu.lu.mat.Data[i*lu.lu.mat.Stride+j] + } + } + // Set ones on the diagonal. + for i := 0; i < n; i++ { + dst.mat.Data[i*dst.mat.Stride+i] = 1 + } + return dst +} + +// UTo extracts the upper triangular matrix from an LU factorization. +// If dst is nil, a new matrix is allocated. The resulting U matrix is returned. +// UTo will panic if the receiver does not contain a factorization. +func (lu *LU) UTo(dst *TriDense) *TriDense { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + if dst == nil { + dst = NewTriDense(n, Upper, nil) + } else { + dst.reuseAs(n, Upper) + } + // Extract the upper triangular elements. + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + dst.mat.Data[i*dst.mat.Stride+j] = lu.lu.mat.Data[i*lu.lu.mat.Stride+j] + } + } + return dst +} + +// Permutation constructs an r×r permutation matrix with the given row swaps. +// A permutation matrix has exactly one element equal to one in each row and column +// and all other elements equal to zero. swaps[i] specifies the row with which +// i will be swapped, which is equivalent to the non-zero column of row i. 
+func (m *Dense) Permutation(r int, swaps []int) { + m.reuseAs(r, r) + for i := 0; i < r; i++ { + zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+r]) + v := swaps[i] + if v < 0 || v >= r { + panic(ErrRowAccess) + } + m.mat.Data[i*m.mat.Stride+v] = 1 + } +} + +// SolveTo solves a system of linear equations using the LU decomposition of a matrix. +// It computes +// A * X = B if trans == false +// A^T * X = B if trans == true +// In both cases, A is represented in LU factorized form, and the matrix X is +// stored into dst. +// +// If A is singular or near-singular a Condition error is returned. See +// the documentation for Condition for more information. +// SolveTo will panic if the receiver does not contain a factorization. +func (lu *LU) SolveTo(dst *Dense, trans bool, b Matrix) error { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + br, bc := b.Dims() + if br != n { + panic(ErrShape) + } + // TODO(btracey): Should test the condition number instead of testing that + // the determinant is exactly zero. + if lu.Det() == 0 { + return Condition(math.Inf(1)) + } + + dst.reuseAs(n, bc) + bU, _ := untranspose(b) + var restore func() + if dst == bU { + dst, restore = dst.isolatedWorkspace(bU) + defer restore() + } else if rm, ok := bU.(RawMatrixer); ok { + dst.checkOverlap(rm.RawMatrix()) + } + + dst.Copy(b) + t := blas.NoTrans + if trans { + t = blas.Trans + } + lapack64.Getrs(t, lu.lu.mat, dst.mat, lu.pivot) + if lu.cond > ConditionTolerance { + return Condition(lu.cond) + } + return nil +} + +// SolveVecTo solves a system of linear equations using the LU decomposition of a matrix. +// It computes +// A * x = b if trans == false +// A^T * x = b if trans == true +// In both cases, A is represented in LU factorized form, and the vector x is +// stored into dst. +// +// If A is singular or near-singular a Condition error is returned. See +// the documentation for Condition for more information. 
+// SolveVecTo will panic if the receiver does not contain a factorization. +func (lu *LU) SolveVecTo(dst *VecDense, trans bool, b Vector) error { + if !lu.isValid() { + panic(badLU) + } + + _, n := lu.lu.Dims() + if br, bc := b.Dims(); br != n || bc != 1 { + panic(ErrShape) + } + switch rv := b.(type) { + default: + dst.reuseAs(n) + return lu.SolveTo(dst.asDense(), trans, b) + case RawVectorer: + if dst != b { + dst.checkOverlap(rv.RawVector()) + } + // TODO(btracey): Should test the condition number instead of testing that + // the determinant is exactly zero. + if lu.Det() == 0 { + return Condition(math.Inf(1)) + } + + dst.reuseAs(n) + var restore func() + if dst == b { + dst, restore = dst.isolatedWorkspace(b) + defer restore() + } + dst.CopyVec(b) + vMat := blas64.General{ + Rows: n, + Cols: 1, + Stride: dst.mat.Inc, + Data: dst.mat.Data, + } + t := blas.NoTrans + if trans { + t = blas.Trans + } + lapack64.Getrs(t, lu.lu.mat, vMat, lu.pivot) + if lu.cond > ConditionTolerance { + return Condition(lu.cond) + } + return nil + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/matrix.go b/vendor/gonum.org/v1/gonum/mat/matrix.go new file mode 100644 index 0000000000..444d044579 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/matrix.go @@ -0,0 +1,946 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/floats" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// Matrix is the basic matrix interface type. +type Matrix interface { + // Dims returns the dimensions of a Matrix. + Dims() (r, c int) + + // At returns the value of a matrix element at row i, column j. + // It will panic if i or j are out of bounds for the matrix. + At(i, j int) float64 + + // T returns the transpose of the Matrix. 
Whether T returns a copy of the + // underlying data is implementation dependent. + // This method may be implemented using the Transpose type, which + // provides an implicit matrix transpose. + T() Matrix +} + +var ( + _ Matrix = Transpose{} + _ Untransposer = Transpose{} +) + +// Transpose is a type for performing an implicit matrix transpose. It implements +// the Matrix interface, returning values from the transpose of the matrix within. +type Transpose struct { + Matrix Matrix +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Matrix field. +func (t Transpose) At(i, j int) float64 { + return t.Matrix.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. The number of rows returned +// is the number of columns in the Matrix field, and the number of columns is +// the number of rows in the Matrix field. +func (t Transpose) Dims() (r, c int) { + c, r = t.Matrix.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Matrix field. +func (t Transpose) T() Matrix { + return t.Matrix +} + +// Untranspose returns the Matrix field. +func (t Transpose) Untranspose() Matrix { + return t.Matrix +} + +// Untransposer is a type that can undo an implicit transpose. +type Untransposer interface { + // Note: This interface is needed to unify all of the Transpose types. In + // the mat methods, we need to test if the Matrix has been implicitly + // transposed. If this is checked by testing for the specific Transpose type + // then the behavior will be different if the user uses T() or TTri() for a + // triangular matrix. + + // Untranspose returns the underlying Matrix stored for the implicit transpose. + Untranspose() Matrix +} + +// UntransposeBander is a type that can undo an implicit band transpose. +type UntransposeBander interface { + // Untranspose returns the underlying Banded stored for the implicit transpose. 
+ UntransposeBand() Banded +} + +// UntransposeTrier is a type that can undo an implicit triangular transpose. +type UntransposeTrier interface { + // Untranspose returns the underlying Triangular stored for the implicit transpose. + UntransposeTri() Triangular +} + +// UntransposeTriBander is a type that can undo an implicit triangular banded +// transpose. +type UntransposeTriBander interface { + // Untranspose returns the underlying Triangular stored for the implicit transpose. + UntransposeTriBand() TriBanded +} + +// Mutable is a matrix interface type that allows elements to be altered. +type Mutable interface { + // Set alters the matrix element at row i, column j to v. + // It will panic if i or j are out of bounds for the matrix. + Set(i, j int, v float64) + + Matrix +} + +// A RowViewer can return a Vector reflecting a row that is backed by the matrix +// data. The Vector returned will have length equal to the number of columns. +type RowViewer interface { + RowView(i int) Vector +} + +// A RawRowViewer can return a slice of float64 reflecting a row that is backed by the matrix +// data. +type RawRowViewer interface { + RawRowView(i int) []float64 +} + +// A ColViewer can return a Vector reflecting a column that is backed by the matrix +// data. The Vector returned will have length equal to the number of rows. +type ColViewer interface { + ColView(j int) Vector +} + +// A RawColViewer can return a slice of float64 reflecting a column that is backed by the matrix +// data. +type RawColViewer interface { + RawColView(j int) []float64 +} + +// A Cloner can make a copy of a into the receiver, overwriting the previous value of the +// receiver. The clone operation does not make any restriction on shape and will not cause +// shadowing. +type Cloner interface { + Clone(a Matrix) +} + +// A Reseter can reset the matrix so that it can be reused as the receiver of a dimensionally +// restricted operation. 
This is commonly used when the matrix is being used as a workspace +// or temporary matrix. +// +// If the matrix is a view, using the reset matrix may result in data corruption in elements +// outside the view. +type Reseter interface { + Reset() +} + +// A Copier can make a copy of elements of a into the receiver. The submatrix copied +// starts at row and column 0 and has dimensions equal to the minimum dimensions of +// the two matrices. The number of row and columns copied is returned. +// Copy will copy from a source that aliases the receiver unless the source is transposed; +// an aliasing transpose copy will panic with the exception for a special case when +// the source data has a unitary increment or stride. +type Copier interface { + Copy(a Matrix) (r, c int) +} + +// A Grower can grow the size of the represented matrix by the given number of rows and columns. +// Growing beyond the size given by the Caps method will result in the allocation of a new +// matrix and copying of the elements. If Grow is called with negative increments it will +// panic with ErrIndexOutOfRange. +type Grower interface { + Caps() (r, c int) + Grow(r, c int) Matrix +} + +// A BandWidther represents a banded matrix and can return the left and right half-bandwidths, k1 and +// k2. +type BandWidther interface { + BandWidth() (k1, k2 int) +} + +// A RawMatrixSetter can set the underlying blas64.General used by the receiver. There is no restriction +// on the shape of the receiver. Changes to the receiver's elements will be reflected in the blas64.General.Data. +type RawMatrixSetter interface { + SetRawMatrix(a blas64.General) +} + +// A RawMatrixer can return a blas64.General representation of the receiver. Changes to the blas64.General.Data +// slice will be reflected in the original matrix, changes to the Rows, Cols and Stride fields will not. 
+type RawMatrixer interface { + RawMatrix() blas64.General +} + +// A RawVectorer can return a blas64.Vector representation of the receiver. Changes to the blas64.Vector.Data +// slice will be reflected in the original matrix, changes to the Inc field will not. +type RawVectorer interface { + RawVector() blas64.Vector +} + +// A NonZeroDoer can call a function for each non-zero element of the receiver. +// The parameters of the function are the element indices and its value. +type NonZeroDoer interface { + DoNonZero(func(i, j int, v float64)) +} + +// A RowNonZeroDoer can call a function for each non-zero element of a row of the receiver. +// The parameters of the function are the element indices and its value. +type RowNonZeroDoer interface { + DoRowNonZero(i int, fn func(i, j int, v float64)) +} + +// A ColNonZeroDoer can call a function for each non-zero element of a column of the receiver. +// The parameters of the function are the element indices and its value. +type ColNonZeroDoer interface { + DoColNonZero(j int, fn func(i, j int, v float64)) +} + +// untranspose untransposes a matrix if applicable. If a is an Untransposer, then +// untranspose returns the underlying matrix and true. If it is not, then it returns +// the input matrix and false. +func untranspose(a Matrix) (Matrix, bool) { + if ut, ok := a.(Untransposer); ok { + return ut.Untranspose(), true + } + return a, false +} + +// untransposeExtract returns an untransposed matrix in a built-in matrix type. +// +// The untransposed matrix is returned unaltered if it is a built-in matrix type. +// Otherwise, if it implements a Raw method, an appropriate built-in type value +// is returned holding the raw matrix value of the input. If neither of these +// is possible, the untransposed matrix is returned. 
+func untransposeExtract(a Matrix) (Matrix, bool) { + ut, trans := untranspose(a) + switch m := ut.(type) { + case *DiagDense, *SymBandDense, *TriBandDense, *BandDense, *TriDense, *SymDense, *Dense: + return m, trans + // TODO(btracey): Add here if we ever have an equivalent of RawDiagDense. + case RawSymBander: + rsb := m.RawSymBand() + if rsb.Uplo != blas.Upper { + return ut, trans + } + var sb SymBandDense + sb.SetRawSymBand(rsb) + return &sb, trans + case RawTriBander: + rtb := m.RawTriBand() + if rtb.Diag == blas.Unit { + return ut, trans + } + var tb TriBandDense + tb.SetRawTriBand(rtb) + return &tb, trans + case RawBander: + var b BandDense + b.SetRawBand(m.RawBand()) + return &b, trans + case RawTriangular: + rt := m.RawTriangular() + if rt.Diag == blas.Unit { + return ut, trans + } + var t TriDense + t.SetRawTriangular(rt) + return &t, trans + case RawSymmetricer: + rs := m.RawSymmetric() + if rs.Uplo != blas.Upper { + return ut, trans + } + var s SymDense + s.SetRawSymmetric(rs) + return &s, trans + case RawMatrixer: + var d Dense + d.SetRawMatrix(m.RawMatrix()) + return &d, trans + default: + return ut, trans + } +} + +// TODO(btracey): Consider adding CopyCol/CopyRow if the behavior seems useful. +// TODO(btracey): Add in fast paths to Row/Col for the other concrete types +// (TriDense, etc.) as well as relevant interfaces (RowColer, RawRowViewer, etc.) + +// Col copies the elements in the jth column of the matrix into the slice dst. +// The length of the provided slice must equal the number of rows, unless the +// slice is nil in which case a new slice is first allocated. 
+func Col(dst []float64, j int, a Matrix) []float64 { + r, c := a.Dims() + if j < 0 || j >= c { + panic(ErrColAccess) + } + if dst == nil { + dst = make([]float64, r) + } else { + if len(dst) != r { + panic(ErrColLength) + } + } + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + m := rm.RawMatrix() + if aTrans { + copy(dst, m.Data[j*m.Stride:j*m.Stride+m.Cols]) + return dst + } + blas64.Copy(blas64.Vector{N: r, Inc: m.Stride, Data: m.Data[j:]}, + blas64.Vector{N: r, Inc: 1, Data: dst}, + ) + return dst + } + for i := 0; i < r; i++ { + dst[i] = a.At(i, j) + } + return dst +} + +// Row copies the elements in the ith row of the matrix into the slice dst. +// The length of the provided slice must equal the number of columns, unless the +// slice is nil in which case a new slice is first allocated. +func Row(dst []float64, i int, a Matrix) []float64 { + r, c := a.Dims() + if i < 0 || i >= r { + panic(ErrColAccess) + } + if dst == nil { + dst = make([]float64, c) + } else { + if len(dst) != c { + panic(ErrRowLength) + } + } + aU, aTrans := untranspose(a) + if rm, ok := aU.(RawMatrixer); ok { + m := rm.RawMatrix() + if aTrans { + blas64.Copy(blas64.Vector{N: c, Inc: m.Stride, Data: m.Data[i:]}, + blas64.Vector{N: c, Inc: 1, Data: dst}, + ) + return dst + } + copy(dst, m.Data[i*m.Stride:i*m.Stride+m.Cols]) + return dst + } + for j := 0; j < c; j++ { + dst[j] = a.At(i, j) + } + return dst +} + +// Cond returns the condition number of the given matrix under the given norm. +// The condition number must be based on the 1-norm, 2-norm or ∞-norm. +// Cond will panic with matrix.ErrShape if the matrix has zero size. +// +// BUG(btracey): The computation of the 1-norm and ∞-norm for non-square matrices +// is innacurate, although is typically the right order of magnitude. See +// https://github.com/xianyi/OpenBLAS/issues/636. 
While the value returned will +// change with the resolution of this bug, the result from Cond will match the +// condition number used internally. +func Cond(a Matrix, norm float64) float64 { + m, n := a.Dims() + if m == 0 || n == 0 { + panic(ErrShape) + } + var lnorm lapack.MatrixNorm + switch norm { + default: + panic("mat: bad norm value") + case 1: + lnorm = lapack.MaxColumnSum + case 2: + var svd SVD + ok := svd.Factorize(a, SVDNone) + if !ok { + return math.Inf(1) + } + return svd.Cond() + case math.Inf(1): + lnorm = lapack.MaxRowSum + } + + if m == n { + // Use the LU decomposition to compute the condition number. + var lu LU + lu.factorize(a, lnorm) + return lu.Cond() + } + if m > n { + // Use the QR factorization to compute the condition number. + var qr QR + qr.factorize(a, lnorm) + return qr.Cond() + } + // Use the LQ factorization to compute the condition number. + var lq LQ + lq.factorize(a, lnorm) + return lq.Cond() +} + +// Det returns the determinant of the matrix a. In many expressions using LogDet +// will be more numerically stable. +func Det(a Matrix) float64 { + det, sign := LogDet(a) + return math.Exp(det) * sign +} + +// Dot returns the sum of the element-wise product of a and b. +// Dot panics if the matrix sizes are unequal. +func Dot(a, b Vector) float64 { + la := a.Len() + lb := b.Len() + if la != lb { + panic(ErrShape) + } + if arv, ok := a.(RawVectorer); ok { + if brv, ok := b.(RawVectorer); ok { + return blas64.Dot(arv.RawVector(), brv.RawVector()) + } + } + var sum float64 + for i := 0; i < la; i++ { + sum += a.At(i, 0) * b.At(i, 0) + } + return sum +} + +// Equal returns whether the matrices a and b have the same size +// and are element-wise equal. 
+func Equal(a, b Matrix) bool { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + if rma, ok := aU.(RawMatrixer); ok { + if rmb, ok := bU.(RawMatrixer); ok { + ra := rma.RawMatrix() + rb := rmb.RawMatrix() + if aTrans == bTrans { + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if ra.Data[i*ra.Stride+j] != rb.Data[i*rb.Stride+j] { + return false + } + } + } + return true + } + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if ra.Data[i*ra.Stride+j] != rb.Data[j*rb.Stride+i] { + return false + } + } + } + return true + } + } + if rma, ok := aU.(RawSymmetricer); ok { + if rmb, ok := bU.(RawSymmetricer); ok { + ra := rma.RawSymmetric() + rb := rmb.RawSymmetric() + // Symmetric matrices are always upper and equal to their transpose. + for i := 0; i < ra.N; i++ { + for j := i; j < ra.N; j++ { + if ra.Data[i*ra.Stride+j] != rb.Data[i*rb.Stride+j] { + return false + } + } + } + return true + } + } + if ra, ok := aU.(*VecDense); ok { + if rb, ok := bU.(*VecDense); ok { + // If the raw vectors are the same length they must either both be + // transposed or both not transposed (or have length 1). + for i := 0; i < ra.mat.N; i++ { + if ra.mat.Data[i*ra.mat.Inc] != rb.mat.Data[i*rb.mat.Inc] { + return false + } + } + return true + } + } + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if a.At(i, j) != b.At(i, j) { + return false + } + } + } + return true +} + +// EqualApprox returns whether the matrices a and b have the same size and contain all equal +// elements with tolerance for element-wise equality specified by epsilon. Matrices +// with non-equal shapes are not equal. 
+func EqualApprox(a, b Matrix, epsilon float64) bool { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br || ac != bc { + return false + } + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + if rma, ok := aU.(RawMatrixer); ok { + if rmb, ok := bU.(RawMatrixer); ok { + ra := rma.RawMatrix() + rb := rmb.RawMatrix() + if aTrans == bTrans { + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[i*rb.Stride+j], epsilon, epsilon) { + return false + } + } + } + return true + } + for i := 0; i < ra.Rows; i++ { + for j := 0; j < ra.Cols; j++ { + if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[j*rb.Stride+i], epsilon, epsilon) { + return false + } + } + } + return true + } + } + if rma, ok := aU.(RawSymmetricer); ok { + if rmb, ok := bU.(RawSymmetricer); ok { + ra := rma.RawSymmetric() + rb := rmb.RawSymmetric() + // Symmetric matrices are always upper and equal to their transpose. + for i := 0; i < ra.N; i++ { + for j := i; j < ra.N; j++ { + if !floats.EqualWithinAbsOrRel(ra.Data[i*ra.Stride+j], rb.Data[i*rb.Stride+j], epsilon, epsilon) { + return false + } + } + } + return true + } + } + if ra, ok := aU.(*VecDense); ok { + if rb, ok := bU.(*VecDense); ok { + // If the raw vectors are the same length they must either both be + // transposed or both not transposed (or have length 1). + for i := 0; i < ra.mat.N; i++ { + if !floats.EqualWithinAbsOrRel(ra.mat.Data[i*ra.mat.Inc], rb.mat.Data[i*rb.mat.Inc], epsilon, epsilon) { + return false + } + } + return true + } + } + for i := 0; i < ar; i++ { + for j := 0; j < ac; j++ { + if !floats.EqualWithinAbsOrRel(a.At(i, j), b.At(i, j), epsilon, epsilon) { + return false + } + } + } + return true +} + +// LogDet returns the log of the determinant and the sign of the determinant +// for the matrix that has been factorized. Numerical stability in product and +// division expressions is generally improved by working in log space. 
+func LogDet(a Matrix) (det float64, sign float64) { + // TODO(btracey): Add specialized routines for TriDense, etc. + var lu LU + lu.Factorize(a) + return lu.LogDet() +} + +// Max returns the largest element value of the matrix A. +// Max will panic with matrix.ErrShape if the matrix has zero size. +func Max(a Matrix) float64 { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrShape) + } + // Max(A) = Max(A^T) + aU, _ := untranspose(a) + switch m := aU.(type) { + case RawMatrixer: + rm := m.RawMatrix() + max := math.Inf(-1) + for i := 0; i < rm.Rows; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { + if v > max { + max = v + } + } + } + return max + case RawTriangular: + rm := m.RawTriangular() + // The max of a triangular is at least 0 unless the size is 1. + if rm.N == 1 { + return rm.Data[0] + } + max := 0.0 + if rm.Uplo == blas.Upper { + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v > max { + max = v + } + } + } + return max + } + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+i+1] { + if v > max { + max = v + } + } + } + return max + case RawSymmetricer: + rm := m.RawSymmetric() + if rm.Uplo != blas.Upper { + panic(badSymTriangle) + } + max := math.Inf(-1) + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v > max { + max = v + } + } + } + return max + default: + r, c := aU.Dims() + max := math.Inf(-1) + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + v := aU.At(i, j) + if v > max { + max = v + } + } + } + return max + } +} + +// Min returns the smallest element value of the matrix A. +// Min will panic with matrix.ErrShape if the matrix has zero size. 
+func Min(a Matrix) float64 { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrShape) + } + // Min(A) = Min(A^T) + aU, _ := untranspose(a) + switch m := aU.(type) { + case RawMatrixer: + rm := m.RawMatrix() + min := math.Inf(1) + for i := 0; i < rm.Rows; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { + if v < min { + min = v + } + } + } + return min + case RawTriangular: + rm := m.RawTriangular() + // The min of a triangular is at most 0 unless the size is 1. + if rm.N == 1 { + return rm.Data[0] + } + min := 0.0 + if rm.Uplo == blas.Upper { + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v < min { + min = v + } + } + } + return min + } + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+i+1] { + if v < min { + min = v + } + } + } + return min + case RawSymmetricer: + rm := m.RawSymmetric() + if rm.Uplo != blas.Upper { + panic(badSymTriangle) + } + min := math.Inf(1) + for i := 0; i < rm.N; i++ { + for _, v := range rm.Data[i*rm.Stride+i : i*rm.Stride+rm.N] { + if v < min { + min = v + } + } + } + return min + default: + r, c := aU.Dims() + min := math.Inf(1) + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + v := aU.At(i, j) + if v < min { + min = v + } + } + } + return min + } +} + +// Norm returns the specified (induced) norm of the matrix a. See +// https://en.wikipedia.org/wiki/Matrix_norm for the definition of an induced norm. +// +// Valid norms are: +// 1 - The maximum absolute column sum +// 2 - Frobenius norm, the square root of the sum of the squares of the elements. +// Inf - The maximum absolute row sum. +// Norm will panic with ErrNormOrder if an illegal norm order is specified and +// with matrix.ErrShape if the matrix has zero size. 
+func Norm(a Matrix, norm float64) float64 { + r, c := a.Dims() + if r == 0 || c == 0 { + panic(ErrShape) + } + aU, aTrans := untranspose(a) + var work []float64 + switch rma := aU.(type) { + case RawMatrixer: + rm := rma.RawMatrix() + n := normLapack(norm, aTrans) + if n == lapack.MaxColumnSum { + work = getFloats(rm.Cols, false) + defer putFloats(work) + } + return lapack64.Lange(n, rm, work) + case RawTriangular: + rm := rma.RawTriangular() + n := normLapack(norm, aTrans) + if n == lapack.MaxRowSum || n == lapack.MaxColumnSum { + work = getFloats(rm.N, false) + defer putFloats(work) + } + return lapack64.Lantr(n, rm, work) + case RawSymmetricer: + rm := rma.RawSymmetric() + n := normLapack(norm, aTrans) + if n == lapack.MaxRowSum || n == lapack.MaxColumnSum { + work = getFloats(rm.N, false) + defer putFloats(work) + } + return lapack64.Lansy(n, rm, work) + case *VecDense: + rv := rma.RawVector() + switch norm { + default: + panic("unreachable") + case 1: + if aTrans { + imax := blas64.Iamax(rv) + return math.Abs(rma.At(imax, 0)) + } + return blas64.Asum(rv) + case 2: + return blas64.Nrm2(rv) + case math.Inf(1): + if aTrans { + return blas64.Asum(rv) + } + imax := blas64.Iamax(rv) + return math.Abs(rma.At(imax, 0)) + } + } + switch norm { + default: + panic("unreachable") + case 1: + var max float64 + for j := 0; j < c; j++ { + var sum float64 + for i := 0; i < r; i++ { + sum += math.Abs(a.At(i, j)) + } + if sum > max { + max = sum + } + } + return max + case 2: + var sum float64 + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + v := a.At(i, j) + sum += v * v + } + } + return math.Sqrt(sum) + case math.Inf(1): + var max float64 + for i := 0; i < r; i++ { + var sum float64 + for j := 0; j < c; j++ { + sum += math.Abs(a.At(i, j)) + } + if sum > max { + max = sum + } + } + return max + } +} + +// normLapack converts the float64 norm input in Norm to a lapack.MatrixNorm. 
+func normLapack(norm float64, aTrans bool) lapack.MatrixNorm { + switch norm { + case 1: + n := lapack.MaxColumnSum + if aTrans { + n = lapack.MaxRowSum + } + return n + case 2: + return lapack.Frobenius + case math.Inf(1): + n := lapack.MaxRowSum + if aTrans { + n = lapack.MaxColumnSum + } + return n + default: + panic(ErrNormOrder) + } +} + +// Sum returns the sum of the elements of the matrix. +func Sum(a Matrix) float64 { + // TODO(btracey): Add a fast path for the other supported matrix types. + + r, c := a.Dims() + var sum float64 + aU, _ := untranspose(a) + if rma, ok := aU.(RawMatrixer); ok { + rm := rma.RawMatrix() + for i := 0; i < rm.Rows; i++ { + for _, v := range rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] { + sum += v + } + } + return sum + } + for i := 0; i < r; i++ { + for j := 0; j < c; j++ { + sum += a.At(i, j) + } + } + return sum +} + +// A Tracer can compute the trace of the matrix. Trace must panic if the +// matrix is not square. +type Tracer interface { + Trace() float64 +} + +// Trace returns the trace of the matrix. Trace will panic if the +// matrix is not square. +func Trace(a Matrix) float64 { + m, _ := untransposeExtract(a) + if t, ok := m.(Tracer); ok { + return t.Trace() + } + r, c := a.Dims() + if r != c { + panic(ErrSquare) + } + var v float64 + for i := 0; i < r; i++ { + v += a.At(i, i) + } + return v +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// use returns a float64 slice with l elements, using f if it +// has the necessary capacity, otherwise creating a new slice. +func use(f []float64, l int) []float64 { + if l <= cap(f) { + return f[:l] + } + return make([]float64, l) +} + +// useZeroed returns a float64 slice with l elements, using f if it +// has the necessary capacity, otherwise creating a new slice. The +// elements of the returned slice are guaranteed to be zero. 
+func useZeroed(f []float64, l int) []float64 { + if l <= cap(f) { + f = f[:l] + zero(f) + return f + } + return make([]float64, l) +} + +// zero zeros the given slice's elements. +func zero(f []float64) { + for i := range f { + f[i] = 0 + } +} + +// useInt returns an int slice with l elements, using i if it +// has the necessary capacity, otherwise creating a new slice. +func useInt(i []int, l int) []int { + if l <= cap(i) { + return i[:l] + } + return make([]int, l) +} diff --git a/vendor/gonum.org/v1/gonum/mat/offset.go b/vendor/gonum.org/v1/gonum/mat/offset.go new file mode 100644 index 0000000000..af2c03b64a --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/offset.go @@ -0,0 +1,20 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!safe + +package mat + +import "unsafe" + +// offset returns the number of float64 values b[0] is after a[0]. +func offset(a, b []float64) int { + if &a[0] == &b[0] { + return 0 + } + // This expression must be atomic with respect to GC moves. + // At this stage this is true, because the GC does not + // move. See https://golang.org/issue/12445. + return int(uintptr(unsafe.Pointer(&b[0]))-uintptr(unsafe.Pointer(&a[0]))) / int(unsafe.Sizeof(float64(0))) +} diff --git a/vendor/gonum.org/v1/gonum/mat/offset_appengine.go b/vendor/gonum.org/v1/gonum/mat/offset_appengine.go new file mode 100644 index 0000000000..df617478cf --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/offset_appengine.go @@ -0,0 +1,24 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine safe + +package mat + +import "reflect" + +var sizeOfFloat64 = int(reflect.TypeOf(float64(0)).Size()) + +// offset returns the number of float64 values b[0] is after a[0]. 
+func offset(a, b []float64) int { + va0 := reflect.ValueOf(a).Index(0) + vb0 := reflect.ValueOf(b).Index(0) + if va0.Addr() == vb0.Addr() { + return 0 + } + // This expression must be atomic with respect to GC moves. + // At this stage this is true, because the GC does not + // move. See https://golang.org/issue/12445. + return int(vb0.UnsafeAddr()-va0.UnsafeAddr()) / sizeOfFloat64 +} diff --git a/vendor/gonum.org/v1/gonum/mat/pool.go b/vendor/gonum.org/v1/gonum/mat/pool.go new file mode 100644 index 0000000000..25ca29f18f --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/pool.go @@ -0,0 +1,236 @@ +// Copyright ©2014 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "sync" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var tab64 = [64]byte{ + 0x3f, 0x00, 0x3a, 0x01, 0x3b, 0x2f, 0x35, 0x02, + 0x3c, 0x27, 0x30, 0x1b, 0x36, 0x21, 0x2a, 0x03, + 0x3d, 0x33, 0x25, 0x28, 0x31, 0x12, 0x1c, 0x14, + 0x37, 0x1e, 0x22, 0x0b, 0x2b, 0x0e, 0x16, 0x04, + 0x3e, 0x39, 0x2e, 0x34, 0x26, 0x1a, 0x20, 0x29, + 0x32, 0x24, 0x11, 0x13, 0x1d, 0x0a, 0x0d, 0x15, + 0x38, 0x2d, 0x19, 0x1f, 0x23, 0x10, 0x09, 0x0c, + 0x2c, 0x18, 0x0f, 0x08, 0x17, 0x07, 0x06, 0x05, +} + +// bits returns the ceiling of base 2 log of v. +// Approach based on http://stackoverflow.com/a/11398748. +func bits(v uint64) byte { + if v == 0 { + return 0 + } + v <<= 2 + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v |= v >> 32 + return tab64[((v-(v>>1))*0x07EDD5E59A4E28C2)>>58] - 1 +} + +var ( + // pool contains size stratified workspace Dense pools. + // Each pool element i returns sized matrices with a data + // slice capped at 1< 2. 
+ if !m.IsZero() { + if fr != r { + panic(ErrShape) + } + if _, lc := factors[len(factors)-1].Dims(); lc != c { + panic(ErrShape) + } + } + + dims := make([]int, len(factors)+1) + dims[0] = r + dims[len(dims)-1] = c + pc := fc + for i, f := range factors[1:] { + cr, cc := f.Dims() + dims[i+1] = cr + if pc != cr { + panic(ErrShape) + } + pc = cc + } + + return &multiplier{ + factors: factors, + dims: dims, + table: newTable(len(factors)), + } +} + +// optimize determines an optimal matrix multiply operation order. +func (p *multiplier) optimize() { + if debugProductWalk { + fmt.Printf("chain dims: %v\n", p.dims) + } + const maxInt = int(^uint(0) >> 1) + for f := 1; f < len(p.factors); f++ { + for i := 0; i < len(p.factors)-f; i++ { + j := i + f + p.table.set(i, j, entry{cost: maxInt}) + for k := i; k < j; k++ { + cost := p.table.at(i, k).cost + p.table.at(k+1, j).cost + p.dims[i]*p.dims[k+1]*p.dims[j+1] + if cost < p.table.at(i, j).cost { + p.table.set(i, j, entry{cost: cost, k: k}) + } + } + } + } +} + +// multiply walks the optimal operation tree found by optimize, +// leaving the final result in the stack. It returns the +// product, which may be copied but should be returned to +// the workspace pool. +func (p *multiplier) multiply() *Dense { + result, _ := p.multiplySubchain(0, len(p.factors)-1) + if debugProductWalk { + r, c := result.Dims() + fmt.Printf("\tpop result (%d×%d) cost=%d\n", r, c, p.table.at(0, len(p.factors)-1).cost) + } + return result.(*Dense) +} + +func (p *multiplier) multiplySubchain(i, j int) (m Matrix, intermediate bool) { + if i == j { + return p.factors[i], false + } + + a, aTmp := p.multiplySubchain(i, p.table.at(i, j).k) + b, bTmp := p.multiplySubchain(p.table.at(i, j).k+1, j) + + ar, ac := a.Dims() + br, bc := b.Dims() + if ac != br { + // Panic with a string since this + // is not a user-facing panic. 
+ panic(ErrShape.Error()) + } + + if debugProductWalk { + fmt.Printf("\tpush f[%d] (%d×%d)%s * f[%d] (%d×%d)%s\n", + i, ar, ac, result(aTmp), j, br, bc, result(bTmp)) + } + + r := getWorkspace(ar, bc, false) + r.Mul(a, b) + if aTmp { + putWorkspace(a.(*Dense)) + } + if bTmp { + putWorkspace(b.(*Dense)) + } + return r, true +} + +type entry struct { + k int // is the chain subdivision index. + cost int // cost is the cost of the operation. +} + +// table is a row major n×n dynamic programming table. +type table struct { + n int + entries []entry +} + +func newTable(n int) table { + return table{n: n, entries: make([]entry, n*n)} +} + +func (t table) at(i, j int) entry { return t.entries[i*t.n+j] } +func (t table) set(i, j int, e entry) { t.entries[i*t.n+j] = e } + +type result bool + +func (r result) String() string { + if r { + return " (popped result)" + } + return "" +} diff --git a/vendor/gonum.org/v1/gonum/mat/qr.go b/vendor/gonum.org/v1/gonum/mat/qr.go new file mode 100644 index 0000000000..bf38ee4d6d --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/qr.go @@ -0,0 +1,260 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +const badQR = "mat: invalid QR factorization" + +// QR is a type for creating and using the QR factorization of a matrix. +type QR struct { + qr *Dense + tau []float64 + cond float64 +} + +func (qr *QR) updateCond(norm lapack.MatrixNorm) { + // Since A = Q*R, and Q is orthogonal, we get for the condition number κ + // κ(A) := |A| |A^-1| = |Q*R| |(Q*R)^-1| = |R| |R^-1 * Q^T| + // = |R| |R^-1| = κ(R), + // where we used that fact that Q^-1 = Q^T. 
However, this assumes that + // the matrix norm is invariant under orthogonal transformations which + // is not the case for CondNorm. Hopefully the error is negligible: κ + // is only a qualitative measure anyway. + n := qr.qr.mat.Cols + work := getFloats(3*n, false) + iwork := getInts(n, false) + r := qr.qr.asTriDense(n, blas.NonUnit, blas.Upper) + v := lapack64.Trcon(norm, r.mat, work, iwork) + putFloats(work) + putInts(iwork) + qr.cond = 1 / v +} + +// Factorize computes the QR factorization of an m×n matrix a where m >= n. The QR +// factorization always exists even if A is singular. +// +// The QR decomposition is a factorization of the matrix A such that A = Q * R. +// The matrix Q is an orthonormal m×m matrix, and R is an m×n upper triangular matrix. +// Q and R can be extracted using the QTo and RTo methods. +func (qr *QR) Factorize(a Matrix) { + qr.factorize(a, CondNorm) +} + +func (qr *QR) factorize(a Matrix, norm lapack.MatrixNorm) { + m, n := a.Dims() + if m < n { + panic(ErrShape) + } + k := min(m, n) + if qr.qr == nil { + qr.qr = &Dense{} + } + qr.qr.Clone(a) + work := []float64{0} + qr.tau = make([]float64, k) + lapack64.Geqrf(qr.qr.mat, qr.tau, work, -1) + + work = getFloats(int(work[0]), false) + lapack64.Geqrf(qr.qr.mat, qr.tau, work, len(work)) + putFloats(work) + qr.updateCond(norm) +} + +// isValid returns whether the receiver contains a factorization. +func (qr *QR) isValid() bool { + return qr.qr != nil && !qr.qr.IsZero() +} + +// Cond returns the condition number for the factorized matrix. +// Cond will panic if the receiver does not contain a factorization. +func (qr *QR) Cond() float64 { + if !qr.isValid() { + panic(badQR) + } + return qr.cond +} + +// TODO(btracey): Add in the "Reduced" forms for extracting the n×n orthogonal +// and upper triangular matrices. + +// RTo extracts the m×n upper trapezoidal matrix from a QR decomposition. +// If dst is nil, a new matrix is allocated. The resulting dst matrix is returned. 
+// RTo will panic if the receiver does not contain a factorization. +func (qr *QR) RTo(dst *Dense) *Dense { + if !qr.isValid() { + panic(badQR) + } + + r, c := qr.qr.Dims() + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + // Disguise the QR as an upper triangular + t := &TriDense{ + mat: blas64.Triangular{ + N: c, + Stride: qr.qr.mat.Stride, + Data: qr.qr.mat.Data, + Uplo: blas.Upper, + Diag: blas.NonUnit, + }, + cap: qr.qr.capCols, + } + dst.Copy(t) + + // Zero below the triangular. + for i := r; i < c; i++ { + zero(dst.mat.Data[i*dst.mat.Stride : i*dst.mat.Stride+c]) + } + + return dst +} + +// QTo extracts the m×m orthonormal matrix Q from a QR decomposition. +// If dst is nil, a new matrix is allocated. The resulting Q matrix is returned. +// QTo will panic if the receiver does not contain a factorization. +func (qr *QR) QTo(dst *Dense) *Dense { + if !qr.isValid() { + panic(badQR) + } + + r, _ := qr.qr.Dims() + if dst == nil { + dst = NewDense(r, r, nil) + } else { + dst.reuseAsZeroed(r, r) + } + + // Set Q = I. + for i := 0; i < r*r; i += r + 1 { + dst.mat.Data[i] = 1 + } + + // Construct Q from the elementary reflectors. + work := []float64{0} + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, dst.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, dst.mat, work, len(work)) + putFloats(work) + + return dst +} + +// SolveTo finds a minimum-norm solution to a system of linear equations defined +// by the matrices A and b, where A is an m×n matrix represented in its QR factorized +// form. If A is singular or near-singular a Condition error is returned. +// See the documentation for Condition for more information. +// +// The minimization problem solved depends on the input parameters. +// If trans == false, find X such that ||A*X - B||_2 is minimized. +// If trans == true, find the minimum norm solution of A^T * X = B. 
+// The solution matrix, X, is stored in place into dst. +// SolveTo will panic if the receiver does not contain a factorization. +func (qr *QR) SolveTo(dst *Dense, trans bool, b Matrix) error { + if !qr.isValid() { + panic(badQR) + } + + r, c := qr.qr.Dims() + br, bc := b.Dims() + + // The QR solve algorithm stores the result in-place into the right hand side. + // The storage for the answer must be large enough to hold both b and x. + // However, this method's receiver must be the size of x. Copy b, and then + // copy the result into m at the end. + if trans { + if c != br { + panic(ErrShape) + } + dst.reuseAs(r, bc) + } else { + if r != br { + panic(ErrShape) + } + dst.reuseAs(c, bc) + } + // Do not need to worry about overlap between m and b because x has its own + // independent storage. + w := getWorkspace(max(r, c), bc, false) + w.Copy(b) + t := qr.qr.asTriDense(qr.qr.mat.Cols, blas.NonUnit, blas.Upper).mat + if trans { + ok := lapack64.Trtrs(blas.Trans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + for i := c; i < r; i++ { + zero(w.mat.Data[i*w.mat.Stride : i*w.mat.Stride+bc]) + } + work := []float64{0} + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormqr(blas.Left, blas.NoTrans, qr.qr.mat, qr.tau, w.mat, work, len(work)) + putFloats(work) + } else { + work := []float64{0} + lapack64.Ormqr(blas.Left, blas.Trans, qr.qr.mat, qr.tau, w.mat, work, -1) + work = getFloats(int(work[0]), false) + lapack64.Ormqr(blas.Left, blas.Trans, qr.qr.mat, qr.tau, w.mat, work, len(work)) + putFloats(work) + + ok := lapack64.Trtrs(blas.NoTrans, t, w.mat) + if !ok { + return Condition(math.Inf(1)) + } + } + // X was set above to be the correct size for the result. + dst.Copy(w) + putWorkspace(w) + if qr.cond > ConditionTolerance { + return Condition(qr.cond) + } + return nil +} + +// SolveVecTo finds a minimum-norm solution to a system of linear equations, +// Ax = b. 
+// See QR.SolveTo for the full documentation. +// SolveVecTo will panic if the receiver does not contain a factorization. +func (qr *QR) SolveVecTo(dst *VecDense, trans bool, b Vector) error { + if !qr.isValid() { + panic(badQR) + } + + r, c := qr.qr.Dims() + if _, bc := b.Dims(); bc != 1 { + panic(ErrShape) + } + + // The Solve implementation is non-trivial, so rather than duplicate the code, + // instead recast the VecDenses as Dense and call the matrix code. + bm := Matrix(b) + if rv, ok := b.(RawVectorer); ok { + bmat := rv.RawVector() + if dst != b { + dst.checkOverlap(bmat) + } + b := VecDense{mat: bmat} + bm = b.asDense() + } + if trans { + dst.reuseAs(r) + } else { + dst.reuseAs(c) + } + return qr.SolveTo(dst.asDense(), trans, bm) + +} diff --git a/vendor/gonum.org/v1/gonum/mat/shadow.go b/vendor/gonum.org/v1/gonum/mat/shadow.go new file mode 100644 index 0000000000..cc62e44f0b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/shadow.go @@ -0,0 +1,226 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" +) + +const ( + // regionOverlap is the panic string used for the general case + // of a matrix region overlap between a source and destination. + regionOverlap = "mat: bad region: overlap" + + // regionIdentity is the panic string used for the specific + // case of complete agreement between a source and a destination. + regionIdentity = "mat: bad region: identical" + + // mismatchedStrides is the panic string used for overlapping + // data slices with differing strides. + mismatchedStrides = "mat: bad region: different strides" +) + +// checkOverlap returns false if the receiver does not overlap data elements +// referenced by the parameter and panics otherwise. 
+// +// checkOverlap methods return a boolean to allow the check call to be added to a +// boolean expression, making use of short-circuit operators. +func checkOverlap(a, b blas64.General) bool { + if cap(a.Data) == 0 || cap(b.Data) == 0 { + return false + } + + off := offset(a.Data[:1], b.Data[:1]) + + if off == 0 { + // At least one element overlaps. + if a.Cols == b.Cols && a.Rows == b.Rows && a.Stride == b.Stride { + panic(regionIdentity) + } + panic(regionOverlap) + } + + if off > 0 && len(a.Data) <= off { + // We know a is completely before b. + return false + } + if off < 0 && len(b.Data) <= -off { + // We know a is completely after b. + return false + } + + if a.Stride != b.Stride { + // Too hard, so assume the worst. + panic(mismatchedStrides) + } + + if off < 0 { + off = -off + a.Cols, b.Cols = b.Cols, a.Cols + } + if rectanglesOverlap(off, a.Cols, b.Cols, a.Stride) { + panic(regionOverlap) + } + return false +} + +func (m *Dense) checkOverlap(a blas64.General) bool { + return checkOverlap(m.RawMatrix(), a) +} + +func (m *Dense) checkOverlapMatrix(a Matrix) bool { + if m == a { + return false + } + var amat blas64.General + switch a := a.(type) { + default: + return false + case RawMatrixer: + amat = a.RawMatrix() + case RawSymmetricer: + amat = generalFromSymmetric(a.RawSymmetric()) + case RawTriangular: + amat = generalFromTriangular(a.RawTriangular()) + } + return m.checkOverlap(amat) +} + +func (s *SymDense) checkOverlap(a blas64.General) bool { + return checkOverlap(generalFromSymmetric(s.RawSymmetric()), a) +} + +func (s *SymDense) checkOverlapMatrix(a Matrix) bool { + if s == a { + return false + } + var amat blas64.General + switch a := a.(type) { + default: + return false + case RawMatrixer: + amat = a.RawMatrix() + case RawSymmetricer: + amat = generalFromSymmetric(a.RawSymmetric()) + case RawTriangular: + amat = generalFromTriangular(a.RawTriangular()) + } + return s.checkOverlap(amat) +} + +// generalFromSymmetric returns a blas64.General 
with the backing +// data and dimensions of a. +func generalFromSymmetric(a blas64.Symmetric) blas64.General { + return blas64.General{ + Rows: a.N, + Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } +} + +func (t *TriDense) checkOverlap(a blas64.General) bool { + return checkOverlap(generalFromTriangular(t.RawTriangular()), a) +} + +func (t *TriDense) checkOverlapMatrix(a Matrix) bool { + if t == a { + return false + } + var amat blas64.General + switch a := a.(type) { + default: + return false + case RawMatrixer: + amat = a.RawMatrix() + case RawSymmetricer: + amat = generalFromSymmetric(a.RawSymmetric()) + case RawTriangular: + amat = generalFromTriangular(a.RawTriangular()) + } + return t.checkOverlap(amat) +} + +// generalFromTriangular returns a blas64.General with the backing +// data and dimensions of a. +func generalFromTriangular(a blas64.Triangular) blas64.General { + return blas64.General{ + Rows: a.N, + Cols: a.N, + Stride: a.Stride, + Data: a.Data, + } +} + +func (v *VecDense) checkOverlap(a blas64.Vector) bool { + mat := v.mat + if cap(mat.Data) == 0 || cap(a.Data) == 0 { + return false + } + + off := offset(mat.Data[:1], a.Data[:1]) + + if off == 0 { + // At least one element overlaps. + if mat.Inc == a.Inc && len(mat.Data) == len(a.Data) { + panic(regionIdentity) + } + panic(regionOverlap) + } + + if off > 0 && len(mat.Data) <= off { + // We know v is completely before a. + return false + } + if off < 0 && len(a.Data) <= -off { + // We know v is completely after a. + return false + } + + if mat.Inc != a.Inc { + // Too hard, so assume the worst. + panic(mismatchedStrides) + } + + if mat.Inc == 1 || off&mat.Inc == 0 { + panic(regionOverlap) + } + return false +} + +// rectanglesOverlap returns whether the strided rectangles a and b overlap +// when b is offset by off elements after a but has at least one element before +// the end of a. off must be positive. a and b have aCols and bCols respectively. 
+// +// rectanglesOverlap works by shifting both matrices left such that the left +// column of a is at 0. The column indexes are flattened by obtaining the shifted +// relative left and right column positions modulo the common stride. This allows +// direct comparison of the column offsets when the matrix backing data slices +// are known to overlap. +func rectanglesOverlap(off, aCols, bCols, stride int) bool { + if stride == 1 { + // Unit stride means overlapping data + // slices must overlap as matrices. + return true + } + + // Flatten the shifted matrix column positions + // so a starts at 0, modulo the common stride. + aTo := aCols + // The mod stride operations here make the from + // and to indexes comparable between a and b when + // the data slices of a and b overlap. + bFrom := off % stride + bTo := (bFrom + bCols) % stride + + if bTo == 0 || bFrom < bTo { + // b matrix is not wrapped: compare for + // simple overlap. + return bFrom < aTo + } + + // b strictly wraps and so must overlap with a. + return true +} diff --git a/vendor/gonum.org/v1/gonum/mat/solve.go b/vendor/gonum.org/v1/gonum/mat/solve.go new file mode 100644 index 0000000000..11813280f8 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/solve.go @@ -0,0 +1,140 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// Solve finds a minimum-norm solution to a system of linear equations defined +// by the matrices A and B. If A is singular or near-singular, a Condition error +// is returned. See the documentation for Condition for more information. +// +// The minimization problem solved depends on the input parameters: +// - if m >= n, find X such that ||A*X - B||_2 is minimized, +// - if m < n, find the minimum norm solution of A * X = B. 
+// The solution matrix, X, is stored in-place into the receiver. +func (m *Dense) Solve(a, b Matrix) error { + ar, ac := a.Dims() + br, bc := b.Dims() + if ar != br { + panic(ErrShape) + } + m.reuseAs(ac, bc) + + // TODO(btracey): Add special cases for SymDense, etc. + aU, aTrans := untranspose(a) + bU, bTrans := untranspose(b) + switch rma := aU.(type) { + case RawTriangular: + side := blas.Left + tA := blas.NoTrans + if aTrans { + tA = blas.Trans + } + + switch rm := bU.(type) { + case RawMatrixer: + if m != bU || bTrans { + if m == bU || m.checkOverlap(rm.RawMatrix()) { + tmp := getWorkspace(br, bc, false) + tmp.Copy(b) + m.Copy(tmp) + putWorkspace(tmp) + break + } + m.Copy(b) + } + default: + if m != bU { + m.Copy(b) + } else if bTrans { + // m and b share data so Copy cannot be used directly. + tmp := getWorkspace(br, bc, false) + tmp.Copy(b) + m.Copy(tmp) + putWorkspace(tmp) + } + } + + rm := rma.RawTriangular() + blas64.Trsm(side, tA, 1, rm, m.mat) + work := getFloats(3*rm.N, false) + iwork := getInts(rm.N, false) + cond := lapack64.Trcon(CondNorm, rm, work, iwork) + putFloats(work) + putInts(iwork) + if cond > ConditionTolerance { + return Condition(cond) + } + return nil + } + + switch { + case ar == ac: + if a == b { + // x = I. + if ar == 1 { + m.mat.Data[0] = 1 + return nil + } + for i := 0; i < ar; i++ { + v := m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+ac] + zero(v) + v[i] = 1 + } + return nil + } + var lu LU + lu.Factorize(a) + return lu.SolveTo(m, false, b) + case ar > ac: + var qr QR + qr.Factorize(a) + return qr.SolveTo(m, false, b) + default: + var lq LQ + lq.Factorize(a) + return lq.SolveTo(m, false, b) + } +} + +// SolveVec finds a minimum-norm solution to a system of linear equations defined +// by the matrix a and the right-hand side column vector b. If A is singular or +// near-singular, a Condition error is returned. See the documentation for +// Dense.Solve for more information. 
+func (v *VecDense) SolveVec(a Matrix, b Vector) error { + if _, bc := b.Dims(); bc != 1 { + panic(ErrShape) + } + _, c := a.Dims() + + // The Solve implementation is non-trivial, so rather than duplicate the code, + // instead recast the VecDenses as Dense and call the matrix code. + + if rv, ok := b.(RawVectorer); ok { + bmat := rv.RawVector() + if v != b { + v.checkOverlap(bmat) + } + v.reuseAs(c) + m := v.asDense() + // We conditionally create bm as m when b and v are identical + // to prevent the overlap detection code from identifying m + // and bm as overlapping but not identical. + bm := m + if v != b { + b := VecDense{mat: bmat} + bm = b.asDense() + } + return m.Solve(a, bm) + } + + v.reuseAs(c) + m := v.asDense() + return m.Solve(a, b) +} diff --git a/vendor/gonum.org/v1/gonum/mat/svd.go b/vendor/gonum.org/v1/gonum/mat/svd.go new file mode 100644 index 0000000000..2f55c4114b --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/svd.go @@ -0,0 +1,247 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack" + "gonum.org/v1/gonum/lapack/lapack64" +) + +// SVD is a type for creating and using the Singular Value Decomposition (SVD) +// of a matrix. +type SVD struct { + kind SVDKind + + s []float64 + u blas64.General + vt blas64.General +} + +// SVDKind specifies the treatment of singular vectors during an SVD +// factorization. +type SVDKind int + +const ( + // SVDNone specifies that no singular vectors should be computed during + // the decomposition. + SVDNone SVDKind = 0 + + // SVDThinU specifies the thin decomposition for U should be computed. + SVDThinU SVDKind = 1 << (iota - 1) + // SVDFullU specifies the full decomposition for U should be computed. + SVDFullU + // SVDThinV specifies the thin decomposition for V should be computed. 
+ SVDThinV + // SVDFullV specifies the full decomposition for V should be computed. + SVDFullV + + // SVDThin is a convenience value for computing both thin vectors. + SVDThin SVDKind = SVDThinU | SVDThinV + // SVDThin is a convenience value for computing both full vectors. + SVDFull SVDKind = SVDFullU | SVDFullV +) + +// succFact returns whether the receiver contains a successful factorization. +func (svd *SVD) succFact() bool { + return len(svd.s) != 0 +} + +// Factorize computes the singular value decomposition (SVD) of the input matrix A. +// The singular values of A are computed in all cases, while the singular +// vectors are optionally computed depending on the input kind. +// +// The full singular value decomposition (kind == SVDFull) is a factorization +// of an m×n matrix A of the form +// A = U * Σ * V^T +// where Σ is an m×n diagonal matrix, U is an m×m orthogonal matrix, and V is an +// n×n orthogonal matrix. The diagonal elements of Σ are the singular values of A. +// The first min(m,n) columns of U and V are, respectively, the left and right +// singular vectors of A. +// +// Significant storage space can be saved by using the thin representation of +// the SVD (kind == SVDThin) instead of the full SVD, especially if +// m >> n or m << n. The thin SVD finds +// A = U~ * Σ * V~^T +// where U~ is of size m×min(m,n), Σ is a diagonal matrix of size min(m,n)×min(m,n) +// and V~ is of size n×min(m,n). +// +// Factorize returns whether the decomposition succeeded. If the decomposition +// failed, routines that require a successful factorization will panic. +func (svd *SVD) Factorize(a Matrix, kind SVDKind) (ok bool) { + // kill previous factorization + svd.s = svd.s[:0] + svd.kind = kind + + m, n := a.Dims() + var jobU, jobVT lapack.SVDJob + + // TODO(btracey): This code should be modified to have the smaller + // matrix written in-place into aCopy when the lapack/native/dgesvd + // implementation is complete. 
+ switch { + case kind&SVDFullU != 0: + jobU = lapack.SVDAll + svd.u = blas64.General{ + Rows: m, + Cols: m, + Stride: m, + Data: use(svd.u.Data, m*m), + } + case kind&SVDThinU != 0: + jobU = lapack.SVDStore + svd.u = blas64.General{ + Rows: m, + Cols: min(m, n), + Stride: min(m, n), + Data: use(svd.u.Data, m*min(m, n)), + } + default: + jobU = lapack.SVDNone + } + switch { + case kind&SVDFullV != 0: + svd.vt = blas64.General{ + Rows: n, + Cols: n, + Stride: n, + Data: use(svd.vt.Data, n*n), + } + jobVT = lapack.SVDAll + case kind&SVDThinV != 0: + svd.vt = blas64.General{ + Rows: min(m, n), + Cols: n, + Stride: n, + Data: use(svd.vt.Data, min(m, n)*n), + } + jobVT = lapack.SVDStore + default: + jobVT = lapack.SVDNone + } + + // A is destroyed on call, so copy the matrix. + aCopy := DenseCopyOf(a) + svd.kind = kind + svd.s = use(svd.s, min(m, n)) + + work := []float64{0} + lapack64.Gesvd(jobU, jobVT, aCopy.mat, svd.u, svd.vt, svd.s, work, -1) + work = getFloats(int(work[0]), false) + ok = lapack64.Gesvd(jobU, jobVT, aCopy.mat, svd.u, svd.vt, svd.s, work, len(work)) + putFloats(work) + if !ok { + svd.kind = 0 + } + return ok +} + +// Kind returns the SVDKind of the decomposition. If no decomposition has been +// computed, Kind returns -1. +func (svd *SVD) Kind() SVDKind { + if !svd.succFact() { + return -1 + } + return svd.kind +} + +// Cond returns the 2-norm condition number for the factorized matrix. Cond will +// panic if the receiver does not contain a successful factorization. +func (svd *SVD) Cond() float64 { + if !svd.succFact() { + panic(badFact) + } + return svd.s[0] / svd.s[len(svd.s)-1] +} + +// Values returns the singular values of the factorized matrix in descending order. +// +// If the input slice is non-nil, the values will be stored in-place into +// the slice. In this case, the slice must have length min(m,n), and Values will +// panic with ErrSliceLengthMismatch otherwise. 
If the input slice is nil, a new +// slice of the appropriate length will be allocated and returned. +// +// Values will panic if the receiver does not contain a successful factorization. +func (svd *SVD) Values(s []float64) []float64 { + if !svd.succFact() { + panic(badFact) + } + if s == nil { + s = make([]float64, len(svd.s)) + } + if len(s) != len(svd.s) { + panic(ErrSliceLengthMismatch) + } + copy(s, svd.s) + return s +} + +// UTo extracts the matrix U from the singular value decomposition. The first +// min(m,n) columns are the left singular vectors and correspond to the singular +// values as returned from SVD.Values. +// +// If dst is not nil, U is stored in-place into dst, and dst must have size +// m×m if the full U was computed, size m×min(m,n) if the thin U was computed, +// and UTo panics otherwise. If dst is nil, a new matrix of the appropriate size +// is allocated and returned. +func (svd *SVD) UTo(dst *Dense) *Dense { + if !svd.succFact() { + panic(badFact) + } + kind := svd.kind + if kind&SVDThinU == 0 && kind&SVDFullU == 0 { + panic("svd: u not computed during factorization") + } + r := svd.u.Rows + c := svd.u.Cols + if dst == nil { + dst = NewDense(r, c, nil) + } else { + dst.reuseAs(r, c) + } + + tmp := &Dense{ + mat: svd.u, + capRows: r, + capCols: c, + } + dst.Copy(tmp) + + return dst +} + +// VTo extracts the matrix V from the singular value decomposition. The first +// min(m,n) columns are the right singular vectors and correspond to the singular +// values as returned from SVD.Values. +// +// If dst is not nil, V is stored in-place into dst, and dst must have size +// n×n if the full V was computed, size n×min(m,n) if the thin V was computed, +// and VTo panics otherwise. If dst is nil, a new matrix of the appropriate size +// is allocated and returned. 
+func (svd *SVD) VTo(dst *Dense) *Dense { + if !svd.succFact() { + panic(badFact) + } + kind := svd.kind + if kind&SVDThinU == 0 && kind&SVDFullV == 0 { + panic("svd: v not computed during factorization") + } + r := svd.vt.Rows + c := svd.vt.Cols + if dst == nil { + dst = NewDense(c, r, nil) + } else { + dst.reuseAs(c, r) + } + + tmp := &Dense{ + mat: svd.vt, + capRows: r, + capCols: c, + } + dst.Copy(tmp.T()) + + return dst +} diff --git a/vendor/gonum.org/v1/gonum/mat/symband.go b/vendor/gonum.org/v1/gonum/mat/symband.go new file mode 100644 index 0000000000..add9a807d3 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/symband.go @@ -0,0 +1,221 @@ +// Copyright ©2017 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + symBandDense *SymBandDense + _ Matrix = symBandDense + _ Symmetric = symBandDense + _ Banded = symBandDense + _ SymBanded = symBandDense + _ RawSymBander = symBandDense + _ MutableSymBanded = symBandDense + + _ NonZeroDoer = symBandDense + _ RowNonZeroDoer = symBandDense + _ ColNonZeroDoer = symBandDense +) + +// SymBandDense represents a symmetric band matrix in dense storage format. +type SymBandDense struct { + mat blas64.SymmetricBand +} + +// SymBanded is a symmetric band matrix interface type. +type SymBanded interface { + Banded + + // Symmetric returns the number of rows/columns in the matrix. + Symmetric() int + + // SymBand returns the number of rows/columns in the matrix, and the size of + // the bandwidth. + SymBand() (n, k int) +} + +// MutableSymBanded is a symmetric band matrix interface type that allows elements +// to be altered. +type MutableSymBanded interface { + SymBanded + SetSymBand(i, j int, v float64) +} + +// A RawSymBander can return a blas64.SymmetricBand representation of the receiver. 
+// Changes to the blas64.SymmetricBand.Data slice will be reflected in the original +// matrix, changes to the N, K, Stride and Uplo fields will not. +type RawSymBander interface { + RawSymBand() blas64.SymmetricBand +} + +// NewSymBandDense creates a new SymBand matrix with n rows and columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*(k+1), +// data is used as the backing slice, and changes to the elements of the returned +// SymBandDense will be reflected in data. If neither of these is true, NewSymBandDense +// will panic. k must be at least zero and less than n, otherwise NewSymBandDense will panic. +// +// The data must be arranged in row-major order constructed by removing the zeros +// from the rows outside the band and aligning the diagonals. SymBandDense matrices +// are stored in the upper triangle. For example, the matrix +// 1 2 3 0 0 0 +// 2 4 5 6 0 0 +// 3 5 7 8 9 0 +// 0 6 8 10 11 12 +// 0 0 9 11 13 14 +// 0 0 0 12 14 15 +// becomes (* entries are never accessed) +// 1 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 * +// 15 * * +// which is passed to NewSymBandDense as []float64{1, 2, ..., 15, *, *, *} with k=2. +// Only the values in the band portion of the matrix are used. +func NewSymBandDense(n, k int, data []float64) *SymBandDense { + if n <= 0 || k < 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if k+1 > n { + panic("mat: band out of range") + } + bc := k + 1 + if data != nil && len(data) != n*bc { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*bc) + } + return &SymBandDense{ + mat: blas64.SymmetricBand{ + N: n, + K: k, + Stride: bc, + Uplo: blas.Upper, + Data: data, + }, + } +} + +// Dims returns the number of rows and columns in the matrix. +func (s *SymBandDense) Dims() (r, c int) { + return s.mat.N, s.mat.N +} + +// Symmetric returns the size of the receiver. 
+func (s *SymBandDense) Symmetric() int { + return s.mat.N +} + +// Bandwidth returns the bandwidths of the matrix. +func (s *SymBandDense) Bandwidth() (kl, ku int) { + return s.mat.K, s.mat.K +} + +// SymBand returns the number of rows/columns in the matrix, and the size of +// the bandwidth. +func (s *SymBandDense) SymBand() (n, k int) { + return s.mat.N, s.mat.K +} + +// T implements the Matrix interface. Symmetric matrices, by definition, are +// equal to their transpose, and this is a no-op. +func (s *SymBandDense) T() Matrix { + return s +} + +// TBand implements the Banded interface. +func (s *SymBandDense) TBand() Banded { + return s +} + +// RawSymBand returns the underlying blas64.SymBand used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in returned blas64.SymBand. +func (s *SymBandDense) RawSymBand() blas64.SymmetricBand { + return s.mat +} + +// SetRawSymBand sets the underlying blas64.SymmetricBand used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +// +// The supplied SymmetricBand must use blas.Upper storage format. +func (s *SymBandDense) SetRawSymBand(mat blas64.SymmetricBand) { + if mat.Uplo != blas.Upper { + panic("mat: blas64.SymmetricBand does not have blas.Upper storage") + } + s.mat = mat +} + +// Zero sets all of the matrix elements to zero. +func (s *SymBandDense) Zero() { + for i := 0; i < s.mat.N; i++ { + u := min(1+s.mat.K, s.mat.N-i) + zero(s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+u]) + } +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (s *SymBandDense) DiagView() Diagonal { + n := s.mat.N + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: s.mat.Stride, + Data: s.mat.Data[:(n-1)*s.mat.Stride+1], + }, + } +} + +// DoNonZero calls the function fn for each of the non-zero elements of s. The function fn +// takes a row/column index and the element value of s at (i, j). 
+func (s *SymBandDense) DoNonZero(fn func(i, j int, v float64)) { + for i := 0; i < s.mat.N; i++ { + for j := max(0, i-s.mat.K); j < min(s.mat.N, i+s.mat.K+1); j++ { + v := s.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// DoRowNonZero calls the function fn for each of the non-zero elements of row i of s. The function fn +// takes a row/column index and the element value of s at (i, j). +func (s *SymBandDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { + if i < 0 || s.mat.N <= i { + panic(ErrRowAccess) + } + for j := max(0, i-s.mat.K); j < min(s.mat.N, i+s.mat.K+1); j++ { + v := s.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} + +// DoColNonZero calls the function fn for each of the non-zero elements of column j of s. The function fn +// takes a row/column index and the element value of s at (i, j). +func (s *SymBandDense) DoColNonZero(j int, fn func(i, j int, v float64)) { + if j < 0 || s.mat.N <= j { + panic(ErrColAccess) + } + for i := 0; i < s.mat.N; i++ { + if i-s.mat.K <= j && j < i+s.mat.K+1 { + v := s.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/symmetric.go b/vendor/gonum.org/v1/gonum/mat/symmetric.go new file mode 100644 index 0000000000..2ea5bdb039 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/symmetric.go @@ -0,0 +1,602 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + symDense *SymDense + + _ Matrix = symDense + _ Symmetric = symDense + _ RawSymmetricer = symDense + _ MutableSymmetric = symDense +) + +const ( + badSymTriangle = "mat: blas64.Symmetric not upper" + badSymCap = "mat: bad capacity for SymDense" +) + +// SymDense is a symmetric matrix that uses dense storage. SymDense +// matrices are stored in the upper triangle. 
+type SymDense struct { + mat blas64.Symmetric + cap int +} + +// Symmetric represents a symmetric matrix (where the element at {i, j} equals +// the element at {j, i}). Symmetric matrices are always square. +type Symmetric interface { + Matrix + // Symmetric returns the number of rows/columns in the matrix. + Symmetric() int +} + +// A RawSymmetricer can return a view of itself as a BLAS Symmetric matrix. +type RawSymmetricer interface { + RawSymmetric() blas64.Symmetric +} + +// A MutableSymmetric can set elements of a symmetric matrix. +type MutableSymmetric interface { + Symmetric + SetSym(i, j int, v float64) +} + +// NewSymDense creates a new Symmetric matrix with n rows and columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*n, data is +// used as the backing slice, and changes to the elements of the returned SymDense +// will be reflected in data. If neither of these is true, NewSymDense will panic. +// NewSymDense will panic if n is zero. +// +// The data must be arranged in row-major order, i.e. the (i*c + j)-th +// element in the data slice is the {i, j}-th element in the matrix. +// Only the values in the upper triangular portion of the matrix are used. +func NewSymDense(n int, data []float64) *SymDense { + if n <= 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data != nil && n*n != len(data) { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*n) + } + return &SymDense{ + mat: blas64.Symmetric{ + N: n, + Stride: n, + Data: data, + Uplo: blas.Upper, + }, + cap: n, + } +} + +// Dims returns the number of rows and columns in the matrix. +func (s *SymDense) Dims() (r, c int) { + return s.mat.N, s.mat.N +} + +// Caps returns the number of rows and columns in the backing matrix. +func (s *SymDense) Caps() (r, c int) { + return s.cap, s.cap +} + +// T returns the receiver, the transpose of a symmetric matrix. 
+func (s *SymDense) T() Matrix { + return s +} + +// Symmetric implements the Symmetric interface and returns the number of rows +// and columns in the matrix. +func (s *SymDense) Symmetric() int { + return s.mat.N +} + +// RawSymmetric returns the matrix as a blas64.Symmetric. The returned +// value must be stored in upper triangular format. +func (s *SymDense) RawSymmetric() blas64.Symmetric { + return s.mat +} + +// SetRawSymmetric sets the underlying blas64.Symmetric used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +// +// The supplied Symmetric must use blas.Upper storage format. +func (s *SymDense) SetRawSymmetric(mat blas64.Symmetric) { + if mat.Uplo != blas.Upper { + panic(badSymTriangle) + } + s.mat = mat +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (s *SymDense) Reset() { + // N and Stride must be zeroed in unison. + s.mat.N, s.mat.Stride = 0, 0 + s.mat.Data = s.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. +func (s *SymDense) Zero() { + for i := 0; i < s.mat.N; i++ { + zero(s.mat.Data[i*s.mat.Stride+i : i*s.mat.Stride+s.mat.N]) + } +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. SymDense matrices can be zeroed using Reset. +func (s *SymDense) IsZero() bool { + // It must be the case that m.Dims() returns + // zeros in this case. See comment in Reset(). + return s.mat.N == 0 +} + +// reuseAs resizes an empty matrix to a n×n matrix, +// or checks that a non-empty matrix is n×n. 
+func (s *SymDense) reuseAs(n int) { + if n == 0 { + panic(ErrZeroLength) + } + if s.mat.N > s.cap { + panic(badSymCap) + } + if s.IsZero() { + s.mat = blas64.Symmetric{ + N: n, + Stride: n, + Data: use(s.mat.Data, n*n), + Uplo: blas.Upper, + } + s.cap = n + return + } + if s.mat.Uplo != blas.Upper { + panic(badSymTriangle) + } + if s.mat.N != n { + panic(ErrShape) + } +} + +func (s *SymDense) isolatedWorkspace(a Symmetric) (w *SymDense, restore func()) { + n := a.Symmetric() + if n == 0 { + panic(ErrZeroLength) + } + w = getWorkspaceSym(n, false) + return w, func() { + s.CopySym(w) + putWorkspaceSym(w) + } +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (s *SymDense) DiagView() Diagonal { + n := s.mat.N + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: s.mat.Stride + 1, + Data: s.mat.Data[:(n-1)*s.mat.Stride+n], + }, + } +} + +func (s *SymDense) AddSym(a, b Symmetric) { + n := a.Symmetric() + if n != b.Symmetric() { + panic(ErrShape) + } + s.reuseAs(n) + + if a, ok := a.(RawSymmetricer); ok { + if b, ok := b.(RawSymmetricer); ok { + amat, bmat := a.RawSymmetric(), b.RawSymmetric() + if s != a { + s.checkOverlap(generalFromSymmetric(amat)) + } + if s != b { + s.checkOverlap(generalFromSymmetric(bmat)) + } + for i := 0; i < n; i++ { + btmp := bmat.Data[i*bmat.Stride+i : i*bmat.Stride+n] + stmp := s.mat.Data[i*s.mat.Stride+i : i*s.mat.Stride+n] + for j, v := range amat.Data[i*amat.Stride+i : i*amat.Stride+n] { + stmp[j] = v + btmp[j] + } + } + return + } + } + + s.checkOverlapMatrix(a) + s.checkOverlapMatrix(b) + for i := 0; i < n; i++ { + stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] + for j := i; j < n; j++ { + stmp[j] = a.At(i, j) + b.At(i, j) + } + } +} + +func (s *SymDense) CopySym(a Symmetric) int { + n := a.Symmetric() + n = min(n, s.mat.N) + if n == 0 { + return 0 + } + switch a := a.(type) { + case RawSymmetricer: + amat := a.RawSymmetric() + if amat.Uplo != blas.Upper { + panic(badSymTriangle) + } + for 
i := 0; i < n; i++ { + copy(s.mat.Data[i*s.mat.Stride+i:i*s.mat.Stride+n], amat.Data[i*amat.Stride+i:i*amat.Stride+n]) + } + default: + for i := 0; i < n; i++ { + stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] + for j := i; j < n; j++ { + stmp[j] = a.At(i, j) + } + } + } + return n +} + +// SymRankOne performs a symetric rank-one update to the matrix a and stores +// the result in the receiver +// s = a + alpha * x * x' +func (s *SymDense) SymRankOne(a Symmetric, alpha float64, x Vector) { + n, c := x.Dims() + if a.Symmetric() != n || c != 1 { + panic(ErrShape) + } + s.reuseAs(n) + + if s != a { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + s.CopySym(a) + } + + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat := rv.RawVector() + s.checkOverlap((&VecDense{mat: xmat}).asGeneral()) + blas64.Syr(alpha, xmat, s.mat) + return + } + + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.set(i, j, s.at(i, j)+alpha*x.AtVec(i)*x.AtVec(j)) + } + } +} + +// SymRankK performs a symmetric rank-k update to the matrix a and stores the +// result into the receiver. If a is zero, see SymOuterK. +// s = a + alpha * x * x' +func (s *SymDense) SymRankK(a Symmetric, alpha float64, x Matrix) { + n := a.Symmetric() + r, _ := x.Dims() + if r != n { + panic(ErrShape) + } + xMat, aTrans := untranspose(x) + var g blas64.General + if rm, ok := xMat.(RawMatrixer); ok { + g = rm.RawMatrix() + } else { + g = DenseCopyOf(x).mat + aTrans = false + } + if a != s { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + s.reuseAs(n) + s.CopySym(a) + } + t := blas.NoTrans + if aTrans { + t = blas.Trans + } + blas64.Syrk(t, alpha, g, 1, s.mat) +} + +// SymOuterK calculates the outer product of x with itself and stores +// the result into the receiver. It is equivalent to the matrix +// multiplication +// s = alpha * x * x'. 
+// In order to update an existing matrix, see SymRankOne. +func (s *SymDense) SymOuterK(alpha float64, x Matrix) { + n, _ := x.Dims() + switch { + case s.IsZero(): + s.mat = blas64.Symmetric{ + N: n, + Stride: n, + Data: useZeroed(s.mat.Data, n*n), + Uplo: blas.Upper, + } + s.cap = n + s.SymRankK(s, alpha, x) + case s.mat.Uplo != blas.Upper: + panic(badSymTriangle) + case s.mat.N == n: + if s == x { + w := getWorkspaceSym(n, true) + w.SymRankK(w, alpha, x) + s.CopySym(w) + putWorkspaceSym(w) + } else { + switch r := x.(type) { + case RawMatrixer: + s.checkOverlap(r.RawMatrix()) + case RawSymmetricer: + s.checkOverlap(generalFromSymmetric(r.RawSymmetric())) + case RawTriangular: + s.checkOverlap(generalFromTriangular(r.RawTriangular())) + } + // Only zero the upper triangle. + for i := 0; i < n; i++ { + ri := i * s.mat.Stride + zero(s.mat.Data[ri+i : ri+n]) + } + s.SymRankK(s, alpha, x) + } + default: + panic(ErrShape) + } +} + +// RankTwo performs a symmmetric rank-two update to the matrix a and stores +// the result in the receiver +// m = a + alpha * (x * y' + y * x') +func (s *SymDense) RankTwo(a Symmetric, alpha float64, x, y Vector) { + n := s.mat.N + xr, xc := x.Dims() + if xr != n || xc != 1 { + panic(ErrShape) + } + yr, yc := y.Dims() + if yr != n || yc != 1 { + panic(ErrShape) + } + + if s != a { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + } + + var xmat, ymat blas64.Vector + fast := true + xU, _ := untranspose(x) + if rv, ok := xU.(RawVectorer); ok { + xmat = rv.RawVector() + s.checkOverlap((&VecDense{mat: xmat}).asGeneral()) + } else { + fast = false + } + yU, _ := untranspose(y) + if rv, ok := yU.(RawVectorer); ok { + ymat = rv.RawVector() + s.checkOverlap((&VecDense{mat: ymat}).asGeneral()) + } else { + fast = false + } + + if s != a { + if rs, ok := a.(RawSymmetricer); ok { + s.checkOverlap(generalFromSymmetric(rs.RawSymmetric())) + } + s.reuseAs(n) + s.CopySym(a) + } + + if fast { + if s != 
a { + s.reuseAs(n) + s.CopySym(a) + } + blas64.Syr2(alpha, xmat, ymat, s.mat) + return + } + + for i := 0; i < n; i++ { + s.reuseAs(n) + for j := i; j < n; j++ { + s.set(i, j, a.At(i, j)+alpha*(x.AtVec(i)*y.AtVec(j)+y.AtVec(i)*x.AtVec(j))) + } + } +} + +// ScaleSym multiplies the elements of a by f, placing the result in the receiver. +func (s *SymDense) ScaleSym(f float64, a Symmetric) { + n := a.Symmetric() + s.reuseAs(n) + if a, ok := a.(RawSymmetricer); ok { + amat := a.RawSymmetric() + if s != a { + s.checkOverlap(generalFromSymmetric(amat)) + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.mat.Data[i*s.mat.Stride+j] = f * amat.Data[i*amat.Stride+j] + } + } + return + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.mat.Data[i*s.mat.Stride+j] = f * a.At(i, j) + } + } +} + +// SubsetSym extracts a subset of the rows and columns of the matrix a and stores +// the result in-place into the receiver. The resulting matrix size is +// len(set)×len(set). Specifically, at the conclusion of SubsetSym, +// s.At(i, j) equals a.At(set[i], set[j]). Note that the supplied set does not +// have to be a strict subset, dimension repeats are allowed. 
+func (s *SymDense) SubsetSym(a Symmetric, set []int) { + n := len(set) + na := a.Symmetric() + s.reuseAs(n) + var restore func() + if a == s { + s, restore = s.isolatedWorkspace(a) + defer restore() + } + + if a, ok := a.(RawSymmetricer); ok { + raw := a.RawSymmetric() + if s != a { + s.checkOverlap(generalFromSymmetric(raw)) + } + for i := 0; i < n; i++ { + ssub := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] + r := set[i] + rsub := raw.Data[r*raw.Stride : r*raw.Stride+na] + for j := i; j < n; j++ { + c := set[j] + if r <= c { + ssub[j] = rsub[c] + } else { + ssub[j] = raw.Data[c*raw.Stride+r] + } + } + } + return + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + s.mat.Data[i*s.mat.Stride+j] = a.At(set[i], set[j]) + } + } +} + +// SliceSym returns a new Matrix that shares backing data with the receiver. +// The returned matrix starts at {i,i} of the receiver and extends k-i rows +// and columns. The final row and column in the resulting matrix is k-1. +// SliceSym panics with ErrIndexOutOfRange if the slice is outside the +// capacity of the receiver. +func (s *SymDense) SliceSym(i, k int) Symmetric { + sz := s.cap + if i < 0 || sz < i || k < i || sz < k { + panic(ErrIndexOutOfRange) + } + v := *s + v.mat.Data = s.mat.Data[i*s.mat.Stride+i : (k-1)*s.mat.Stride+k] + v.mat.N = k - i + v.cap = s.cap - i + return &v +} + +// Trace returns the trace of the matrix. +func (s *SymDense) Trace() float64 { + // TODO(btracey): could use internal asm sum routine. + var v float64 + for i := 0; i < s.mat.N; i++ { + v += s.mat.Data[i*s.mat.Stride+i] + } + return v +} + +// GrowSym returns the receiver expanded by n rows and n columns. If the +// dimensions of the expanded matrix are outside the capacity of the receiver +// a new allocation is made, otherwise not. Note that the receiver itself is +// not modified during the call to GrowSquare. 
+func (s *SymDense) GrowSym(n int) Symmetric { + if n < 0 { + panic(ErrIndexOutOfRange) + } + if n == 0 { + return s + } + var v SymDense + n += s.mat.N + if n > s.cap { + v.mat = blas64.Symmetric{ + N: n, + Stride: n, + Uplo: blas.Upper, + Data: make([]float64, n*n), + } + v.cap = n + // Copy elements, including those not currently visible. Use a temporary + // structure to avoid modifying the receiver. + var tmp SymDense + tmp.mat = blas64.Symmetric{ + N: s.cap, + Stride: s.mat.Stride, + Data: s.mat.Data, + Uplo: s.mat.Uplo, + } + tmp.cap = s.cap + v.CopySym(&tmp) + return &v + } + v.mat = blas64.Symmetric{ + N: n, + Stride: s.mat.Stride, + Uplo: blas.Upper, + Data: s.mat.Data[:(n-1)*s.mat.Stride+n], + } + v.cap = s.cap + return &v +} + +// PowPSD computes a^pow where a is a positive symmetric definite matrix. +// +// PowPSD returns an error if the matrix is not not positive symmetric definite +// or the Eigendecomposition is not successful. +func (s *SymDense) PowPSD(a Symmetric, pow float64) error { + dim := a.Symmetric() + s.reuseAs(dim) + + var eigen EigenSym + ok := eigen.Factorize(a, true) + if !ok { + return ErrFailedEigen + } + values := eigen.Values(nil) + for i, v := range values { + if v <= 0 { + return ErrNotPSD + } + values[i] = math.Pow(v, pow) + } + u := eigen.VectorsTo(nil) + + s.SymOuterK(values[0], u.ColView(0)) + + var v VecDense + for i := 1; i < dim; i++ { + v.ColViewOf(u, i) + s.SymRankOne(s, values[i], &v) + } + return nil +} diff --git a/vendor/gonum.org/v1/gonum/mat/triangular.go b/vendor/gonum.org/v1/gonum/mat/triangular.go new file mode 100644 index 0000000000..e32ee40549 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/triangular.go @@ -0,0 +1,659 @@ +// Copyright ©2015 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mat + +import ( + "math" + + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/lapack/lapack64" +) + +var ( + triDense *TriDense + _ Matrix = triDense + _ Triangular = triDense + _ RawTriangular = triDense + _ MutableTriangular = triDense + + _ NonZeroDoer = triDense + _ RowNonZeroDoer = triDense + _ ColNonZeroDoer = triDense +) + +const badTriCap = "mat: bad capacity for TriDense" + +// TriDense represents an upper or lower triangular matrix in dense storage +// format. +type TriDense struct { + mat blas64.Triangular + cap int +} + +// Triangular represents a triangular matrix. Triangular matrices are always square. +type Triangular interface { + Matrix + // Triangle returns the number of rows/columns in the matrix and its + // orientation. + Triangle() (n int, kind TriKind) + + // TTri is the equivalent of the T() method in the Matrix interface but + // guarantees the transpose is of triangular type. + TTri() Triangular +} + +// A RawTriangular can return a blas64.Triangular representation of the receiver. +// Changes to the blas64.Triangular.Data slice will be reflected in the original +// matrix, changes to the N, Stride, Uplo and Diag fields will not. +type RawTriangular interface { + RawTriangular() blas64.Triangular +} + +// A MutableTriangular can set elements of a triangular matrix. +type MutableTriangular interface { + Triangular + SetTri(i, j int, v float64) +} + +var ( + _ Matrix = TransposeTri{} + _ Triangular = TransposeTri{} + _ UntransposeTrier = TransposeTri{} +) + +// TransposeTri is a type for performing an implicit transpose of a Triangular +// matrix. It implements the Triangular interface, returning values from the +// transpose of the matrix within. +type TransposeTri struct { + Triangular Triangular +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Triangular field. 
+func (t TransposeTri) At(i, j int) float64 { + return t.Triangular.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. Triangular matrices are +// square and thus this is the same size as the original Triangular. +func (t TransposeTri) Dims() (r, c int) { + c, r = t.Triangular.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Triangular field. +func (t TransposeTri) T() Matrix { + return t.Triangular +} + +// Triangle returns the number of rows/columns in the matrix and its orientation. +func (t TransposeTri) Triangle() (int, TriKind) { + n, upper := t.Triangular.Triangle() + return n, !upper +} + +// TTri performs an implicit transpose by returning the Triangular field. +func (t TransposeTri) TTri() Triangular { + return t.Triangular +} + +// Untranspose returns the Triangular field. +func (t TransposeTri) Untranspose() Matrix { + return t.Triangular +} + +func (t TransposeTri) UntransposeTri() Triangular { + return t.Triangular +} + +// NewTriDense creates a new Triangular matrix with n rows and columns. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*n, data is +// used as the backing slice, and changes to the elements of the returned TriDense +// will be reflected in data. If neither of these is true, NewTriDense will panic. +// NewTriDense will panic if n is zero. +// +// The data must be arranged in row-major order, i.e. the (i*c + j)-th +// element in the data slice is the {i, j}-th element in the matrix. +// Only the values in the triangular portion corresponding to kind are used. 
+func NewTriDense(n int, kind TriKind, data []float64) *TriDense { + if n <= 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if data != nil && len(data) != n*n { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*n) + } + uplo := blas.Lower + if kind == Upper { + uplo = blas.Upper + } + return &TriDense{ + mat: blas64.Triangular{ + N: n, + Stride: n, + Data: data, + Uplo: uplo, + Diag: blas.NonUnit, + }, + cap: n, + } +} + +func (t *TriDense) Dims() (r, c int) { + return t.mat.N, t.mat.N +} + +// Triangle returns the dimension of t and its orientation. The returned +// orientation is only valid when n is not zero. +func (t *TriDense) Triangle() (n int, kind TriKind) { + return t.mat.N, t.triKind() +} + +func (t *TriDense) isUpper() bool { + return isUpperUplo(t.mat.Uplo) +} + +func (t *TriDense) triKind() TriKind { + return TriKind(isUpperUplo(t.mat.Uplo)) +} + +func isUpperUplo(u blas.Uplo) bool { + switch u { + case blas.Upper: + return true + case blas.Lower: + return false + default: + panic(badTriangle) + } +} + +func uploToTriKind(u blas.Uplo) TriKind { + switch u { + case blas.Upper: + return Upper + case blas.Lower: + return Lower + default: + panic(badTriangle) + } +} + +// asSymBlas returns the receiver restructured as a blas64.Symmetric with the +// same backing memory. Panics if the receiver is unit. +// This returns a blas64.Symmetric and not a *SymDense because SymDense can only +// be upper triangular. +func (t *TriDense) asSymBlas() blas64.Symmetric { + if t.mat.Diag == blas.Unit { + panic("mat: cannot convert unit TriDense into blas64.Symmetric") + } + return blas64.Symmetric{ + N: t.mat.N, + Stride: t.mat.Stride, + Data: t.mat.Data, + Uplo: t.mat.Uplo, + } +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (t *TriDense) T() Matrix { + return Transpose{t} +} + +// TTri performs an implicit transpose by returning the receiver inside a TransposeTri. 
+func (t *TriDense) TTri() Triangular { + return TransposeTri{t} +} + +func (t *TriDense) RawTriangular() blas64.Triangular { + return t.mat +} + +// SetRawTriangular sets the underlying blas64.Triangular used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +// +// The supplied Triangular must not use blas.Unit storage format. +func (t *TriDense) SetRawTriangular(mat blas64.Triangular) { + if mat.Diag == blas.Unit { + panic("mat: cannot set TriDense with Unit storage format") + } + t.mat = mat +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (t *TriDense) Reset() { + // N and Stride must be zeroed in unison. + t.mat.N, t.mat.Stride = 0, 0 + // Defensively zero Uplo to ensure + // it is set correctly later. + t.mat.Uplo = 0 + t.mat.Data = t.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. +func (t *TriDense) Zero() { + if t.isUpper() { + for i := 0; i < t.mat.N; i++ { + zero(t.mat.Data[i*t.mat.Stride+i : i*t.mat.Stride+t.mat.N]) + } + return + } + for i := 0; i < t.mat.N; i++ { + zero(t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+i+1]) + } +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. TriDense matrices can be zeroed using Reset. +func (t *TriDense) IsZero() bool { + // It must be the case that t.Dims() returns + // zeros in this case. See comment in Reset(). + return t.mat.Stride == 0 +} + +// untranspose untransposes a matrix if applicable. If a is an Untransposer, then +// untranspose returns the underlying matrix and true. If it is not, then it returns +// the input matrix and false. 
+func untransposeTri(a Triangular) (Triangular, bool) { + if ut, ok := a.(UntransposeTrier); ok { + return ut.UntransposeTri(), true + } + return a, false +} + +// reuseAs resizes a zero receiver to an n×n triangular matrix with the given +// orientation. If the receiver is non-zero, reuseAs checks that the receiver +// is the correct size and orientation. +func (t *TriDense) reuseAs(n int, kind TriKind) { + if n == 0 { + panic(ErrZeroLength) + } + ul := blas.Lower + if kind == Upper { + ul = blas.Upper + } + if t.mat.N > t.cap { + panic(badTriCap) + } + if t.IsZero() { + t.mat = blas64.Triangular{ + N: n, + Stride: n, + Diag: blas.NonUnit, + Data: use(t.mat.Data, n*n), + Uplo: ul, + } + t.cap = n + return + } + if t.mat.N != n { + panic(ErrShape) + } + if t.mat.Uplo != ul { + panic(ErrTriangle) + } +} + +// isolatedWorkspace returns a new TriDense matrix w with the size of a and +// returns a callback to defer which performs cleanup at the return of the call. +// This should be used when a method receiver is the same pointer as an input argument. +func (t *TriDense) isolatedWorkspace(a Triangular) (w *TriDense, restore func()) { + n, kind := a.Triangle() + if n == 0 { + panic(ErrZeroLength) + } + w = getWorkspaceTri(n, kind, false) + return w, func() { + t.Copy(w) + putWorkspaceTri(w) + } +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (t *TriDense) DiagView() Diagonal { + if t.mat.Diag == blas.Unit { + panic("mat: cannot take view of Unit diagonal") + } + n := t.mat.N + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: t.mat.Stride + 1, + Data: t.mat.Data[:(n-1)*t.mat.Stride+n], + }, + } +} + +// Copy makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two matrices and +// returns the number of rows and columns it copied. Only elements within the +// receiver's non-zero triangle are set. 
+// +// See the Copier interface for more information. +func (t *TriDense) Copy(a Matrix) (r, c int) { + r, c = a.Dims() + r = min(r, t.mat.N) + c = min(c, t.mat.N) + if r == 0 || c == 0 { + return 0, 0 + } + + switch a := a.(type) { + case RawMatrixer: + amat := a.RawMatrix() + if t.isUpper() { + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride+i:i*t.mat.Stride+c], amat.Data[i*amat.Stride+i:i*amat.Stride+c]) + } + } else { + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride:i*t.mat.Stride+i+1], amat.Data[i*amat.Stride:i*amat.Stride+i+1]) + } + } + case RawTriangular: + amat := a.RawTriangular() + aIsUpper := isUpperUplo(amat.Uplo) + tIsUpper := t.isUpper() + switch { + case tIsUpper && aIsUpper: + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride+i:i*t.mat.Stride+c], amat.Data[i*amat.Stride+i:i*amat.Stride+c]) + } + case !tIsUpper && !aIsUpper: + for i := 0; i < r; i++ { + copy(t.mat.Data[i*t.mat.Stride:i*t.mat.Stride+i+1], amat.Data[i*amat.Stride:i*amat.Stride+i+1]) + } + default: + for i := 0; i < r; i++ { + t.set(i, i, amat.Data[i*amat.Stride+i]) + } + } + default: + isUpper := t.isUpper() + for i := 0; i < r; i++ { + if isUpper { + for j := i; j < c; j++ { + t.set(i, j, a.At(i, j)) + } + } else { + for j := 0; j <= i; j++ { + t.set(i, j, a.At(i, j)) + } + } + } + } + + return r, c +} + +// InverseTri computes the inverse of the triangular matrix a, storing the result +// into the receiver. If a is ill-conditioned, a Condition error will be returned. +// Note that matrix inversion is numerically unstable, and should generally be +// avoided where possible, for example by using the Solve routines. 
+func (t *TriDense) InverseTri(a Triangular) error { + t.checkOverlapMatrix(a) + n, _ := a.Triangle() + t.reuseAs(a.Triangle()) + t.Copy(a) + work := getFloats(3*n, false) + iwork := getInts(n, false) + cond := lapack64.Trcon(CondNorm, t.mat, work, iwork) + putFloats(work) + putInts(iwork) + if math.IsInf(cond, 1) { + return Condition(cond) + } + ok := lapack64.Trtri(t.mat) + if !ok { + return Condition(math.Inf(1)) + } + if cond > ConditionTolerance { + return Condition(cond) + } + return nil +} + +// MulTri takes the product of triangular matrices a and b and places the result +// in the receiver. The size of a and b must match, and they both must have the +// same TriKind, or Mul will panic. +func (t *TriDense) MulTri(a, b Triangular) { + n, kind := a.Triangle() + nb, kindb := b.Triangle() + if n != nb { + panic(ErrShape) + } + if kind != kindb { + panic(ErrTriangle) + } + + aU, _ := untransposeTri(a) + bU, _ := untransposeTri(b) + t.checkOverlapMatrix(bU) + t.checkOverlapMatrix(aU) + t.reuseAs(n, kind) + var restore func() + if t == aU { + t, restore = t.isolatedWorkspace(aU) + defer restore() + } else if t == bU { + t, restore = t.isolatedWorkspace(bU) + defer restore() + } + + // TODO(btracey): Improve the set of fast-paths. + if kind == Upper { + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + var v float64 + for k := i; k <= j; k++ { + v += a.At(i, k) * b.At(k, j) + } + t.SetTri(i, j, v) + } + } + return + } + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + var v float64 + for k := j; k <= i; k++ { + v += a.At(i, k) * b.At(k, j) + } + t.SetTri(i, j, v) + } + } +} + +// ScaleTri multiplies the elements of a by f, placing the result in the receiver. +// If the receiver is non-zero, the size and kind of the receiver must match +// the input, or ScaleTri will panic. +func (t *TriDense) ScaleTri(f float64, a Triangular) { + n, kind := a.Triangle() + t.reuseAs(n, kind) + + // TODO(btracey): Improve the set of fast-paths. 
+ switch a := a.(type) { + case RawTriangular: + amat := a.RawTriangular() + if t != a { + t.checkOverlap(generalFromTriangular(amat)) + } + if kind == Upper { + for i := 0; i < n; i++ { + ts := t.mat.Data[i*t.mat.Stride+i : i*t.mat.Stride+n] + as := amat.Data[i*amat.Stride+i : i*amat.Stride+n] + for i, v := range as { + ts[i] = v * f + } + } + return + } + for i := 0; i < n; i++ { + ts := t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+i+1] + as := amat.Data[i*amat.Stride : i*amat.Stride+i+1] + for i, v := range as { + ts[i] = v * f + } + } + return + default: + t.checkOverlapMatrix(a) + isUpper := kind == Upper + for i := 0; i < n; i++ { + if isUpper { + for j := i; j < n; j++ { + t.set(i, j, f*a.At(i, j)) + } + } else { + for j := 0; j <= i; j++ { + t.set(i, j, f*a.At(i, j)) + } + } + } + } +} + +// Trace returns the trace of the matrix. +func (t *TriDense) Trace() float64 { + // TODO(btracey): could use internal asm sum routine. + var v float64 + for i := 0; i < t.mat.N; i++ { + v += t.mat.Data[i*t.mat.Stride+i] + } + return v +} + +// copySymIntoTriangle copies a symmetric matrix into a TriDense +func copySymIntoTriangle(t *TriDense, s Symmetric) { + n, upper := t.Triangle() + ns := s.Symmetric() + if n != ns { + panic("mat: triangle size mismatch") + } + ts := t.mat.Stride + if rs, ok := s.(RawSymmetricer); ok { + sd := rs.RawSymmetric() + ss := sd.Stride + if upper { + if sd.Uplo == blas.Upper { + for i := 0; i < n; i++ { + copy(t.mat.Data[i*ts+i:i*ts+n], sd.Data[i*ss+i:i*ss+n]) + } + return + } + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + t.mat.Data[i*ts+j] = sd.Data[j*ss+i] + } + } + return + } + if sd.Uplo == blas.Upper { + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + t.mat.Data[i*ts+j] = sd.Data[j*ss+i] + } + } + return + } + for i := 0; i < n; i++ { + copy(t.mat.Data[i*ts:i*ts+i+1], sd.Data[i*ss:i*ss+i+1]) + } + return + } + if upper { + for i := 0; i < n; i++ { + for j := i; j < n; j++ { + t.mat.Data[i*ts+j] = s.At(i, j) + } + } + 
return + } + for i := 0; i < n; i++ { + for j := 0; j <= i; j++ { + t.mat.Data[i*ts+j] = s.At(i, j) + } + } +} + +// DoNonZero calls the function fn for each of the non-zero elements of t. The function fn +// takes a row/column index and the element value of t at (i, j). +func (t *TriDense) DoNonZero(fn func(i, j int, v float64)) { + if t.isUpper() { + for i := 0; i < t.mat.N; i++ { + for j := i; j < t.mat.N; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } + return + } + for i := 0; i < t.mat.N; i++ { + for j := 0; j <= i; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + } +} + +// DoRowNonZero calls the function fn for each of the non-zero elements of row i of t. The function fn +// takes a row/column index and the element value of t at (i, j). +func (t *TriDense) DoRowNonZero(i int, fn func(i, j int, v float64)) { + if i < 0 || t.mat.N <= i { + panic(ErrRowAccess) + } + if t.isUpper() { + for j := i; j < t.mat.N; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + return + } + for j := 0; j <= i; j++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} + +// DoColNonZero calls the function fn for each of the non-zero elements of column j of t. The function fn +// takes a row/column index and the element value of t at (i, j). +func (t *TriDense) DoColNonZero(j int, fn func(i, j int, v float64)) { + if j < 0 || t.mat.N <= j { + panic(ErrColAccess) + } + if t.isUpper() { + for i := 0; i <= j; i++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } + return + } + for i := j; i < t.mat.N; i++ { + v := t.at(i, j) + if v != 0 { + fn(i, j, v) + } + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/triband.go b/vendor/gonum.org/v1/gonum/mat/triband.go new file mode 100644 index 0000000000..f97855046e --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/triband.go @@ -0,0 +1,353 @@ +// Copyright ©2018 The Gonum Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" +) + +var ( + triBand TriBanded + _ Banded = triBand + _ Triangular = triBand + + triBandDense *TriBandDense + _ Matrix = triBandDense + _ Triangular = triBandDense + _ Banded = triBandDense + _ TriBanded = triBandDense + _ RawTriBander = triBandDense + _ MutableTriBanded = triBandDense +) + +// TriBanded is a triangular band matrix interface type. +type TriBanded interface { + Banded + + // Triangle returns the number of rows/columns in the matrix and its + // orientation. + Triangle() (n int, kind TriKind) + + // TTri is the equivalent of the T() method in the Matrix interface but + // guarantees the transpose is of triangular type. + TTri() Triangular + + // TriBand returns the number of rows/columns in the matrix, the + // size of the bandwidth, and the orientation. + TriBand() (n, k int, kind TriKind) + + // TTriBand is the equivalent of the T() method in the Matrix interface but + // guarantees the transpose is of banded triangular type. + TTriBand() TriBanded +} + +// A RawTriBander can return a blas64.TriangularBand representation of the receiver. +// Changes to the blas64.TriangularBand.Data slice will be reflected in the original +// matrix, changes to the N, K, Stride, Uplo and Diag fields will not. +type RawTriBander interface { + RawTriBand() blas64.TriangularBand +} + +// MutableTriBanded is a triangular band matrix interface type that allows +// elements to be altered. +type MutableTriBanded interface { + TriBanded + SetTriBand(i, j int, v float64) +} + +var ( + tTriBand TransposeTriBand + _ Matrix = tTriBand + _ TriBanded = tTriBand + _ Untransposer = tTriBand + _ UntransposeTrier = tTriBand + _ UntransposeBander = tTriBand + _ UntransposeTriBander = tTriBand +) + +// TransposeTriBand is a type for performing an implicit transpose of a TriBanded +// matrix. 
It implements the TriBanded interface, returning values from the +// transpose of the matrix within. +type TransposeTriBand struct { + TriBanded TriBanded +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the TriBanded field. +func (t TransposeTriBand) At(i, j int) float64 { + return t.TriBanded.At(j, i) +} + +// Dims returns the dimensions of the transposed matrix. TriBanded matrices are +// square and thus this is the same size as the original TriBanded. +func (t TransposeTriBand) Dims() (r, c int) { + c, r = t.TriBanded.Dims() + return r, c +} + +// T performs an implicit transpose by returning the TriBand field. +func (t TransposeTriBand) T() Matrix { + return t.TriBanded +} + +// Triangle returns the number of rows/columns in the matrix and its orientation. +func (t TransposeTriBand) Triangle() (int, TriKind) { + n, upper := t.TriBanded.Triangle() + return n, !upper +} + +// TTri performs an implicit transpose by returning the TriBand field. +func (t TransposeTriBand) TTri() Triangular { + return t.TriBanded +} + +// Bandwidth returns the upper and lower bandwidths of the matrix. +func (t TransposeTriBand) Bandwidth() (kl, ku int) { + kl, ku = t.TriBanded.Bandwidth() + return ku, kl +} + +// TBand performs an implicit transpose by returning the TriBand field. +func (t TransposeTriBand) TBand() Banded { + return t.TriBanded +} + +// TriBand returns the number of rows/columns in the matrix, the +// size of the bandwidth, and the orientation. +func (t TransposeTriBand) TriBand() (n, k int, kind TriKind) { + n, k, kind = t.TriBanded.TriBand() + return n, k, !kind +} + +// TTriBand performs an implicit transpose by returning the TriBand field. +func (t TransposeTriBand) TTriBand() TriBanded { + return t.TriBanded +} + +// Untranspose returns the Triangular field. 
+func (t TransposeTriBand) Untranspose() Matrix { + return t.TriBanded +} + +// UntransposeTri returns the underlying Triangular matrix. +func (t TransposeTriBand) UntransposeTri() Triangular { + return t.TriBanded +} + +// UntransposeBand returns the underlying Banded matrix. +func (t TransposeTriBand) UntransposeBand() Banded { + return t.TriBanded +} + +// UntransposeTriBand returns the underlying TriBanded matrix. +func (t TransposeTriBand) UntransposeTriBand() TriBanded { + return t.TriBanded +} + +// TriBandDense represents a triangular band matrix in dense storage format. +type TriBandDense struct { + mat blas64.TriangularBand +} + +// NewTriBandDense creates a new triangular banded matrix with n rows and columns, +// k bands in the direction of the specified kind. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n*(k+1), +// data is used as the backing slice, and changes to the elements of the returned +// TriBandDense will be reflected in data. If neither of these is true, NewTriBandDense +// will panic. k must be at least zero and less than n, otherwise NewTriBandDense will panic. +// +// The data must be arranged in row-major order constructed by removing the zeros +// from the rows outside the band and aligning the diagonals. For example, if +// the upper-triangular banded matrix +// 1 2 3 0 0 0 +// 0 4 5 6 0 0 +// 0 0 7 8 9 0 +// 0 0 0 10 11 12 +// 0 0 0 0 13 14 +// 0 0 0 0 0 15 +// becomes (* entries are never accessed) +// 1 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 * +// 15 * * +// which is passed to NewTriBandDense as []float64{1, 2, ..., 15, *, *, *} +// with k=2 and kind = mat.Upper. 
+// The lower triangular banded matrix +// 1 0 0 0 0 0 +// 2 3 0 0 0 0 +// 4 5 6 0 0 0 +// 0 7 8 9 0 0 +// 0 0 10 11 12 0 +// 0 0 0 13 14 15 +// becomes (* entries are never accessed) +// * * 1 +// * 2 3 +// 4 5 6 +// 7 8 9 +// 10 11 12 +// 13 14 15 +// which is passed to NewTriBandDense as []float64{*, *, *, 1, 2, ..., 15} +// with k=2 and kind = mat.Lower. +// Only the values in the band portion of the matrix are used. +func NewTriBandDense(n, k int, kind TriKind, data []float64) *TriBandDense { + if n <= 0 || k < 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if k+1 > n { + panic("mat: band out of range") + } + bc := k + 1 + if data != nil && len(data) != n*bc { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n*bc) + } + uplo := blas.Lower + if kind { + uplo = blas.Upper + } + return &TriBandDense{ + mat: blas64.TriangularBand{ + Uplo: uplo, + Diag: blas.NonUnit, + N: n, + K: k, + Data: data, + Stride: bc, + }, + } +} + +// Dims returns the number of rows and columns in the matrix. +func (t *TriBandDense) Dims() (r, c int) { + return t.mat.N, t.mat.N +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (t *TriBandDense) T() Matrix { + return Transpose{t} +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized matrices can be the +// receiver for size-restricted operations. TriBandDense matrices can be zeroed using Reset. +func (t *TriBandDense) IsZero() bool { + // It must be the case that t.Dims() returns + // zeros in this case. See comment in Reset(). + return t.mat.Stride == 0 +} + +// Reset zeros the dimensions of the matrix so that it can be reused as the +// receiver of a dimensionally restricted operation. +// +// See the Reseter interface for more information. +func (t *TriBandDense) Reset() { + t.mat.N = 0 + t.mat.Stride = 0 + t.mat.K = 0 + t.mat.Data = t.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. 
+func (t *TriBandDense) Zero() { + if t.isUpper() { + for i := 0; i < t.mat.N; i++ { + u := min(1+t.mat.K, t.mat.N-i) + zero(t.mat.Data[i*t.mat.Stride : i*t.mat.Stride+u]) + } + return + } + for i := 0; i < t.mat.N; i++ { + l := max(0, t.mat.K-i) + zero(t.mat.Data[i*t.mat.Stride+l : i*t.mat.Stride+t.mat.K+1]) + } +} + +func (t *TriBandDense) isUpper() bool { + return isUpperUplo(t.mat.Uplo) +} + +func (t *TriBandDense) triKind() TriKind { + return TriKind(isUpperUplo(t.mat.Uplo)) +} + +// Triangle returns the dimension of t and its orientation. The returned +// orientation is only valid when n is not zero. +func (t *TriBandDense) Triangle() (n int, kind TriKind) { + return t.mat.N, t.triKind() +} + +// TTri performs an implicit transpose by returning the receiver inside a TransposeTri. +func (t *TriBandDense) TTri() Triangular { + return TransposeTri{t} +} + +// Bandwidth returns the upper and lower bandwidths of the matrix. +func (t *TriBandDense) Bandwidth() (kl, ku int) { + if t.isUpper() { + return 0, t.mat.K + } + return t.mat.K, 0 +} + +// TBand performs an implicit transpose by returning the receiver inside a TransposeBand. +func (t *TriBandDense) TBand() Banded { + return TransposeBand{t} +} + +// TriBand returns the number of rows/columns in the matrix, the +// size of the bandwidth, and the orientation. +func (t *TriBandDense) TriBand() (n, k int, kind TriKind) { + return t.mat.N, t.mat.K, TriKind(!t.IsZero()) && t.triKind() +} + +// TTriBand performs an implicit transpose by returning the receiver inside a TransposeTriBand. +func (t *TriBandDense) TTriBand() TriBanded { + return TransposeTriBand{t} +} + +// RawTriBand returns the underlying blas64.TriangularBand used by the receiver. +// Changes to the blas64.TriangularBand.Data slice will be reflected in the original +// matrix, changes to the N, K, Stride, Uplo and Diag fields will not. 
+func (t *TriBandDense) RawTriBand() blas64.TriangularBand { + return t.mat +} + +// SetRawTriBand sets the underlying blas64.TriangularBand used by the receiver. +// Changes to elements in the receiver following the call will be reflected +// in the input. +// +// The supplied TriangularBand must not use blas.Unit storage format. +func (t *TriBandDense) SetRawTriBand(mat blas64.TriangularBand) { + if mat.Diag == blas.Unit { + panic("mat: cannot set TriBand with Unit storage") + } + t.mat = mat +} + +// DiagView returns the diagonal as a matrix backed by the original data. +func (t *TriBandDense) DiagView() Diagonal { + if t.mat.Diag == blas.Unit { + panic("mat: cannot take view of Unit diagonal") + } + n := t.mat.N + data := t.mat.Data + if !t.isUpper() { + data = data[t.mat.K:] + } + return &DiagDense{ + mat: blas64.Vector{ + N: n, + Inc: t.mat.Stride, + Data: data[:(n-1)*t.mat.Stride+1], + }, + } +} diff --git a/vendor/gonum.org/v1/gonum/mat/vector.go b/vendor/gonum.org/v1/gonum/mat/vector.go new file mode 100644 index 0000000000..8191312bfe --- /dev/null +++ b/vendor/gonum.org/v1/gonum/mat/vector.go @@ -0,0 +1,741 @@ +// Copyright ©2013 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mat + +import ( + "gonum.org/v1/gonum/blas" + "gonum.org/v1/gonum/blas/blas64" + "gonum.org/v1/gonum/internal/asm/f64" +) + +var ( + vector *VecDense + + _ Matrix = vector + _ Vector = vector + _ Reseter = vector +) + +// Vector is a vector. +type Vector interface { + Matrix + AtVec(int) float64 + Len() int +} + +// TransposeVec is a type for performing an implicit transpose of a Vector. +// It implements the Vector interface, returning values from the transpose +// of the vector within. +type TransposeVec struct { + Vector Vector +} + +// At returns the value of the element at row i and column j of the transposed +// matrix, that is, row j and column i of the Vector field. 
+func (t TransposeVec) At(i, j int) float64 { + return t.Vector.At(j, i) +} + +// AtVec returns the element at position i. It panics if i is out of bounds. +func (t TransposeVec) AtVec(i int) float64 { + return t.Vector.AtVec(i) +} + +// Dims returns the dimensions of the transposed vector. +func (t TransposeVec) Dims() (r, c int) { + c, r = t.Vector.Dims() + return r, c +} + +// T performs an implicit transpose by returning the Vector field. +func (t TransposeVec) T() Matrix { + return t.Vector +} + +// Len returns the number of columns in the vector. +func (t TransposeVec) Len() int { + return t.Vector.Len() +} + +// TVec performs an implicit transpose by returning the Vector field. +func (t TransposeVec) TVec() Vector { + return t.Vector +} + +// Untranspose returns the Vector field. +func (t TransposeVec) Untranspose() Matrix { + return t.Vector +} + +func (t TransposeVec) UntransposeVec() Vector { + return t.Vector +} + +// VecDense represents a column vector. +type VecDense struct { + mat blas64.Vector + // A BLAS vector can have a negative increment, but allowing this + // in the mat type complicates a lot of code, and doesn't gain anything. + // VecDense must have positive increment in this package. +} + +// NewVecDense creates a new VecDense of length n. If data == nil, +// a new slice is allocated for the backing slice. If len(data) == n, data is +// used as the backing slice, and changes to the elements of the returned VecDense +// will be reflected in data. If neither of these is true, NewVecDense will panic. +// NewVecDense will panic if n is zero. 
+func NewVecDense(n int, data []float64) *VecDense { + if n <= 0 { + if n == 0 { + panic(ErrZeroLength) + } + panic("mat: negative dimension") + } + if len(data) != n && data != nil { + panic(ErrShape) + } + if data == nil { + data = make([]float64, n) + } + return &VecDense{ + mat: blas64.Vector{ + N: n, + Inc: 1, + Data: data, + }, + } +} + +// SliceVec returns a new Vector that shares backing data with the receiver. +// The returned matrix starts at i of the receiver and extends k-i elements. +// SliceVec panics with ErrIndexOutOfRange if the slice is outside the capacity +// of the receiver. +func (v *VecDense) SliceVec(i, k int) Vector { + if i < 0 || k <= i || v.Cap() < k { + panic(ErrIndexOutOfRange) + } + return &VecDense{ + mat: blas64.Vector{ + N: k - i, + Inc: v.mat.Inc, + Data: v.mat.Data[i*v.mat.Inc : (k-1)*v.mat.Inc+1], + }, + } +} + +// Dims returns the number of rows and columns in the matrix. Columns is always 1 +// for a non-Reset vector. +func (v *VecDense) Dims() (r, c int) { + if v.IsZero() { + return 0, 0 + } + return v.mat.N, 1 +} + +// Caps returns the number of rows and columns in the backing matrix. Columns is always 1 +// for a non-Reset vector. +func (v *VecDense) Caps() (r, c int) { + if v.IsZero() { + return 0, 0 + } + return v.Cap(), 1 +} + +// Len returns the length of the vector. +func (v *VecDense) Len() int { + return v.mat.N +} + +// Cap returns the capacity of the vector. +func (v *VecDense) Cap() int { + if v.IsZero() { + return 0 + } + return (cap(v.mat.Data)-1)/v.mat.Inc + 1 +} + +// T performs an implicit transpose by returning the receiver inside a Transpose. +func (v *VecDense) T() Matrix { + return Transpose{v} +} + +// TVec performs an implicit transpose by returning the receiver inside a TransposeVec. +func (v *VecDense) TVec() Vector { + return TransposeVec{v} +} + +// Reset zeros the length of the vector so that it can be reused as the +// receiver of a dimensionally restricted operation. 
+// +// See the Reseter interface for more information. +func (v *VecDense) Reset() { + // No change of Inc or N to 0 may be + // made unless both are set to 0. + v.mat.Inc = 0 + v.mat.N = 0 + v.mat.Data = v.mat.Data[:0] +} + +// Zero sets all of the matrix elements to zero. +func (v *VecDense) Zero() { + for i := 0; i < v.mat.N; i++ { + v.mat.Data[v.mat.Inc*i] = 0 + } +} + +// CloneVec makes a copy of a into the receiver, overwriting the previous value +// of the receiver. +func (v *VecDense) CloneVec(a Vector) { + if v == a { + return + } + n := a.Len() + v.mat = blas64.Vector{ + N: n, + Inc: 1, + Data: use(v.mat.Data, n), + } + if r, ok := a.(RawVectorer); ok { + blas64.Copy(r.RawVector(), v.mat) + return + } + for i := 0; i < a.Len(); i++ { + v.SetVec(i, a.AtVec(i)) + } +} + +// VecDenseCopyOf returns a newly allocated copy of the elements of a. +func VecDenseCopyOf(a Vector) *VecDense { + v := &VecDense{} + v.CloneVec(a) + return v +} + +func (v *VecDense) RawVector() blas64.Vector { + return v.mat +} + +// CopyVec makes a copy of elements of a into the receiver. It is similar to the +// built-in copy; it copies as much as the overlap between the two vectors and +// returns the number of elements it copied. +func (v *VecDense) CopyVec(a Vector) int { + n := min(v.Len(), a.Len()) + if v == a { + return n + } + if r, ok := a.(RawVectorer); ok { + blas64.Copy(r.RawVector(), v.mat) + return n + } + for i := 0; i < n; i++ { + v.setVec(i, a.AtVec(i)) + } + return n +} + +// ScaleVec scales the vector a by alpha, placing the result in the receiver. 
+func (v *VecDense) ScaleVec(alpha float64, a Vector) { + n := a.Len() + + if v == a { + if v.mat.Inc == 1 { + f64.ScalUnitary(alpha, v.mat.Data) + return + } + f64.ScalInc(alpha, v.mat.Data, uintptr(n), uintptr(v.mat.Inc)) + return + } + + v.reuseAs(n) + + if rv, ok := a.(RawVectorer); ok { + mat := rv.RawVector() + v.checkOverlap(mat) + if v.mat.Inc == 1 && mat.Inc == 1 { + f64.ScalUnitaryTo(v.mat.Data, alpha, mat.Data) + return + } + f64.ScalIncTo(v.mat.Data, uintptr(v.mat.Inc), + alpha, mat.Data, uintptr(n), uintptr(mat.Inc)) + return + } + + for i := 0; i < n; i++ { + v.setVec(i, alpha*a.AtVec(i)) + } +} + +// AddScaledVec adds the vectors a and alpha*b, placing the result in the receiver. +func (v *VecDense) AddScaledVec(a Vector, alpha float64, b Vector) { + if alpha == 1 { + v.AddVec(a, b) + return + } + if alpha == -1 { + v.SubVec(a, b) + return + } + + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + var amat, bmat blas64.Vector + fast := true + aU, _ := untranspose(a) + if rv, ok := aU.(RawVectorer); ok { + amat = rv.RawVector() + if v != a { + v.checkOverlap(amat) + } + } else { + fast = false + } + bU, _ := untranspose(b) + if rv, ok := bU.(RawVectorer); ok { + bmat = rv.RawVector() + if v != b { + v.checkOverlap(bmat) + } + } else { + fast = false + } + + v.reuseAs(ar) + + switch { + case alpha == 0: // v <- a + if v == a { + return + } + v.CopyVec(a) + case v == a && v == b: // v <- v + alpha * v = (alpha + 1) * v + blas64.Scal(alpha+1, v.mat) + case !fast: // v <- a + alpha * b without blas64 support. + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)+alpha*b.AtVec(i)) + } + case v == a && v != b: // v <- v + alpha * b + if v.mat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. 
+ f64.AxpyUnitaryTo(v.mat.Data, alpha, bmat.Data, amat.Data) + } else { + f64.AxpyInc(alpha, bmat.Data, v.mat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(v.mat.Inc), 0, 0) + } + default: // v <- a + alpha * b or v <- a + alpha * v + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + f64.AxpyUnitaryTo(v.mat.Data, alpha, bmat.Data, amat.Data) + } else { + f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, + alpha, bmat.Data, amat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) + } + } +} + +// AddVec adds the vectors a and b, placing the result in the receiver. +func (v *VecDense) AddVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + f64.AxpyUnitaryTo(v.mat.Data, 1, bmat.Data, amat.Data) + return + } + f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, + 1, bmat.Data, amat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)+b.AtVec(i)) + } +} + +// SubVec subtracts the vector b from a, placing the result in the receiver. +func (v *VecDense) SubVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. 
+ f64.AxpyUnitaryTo(v.mat.Data, -1, bmat.Data, amat.Data) + return + } + f64.AxpyIncTo(v.mat.Data, uintptr(v.mat.Inc), 0, + -1, bmat.Data, amat.Data, + uintptr(ar), uintptr(bmat.Inc), uintptr(amat.Inc), 0, 0) + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)-b.AtVec(i)) + } +} + +// MulElemVec performs element-wise multiplication of a and b, placing the result +// in the receiver. +func (v *VecDense) MulElemVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + for i, a := range amat.Data { + v.mat.Data[i] = a * bmat.Data[i] + } + return + } + var ia, ib int + for i := 0; i < ar; i++ { + v.setVec(i, amat.Data[ia]*bmat.Data[ib]) + ia += amat.Inc + ib += bmat.Inc + } + return + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)*b.AtVec(i)) + } +} + +// DivElemVec performs element-wise division of a by b, placing the result +// in the receiver. +func (v *VecDense) DivElemVec(a, b Vector) { + ar := a.Len() + br := b.Len() + + if ar != br { + panic(ErrShape) + } + + v.reuseAs(ar) + + aU, _ := untranspose(a) + bU, _ := untranspose(b) + + if arv, ok := aU.(RawVectorer); ok { + if brv, ok := bU.(RawVectorer); ok { + amat := arv.RawVector() + bmat := brv.RawVector() + + if v != a { + v.checkOverlap(amat) + } + if v != b { + v.checkOverlap(bmat) + } + + if v.mat.Inc == 1 && amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. 
+ for i, a := range amat.Data { + v.setVec(i, a/bmat.Data[i]) + } + return + } + var ia, ib int + for i := 0; i < ar; i++ { + v.setVec(i, amat.Data[ia]/bmat.Data[ib]) + ia += amat.Inc + ib += bmat.Inc + } + } + } + + for i := 0; i < ar; i++ { + v.setVec(i, a.AtVec(i)/b.AtVec(i)) + } +} + +// MulVec computes a * b. The result is stored into the receiver. +// MulVec panics if the number of columns in a does not equal the number of rows in b +// or if the number of columns in b does not equal 1. +func (v *VecDense) MulVec(a Matrix, b Vector) { + r, c := a.Dims() + br, bc := b.Dims() + if c != br || bc != 1 { + panic(ErrShape) + } + + aU, trans := untranspose(a) + var bmat blas64.Vector + fast := true + bU, _ := untranspose(b) + if rv, ok := bU.(RawVectorer); ok { + bmat = rv.RawVector() + if v != b { + v.checkOverlap(bmat) + } + } else { + fast = false + } + + v.reuseAs(r) + var restore func() + if v == aU { + v, restore = v.isolatedWorkspace(aU.(*VecDense)) + defer restore() + } else if v == b { + v, restore = v.isolatedWorkspace(b) + defer restore() + } + + // TODO(kortschak): Improve the non-fast paths. + switch aU := aU.(type) { + case Vector: + if b.Len() == 1 { + // {n,1} x {1,1} + v.ScaleVec(b.AtVec(0), aU) + return + } + + // {1,n} x {n,1} + if fast { + if rv, ok := aU.(RawVectorer); ok { + amat := rv.RawVector() + if v != aU { + v.checkOverlap(amat) + } + + if amat.Inc == 1 && bmat.Inc == 1 { + // Fast path for a common case. + v.setVec(0, f64.DotUnitary(amat.Data, bmat.Data)) + return + } + v.setVec(0, f64.DotInc(amat.Data, bmat.Data, + uintptr(c), uintptr(amat.Inc), uintptr(bmat.Inc), 0, 0)) + return + } + } + var sum float64 + for i := 0; i < c; i++ { + sum += aU.AtVec(i) * b.AtVec(i) + } + v.setVec(0, sum) + return + case RawSymmetricer: + if fast { + amat := aU.RawSymmetric() + // We don't know that a is a *SymDense, so make + // a temporary SymDense to check overlap. 
+ (&SymDense{mat: amat}).checkOverlap(v.asGeneral()) + blas64.Symv(1, amat, bmat, 0, v.mat) + return + } + case RawTriangular: + v.CopyVec(b) + amat := aU.RawTriangular() + // We don't know that a is a *TriDense, so make + // a temporary TriDense to check overlap. + (&TriDense{mat: amat}).checkOverlap(v.asGeneral()) + ta := blas.NoTrans + if trans { + ta = blas.Trans + } + blas64.Trmv(ta, amat, v.mat) + case RawMatrixer: + if fast { + amat := aU.RawMatrix() + // We don't know that a is a *Dense, so make + // a temporary Dense to check overlap. + (&Dense{mat: amat}).checkOverlap(v.asGeneral()) + t := blas.NoTrans + if trans { + t = blas.Trans + } + blas64.Gemv(t, 1, amat, bmat, 0, v.mat) + return + } + default: + if fast { + for i := 0; i < r; i++ { + var f float64 + for j := 0; j < c; j++ { + f += a.At(i, j) * bmat.Data[j*bmat.Inc] + } + v.setVec(i, f) + } + return + } + } + + for i := 0; i < r; i++ { + var f float64 + for j := 0; j < c; j++ { + f += a.At(i, j) * b.AtVec(j) + } + v.setVec(i, f) + } +} + +// reuseAs resizes an empty vector to a r×1 vector, +// or checks that a non-empty matrix is r×1. +func (v *VecDense) reuseAs(r int) { + if r == 0 { + panic(ErrZeroLength) + } + if v.IsZero() { + v.mat = blas64.Vector{ + N: r, + Inc: 1, + Data: use(v.mat.Data, r), + } + return + } + if r != v.mat.N { + panic(ErrShape) + } +} + +// IsZero returns whether the receiver is zero-sized. Zero-sized vectors can be the +// receiver for size-restricted operations. VecDenses can be zeroed using Reset. +func (v *VecDense) IsZero() bool { + // It must be the case that v.Dims() returns + // zeros in this case. See comment in Reset(). 
+ return v.mat.Inc == 0 +} + +func (v *VecDense) isolatedWorkspace(a Vector) (n *VecDense, restore func()) { + l := a.Len() + if l == 0 { + panic(ErrZeroLength) + } + n = getWorkspaceVec(l, false) + return n, func() { + v.CopyVec(n) + putWorkspaceVec(n) + } +} + +// asDense returns a Dense representation of the receiver with the same +// underlying data. +func (v *VecDense) asDense() *Dense { + return &Dense{ + mat: v.asGeneral(), + capRows: v.mat.N, + capCols: 1, + } +} + +// asGeneral returns a blas64.General representation of the receiver with the +// same underlying data. +func (v *VecDense) asGeneral() blas64.General { + return blas64.General{ + Rows: v.mat.N, + Cols: 1, + Stride: v.mat.Inc, + Data: v.mat.Data, + } +} + +// ColViewOf reflects the column j of the RawMatrixer m, into the receiver +// backed by the same underlying data. The length of the receiver must either be +// zero or match the number of rows in m. +func (v *VecDense) ColViewOf(m RawMatrixer, j int) { + rm := m.RawMatrix() + + if j >= rm.Cols || j < 0 { + panic(ErrColAccess) + } + if !v.IsZero() && v.mat.N != rm.Rows { + panic(ErrShape) + } + + v.mat.Inc = rm.Stride + v.mat.Data = rm.Data[j : (rm.Rows-1)*rm.Stride+j+1] + v.mat.N = rm.Rows +} + +// RowViewOf reflects the row i of the RawMatrixer m, into the receiver +// backed by the same underlying data. The length of the receiver must either be +// zero or match the number of columns in m. 
+func (v *VecDense) RowViewOf(m RawMatrixer, i int) { + rm := m.RawMatrix() + + if i >= rm.Rows || i < 0 { + panic(ErrRowAccess) + } + if !v.IsZero() && v.mat.N != rm.Cols { + panic(ErrShape) + } + + v.mat.Inc = 1 + v.mat.Data = rm.Data[i*rm.Stride : i*rm.Stride+rm.Cols] + v.mat.N = rm.Cols +} diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go new file mode 100644 index 0000000000..335473dd19 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -0,0 +1,160 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + + "github.com/googleapis/gnostic/OpenAPIv2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" + kubeversion "k8s.io/client-go/pkg/version" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/testing" +) + +// FakeDiscovery implements discovery.DiscoveryInterface and sometimes calls testing.Fake.Invoke with an action, +// but doesn't respect the return value if any. There is a way to fake static values like ServerVersion by using the Faked... fields on the struct. +type FakeDiscovery struct { + *testing.Fake + FakedServerVersion *version.Info +} + +// ServerResourcesForGroupVersion returns the supported resources for a group +// and version. 
+func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + for _, resourceList := range c.Resources { + if resourceList.GroupVersion == groupVersion { + return resourceList, nil + } + } + return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) +} + +// ServerResources returns the supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. +func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { + _, rs, err := c.ServerGroupsAndResources() + return rs, err +} + +// ServerGroupsAndResources returns the supported groups and resources for all groups and versions. +func (c *FakeDiscovery) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + sgs, err := c.ServerGroups() + if err != nil { + return nil, nil, err + } + resultGroups := []*metav1.APIGroup{} + for i := range sgs.Groups { + resultGroups = append(resultGroups, &sgs.Groups[i]) + } + + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + return resultGroups, c.Resources, nil +} + +// ServerPreferredResources returns the supported resources with the version +// preferred by the server. +func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +// ServerPreferredNamespacedResources returns the supported namespaced resources +// with the version preferred by the server. +func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +// ServerGroups returns the supported groups, with information like supported +// versions and the preferred version. 
+func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "group"}, + } + c.Invokes(action, nil) + + groups := map[string]*metav1.APIGroup{} + + for _, res := range c.Resources { + gv, err := schema.ParseGroupVersion(res.GroupVersion) + if err != nil { + return nil, err + } + group := groups[gv.Group] + if group == nil { + group = &metav1.APIGroup{ + Name: gv.Group, + PreferredVersion: metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }, + } + groups[gv.Group] = group + } + + group.Versions = append(group.Versions, metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }) + } + + list := &metav1.APIGroupList{} + for _, apiGroup := range groups { + list.Groups = append(list.Groups, *apiGroup) + } + + return list, nil + +} + +// ServerVersion retrieves and parses the server's version. +func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { + action := testing.ActionImpl{} + action.Verb = "get" + action.Resource = schema.GroupVersionResource{Resource: "version"} + c.Invokes(action, nil) + + if c.FakedServerVersion != nil { + return c.FakedServerVersion, nil + } + + versionInfo := kubeversion.Get() + return &versionInfo, nil +} + +// OpenAPISchema retrieves and parses the swagger API schema the server supports. +func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) { + return &openapi_v2.Document{}, nil +} + +// RESTClient returns a RESTClient that is used to communicate with API server +// by this client implementation. 
+func (c *FakeDiscovery) RESTClient() restclient.Interface { + return nil +} diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go new file mode 100644 index 0000000000..f56b34ee87 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -0,0 +1,671 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "path" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Name = name + + return action +} + +func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func 
NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Object = object + + return action +} + +func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewCreateSubresourceAction(resource 
schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Object = object + + return action +} + +func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Namespace = namespace + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) 
+ action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) + action.Namespace = namespace + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Object = object + + return action +} +func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootDeleteAction(resource schema.GroupVersionResource, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Name = name + + return action +} + +func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewDeleteSubresourceAction(resource 
schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) { + var err error + switch t := opts.(type) { + case metav1.ListOptions: + labelSelector, err = labels.Parse(t.LabelSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.LabelSelector, err)) + } + fieldSelector, err = fields.ParseSelector(t.FieldSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.FieldSelector, err)) + } + 
resourceVersion = t.ResourceVersion + default: + panic(fmt.Errorf("expect a ListOptions %T", opts)) + } + if labelSelector == nil { + labelSelector = labels.Everything() + } + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + return labelSelector, fieldSelector, resourceVersion +} + +func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func NewProxyGetAction(resource schema.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl { + action := ProxyGetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Scheme = scheme + action.Name = name + action.Port = port + action.Path = path + action.Params = params + return action +} + +type ListRestrictions struct { + Labels labels.Selector + Fields fields.Selector +} +type WatchRestrictions struct { + Labels labels.Selector + Fields fields.Selector + ResourceVersion string +} + +type Action interface { + GetNamespace() string + GetVerb() string + GetResource() schema.GroupVersionResource + GetSubresource() string + Matches(verb, resource string) bool + + // DeepCopy is used to copy an action to avoid any risk of accidental mutation. Most people never need to call this + // because the invocation logic deep copies before calls to storage and reactors. 
+ DeepCopy() Action +} + +type GenericAction interface { + Action + GetValue() interface{} +} + +type GetAction interface { + Action + GetName() string +} + +type ListAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type CreateAction interface { + Action + GetObject() runtime.Object +} + +type UpdateAction interface { + Action + GetObject() runtime.Object +} + +type DeleteAction interface { + Action + GetName() string +} + +type DeleteCollectionAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type PatchAction interface { + Action + GetName() string + GetPatchType() types.PatchType + GetPatch() []byte +} + +type WatchAction interface { + Action + GetWatchRestrictions() WatchRestrictions +} + +type ProxyGetAction interface { + Action + GetScheme() string + GetName() string + GetPort() string + GetPath() string + GetParams() map[string]string +} + +type ActionImpl struct { + Namespace string + Verb string + Resource schema.GroupVersionResource + Subresource string +} + +func (a ActionImpl) GetNamespace() string { + return a.Namespace +} +func (a ActionImpl) GetVerb() string { + return a.Verb +} +func (a ActionImpl) GetResource() schema.GroupVersionResource { + return a.Resource +} +func (a ActionImpl) GetSubresource() string { + return a.Subresource +} +func (a ActionImpl) Matches(verb, resource string) bool { + return strings.EqualFold(verb, a.Verb) && + strings.EqualFold(resource, a.Resource.Resource) +} +func (a ActionImpl) DeepCopy() Action { + ret := a + return ret +} + +type GenericActionImpl struct { + ActionImpl + Value interface{} +} + +func (a GenericActionImpl) GetValue() interface{} { + return a.Value +} + +func (a GenericActionImpl) DeepCopy() Action { + return GenericActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + // TODO this is wrong, but no worse than before + Value: a.Value, + } +} + +type GetActionImpl struct { + ActionImpl + Name string +} + +func (a GetActionImpl) GetName() 
string { + return a.Name +} + +func (a GetActionImpl) DeepCopy() Action { + return GetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + +type ListActionImpl struct { + ActionImpl + Kind schema.GroupVersionKind + Name string + ListRestrictions ListRestrictions +} + +func (a ListActionImpl) GetKind() schema.GroupVersionKind { + return a.Kind +} + +func (a ListActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a ListActionImpl) DeepCopy() Action { + return ListActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Kind: a.Kind, + Name: a.Name, + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type CreateActionImpl struct { + ActionImpl + Name string + Object runtime.Object +} + +func (a CreateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a CreateActionImpl) DeepCopy() Action { + return CreateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Object: a.Object.DeepCopyObject(), + } +} + +type UpdateActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a UpdateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a UpdateActionImpl) DeepCopy() Action { + return UpdateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + } +} + +type PatchActionImpl struct { + ActionImpl + Name string + PatchType types.PatchType + Patch []byte +} + +func (a PatchActionImpl) GetName() string { + return a.Name +} + +func (a PatchActionImpl) GetPatch() []byte { + return a.Patch +} + +func (a PatchActionImpl) GetPatchType() types.PatchType { + return a.PatchType +} + +func (a PatchActionImpl) DeepCopy() Action { + patch := make([]byte, len(a.Patch)) + copy(patch, a.Patch) + return PatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + 
Name: a.Name, + PatchType: a.PatchType, + Patch: patch, + } +} + +type DeleteActionImpl struct { + ActionImpl + Name string +} + +func (a DeleteActionImpl) GetName() string { + return a.Name +} + +func (a DeleteActionImpl) DeepCopy() Action { + return DeleteActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + +type DeleteCollectionActionImpl struct { + ActionImpl + ListRestrictions ListRestrictions +} + +func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a DeleteCollectionActionImpl) DeepCopy() Action { + return DeleteCollectionActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type WatchActionImpl struct { + ActionImpl + WatchRestrictions WatchRestrictions +} + +func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { + return a.WatchRestrictions +} + +func (a WatchActionImpl) DeepCopy() Action { + return WatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + WatchRestrictions: WatchRestrictions{ + Labels: a.WatchRestrictions.Labels.DeepCopySelector(), + Fields: a.WatchRestrictions.Fields.DeepCopySelector(), + ResourceVersion: a.WatchRestrictions.ResourceVersion, + }, + } +} + +type ProxyGetActionImpl struct { + ActionImpl + Scheme string + Name string + Port string + Path string + Params map[string]string +} + +func (a ProxyGetActionImpl) GetScheme() string { + return a.Scheme +} + +func (a ProxyGetActionImpl) GetName() string { + return a.Name +} + +func (a ProxyGetActionImpl) GetPort() string { + return a.Port +} + +func (a ProxyGetActionImpl) GetPath() string { + return a.Path +} + +func (a ProxyGetActionImpl) GetParams() map[string]string { + return a.Params +} + +func (a ProxyGetActionImpl) DeepCopy() Action { + params := map[string]string{} + for k, v := 
range a.Params { + params[k] = v + } + return ProxyGetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Scheme: a.Scheme, + Name: a.Name, + Port: a.Port, + Path: a.Path, + Params: params, + } +} diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go new file mode 100644 index 0000000000..8b9ee149c8 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/fake.go @@ -0,0 +1,216 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// Fake implements client.Interface. Meant to be embedded into a struct to get +// a default implementation. This makes faking out just the method you want to +// test easier. +type Fake struct { + sync.RWMutex + actions []Action // these may be castable to other types, but "Action" is the minimum + + // ReactionChain is the list of reactors that will be attempted for every + // request in the order they are tried. + ReactionChain []Reactor + // WatchReactionChain is the list of watch reactors that will be attempted + // for every request in the order they are tried. + WatchReactionChain []WatchReactor + // ProxyReactionChain is the list of proxy reactors that will be attempted + // for every request in the order they are tried. 
+ ProxyReactionChain []ProxyReactor + + Resources []*metav1.APIResourceList +} + +// Reactor is an interface to allow the composition of reaction functions. +type Reactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles the action and returns results. It may choose to + // delegate by indicated handled=false. + React(action Action) (handled bool, ret runtime.Object, err error) +} + +// WatchReactor is an interface to allow the composition of watch functions. +type WatchReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. + React(action Action) (handled bool, ret watch.Interface, err error) +} + +// ProxyReactor is an interface to allow the composition of proxy get +// functions. +type ProxyReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. + React(action Action) (handled bool, ret restclient.ResponseWrapper, err error) +} + +// ReactionFunc is a function that returns an object or error for a given +// Action. If "handled" is false, then the test client will ignore the +// results and continue to the next ReactionFunc. A ReactionFunc can describe +// reactions on subresources by testing the result of the action's +// GetSubresource() method. +type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error) + +// WatchReactionFunc is a function that returns a watch interface. If +// "handled" is false, then the test client will ignore the results and +// continue to the next ReactionFunc. 
+type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error) + +// ProxyReactionFunc is a function that returns a ResponseWrapper interface +// for a given Action. If "handled" is false, then the test client will +// ignore the results and continue to the next ProxyReactionFunc. +type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error) + +// AddReactor appends a reactor to the end of the chain. +func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction}) +} + +// PrependReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...) +} + +// AddWatchReactor appends a reactor to the end of the chain. +func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction}) +} + +// PrependWatchReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...) +} + +// AddProxyReactor appends a reactor to the end of the chain. +func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction}) +} + +// PrependProxyReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...) 
+} + +// Invokes records the provided Action and then invokes the ReactionFunc that +// handles the action if one exists. defaultReturnObj is expected to be of the +// same type a normal call would return. +func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) { + c.Lock() + defer c.Unlock() + + actionCopy := action.DeepCopy() + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.ReactionChain { + if !reactor.Handles(actionCopy) { + continue + } + + handled, ret, err := reactor.React(actionCopy) + if !handled { + continue + } + + return ret, err + } + + return defaultReturnObj, nil +} + +// InvokesWatch records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. +func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { + c.Lock() + defer c.Unlock() + + actionCopy := action.DeepCopy() + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.WatchReactionChain { + if !reactor.Handles(actionCopy) { + continue + } + + handled, ret, err := reactor.React(actionCopy) + if !handled { + continue + } + + return ret, err + } + + return nil, fmt.Errorf("unhandled watch: %#v", action) +} + +// InvokesProxy records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. +func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper { + c.Lock() + defer c.Unlock() + + actionCopy := action.DeepCopy() + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.ProxyReactionChain { + if !reactor.Handles(actionCopy) { + continue + } + + handled, ret, err := reactor.React(actionCopy) + if !handled || err != nil { + continue + } + + return ret + } + + return nil +} + +// ClearActions clears the history of actions called on the fake client. 
+func (c *Fake) ClearActions() { + c.Lock() + defer c.Unlock() + + c.actions = make([]Action, 0) +} + +// Actions returns a chronologically ordered slice fake actions called on the +// fake client. +func (c *Fake) Actions() []Action { + c.RLock() + defer c.RUnlock() + fa := make([]Action, len(c.actions)) + copy(fa, c.actions) + return fa +} diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go new file mode 100644 index 0000000000..54f600ad3f --- /dev/null +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -0,0 +1,577 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "reflect" + "sync" + + jsonpatch "github.com/evanphx/json-patch" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// ObjectTracker keeps track of objects. It is intended to be used to +// fake calls to a server by returning objects based on their kind, +// namespace and name. +type ObjectTracker interface { + // Add adds an object to the tracker. If object being added + // is a list, its items are added separately. 
+ Add(obj runtime.Object) error + + // Get retrieves the object by its kind, namespace and name. + Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) + + // Create adds an object to the tracker in the specified namespace. + Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + + // Update updates an existing object in the tracker in the specified namespace. + Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + + // List retrieves all objects of a given kind in the given + // namespace. Only non-List kinds are accepted. + List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) + + // Delete deletes an existing object from the tracker. If object + // didn't exist in the tracker prior to deletion, Delete returns + // no error. + Delete(gvr schema.GroupVersionResource, ns, name string) error + + // Watch watches objects from the tracker. Watch returns a channel + // which will push added / modified / deleted object. + Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) +} + +// ObjectScheme abstracts the implementation of common operations on objects. +type ObjectScheme interface { + runtime.ObjectCreater + runtime.ObjectTyper +} + +// ObjectReaction returns a ReactionFunc that applies core.Action to +// the given tracker. +func ObjectReaction(tracker ObjectTracker) ReactionFunc { + return func(action Action) (bool, runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + // Here and below we need to switch on implementation types, + // not on interfaces, as some interfaces are identical + // (e.g. UpdateAction and CreateAction), so if we use them, + // updates and creates end up matching the same case branch. 
+ switch action := action.(type) { + + case ListActionImpl: + obj, err := tracker.List(gvr, action.GetKind(), ns) + return true, obj, err + + case GetActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + return true, obj, err + + case CreateActionImpl: + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return true, nil, err + } + if action.GetSubresource() == "" { + err = tracker.Create(gvr, action.GetObject(), ns) + } else { + // TODO: Currently we're handling subresource creation as an update + // on the enclosing resource. This works for some subresources but + // might not be generic enough. + err = tracker.Update(gvr, action.GetObject(), ns) + } + if err != nil { + return true, nil, err + } + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + return true, obj, err + + case UpdateActionImpl: + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return true, nil, err + } + err = tracker.Update(gvr, action.GetObject(), ns) + if err != nil { + return true, nil, err + } + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + return true, obj, err + + case DeleteActionImpl: + err := tracker.Delete(gvr, ns, action.GetName()) + if err != nil { + return true, nil, err + } + return true, nil, nil + + case PatchActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + return true, nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return true, nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return true, nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return true, nil, err + } + + if err = json.Unmarshal(modified, 
obj); err != nil { + return true, nil, err + } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return true, nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return true, nil, err + } + case types.StrategicMergePatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err + } + default: + return true, nil, fmt.Errorf("PatchType is not supported") + } + + if err = tracker.Update(gvr, obj, ns); err != nil { + return true, nil, err + } + + return true, obj, nil + + default: + return false, nil, fmt.Errorf("no reaction implemented for %s", action) + } + } +} + +type tracker struct { + scheme ObjectScheme + decoder runtime.Decoder + lock sync.RWMutex + objects map[schema.GroupVersionResource][]runtime.Object + // The value type of watchers is a map of which the key is either a namespace or + // all/non namespace aka "" and its value is list of fake watchers. + // Manipulations on resources will broadcast the notification events into the + // watchers' channel. Note that too many unhandled events (currently 100, + // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. + watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher +} + +var _ ObjectTracker = &tracker{} + +// NewObjectTracker returns an ObjectTracker that can be used to keep track +// of objects for the fake clientset. Mostly useful for unit tests. 
+func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { + return &tracker{ + scheme: scheme, + decoder: decoder, + objects: make(map[schema.GroupVersionResource][]runtime.Object), + watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), + } +} + +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { + // Heuristic for list kind: original kind + List suffix. Might + // not always be true but this tracker has a pretty limited + // understanding of the actual API model. + listGVK := gvk + listGVK.Kind = listGVK.Kind + "List" + // GVK does have the concept of "internal version". The scheme recognizes + // the runtime.APIVersionInternal, but not the empty string. + if listGVK.Version == "" { + listGVK.Version = runtime.APIVersionInternal + } + + list, err := t.scheme.New(listGVK) + if err != nil { + return nil, err + } + + if !meta.IsListType(list) { + return nil, fmt.Errorf("%q is not a list type", listGVK.Kind) + } + + t.lock.RLock() + defer t.lock.RUnlock() + + objs, ok := t.objects[gvr] + if !ok { + return list, nil + } + + matchingObjs, err := filterByNamespace(objs, ns) + if err != nil { + return nil, err + } + if err := meta.SetList(list, matchingObjs); err != nil { + return nil, err + } + return list.DeepCopyObject(), nil +} + +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { + t.lock.Lock() + defer t.lock.Unlock() + + fakewatcher := watch.NewRaceFreeFake() + + if _, exists := t.watchers[gvr]; !exists { + t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) + } + t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) + return fakewatcher, nil +} + +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { + errNotFound := errors.NewNotFound(gvr.GroupResource(), name) + + t.lock.RLock() + defer t.lock.RUnlock() + + objs, ok := 
t.objects[gvr] + if !ok { + return nil, errNotFound + } + + var matchingObjs []runtime.Object + for _, obj := range objs { + acc, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + if acc.GetNamespace() != ns { + continue + } + if acc.GetName() != name { + continue + } + matchingObjs = append(matchingObjs, obj) + } + if len(matchingObjs) == 0 { + return nil, errNotFound + } + if len(matchingObjs) > 1 { + return nil, fmt.Errorf("more than one object matched gvr %s, ns: %q name: %q", gvr, ns, name) + } + + // Only one object should match in the tracker if it works + // correctly, as Add/Update methods enforce kind/namespace/name + // uniqueness. + obj := matchingObjs[0].DeepCopyObject() + if status, ok := obj.(*metav1.Status); ok { + if status.Status != metav1.StatusSuccess { + return nil, &errors.StatusError{ErrStatus: *status} + } + } + + return obj, nil +} + +func (t *tracker) Add(obj runtime.Object) error { + if meta.IsListType(obj) { + return t.addList(obj, false) + } + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + gvks, _, err := t.scheme.ObjectKinds(obj) + if err != nil { + return err + } + + if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { + gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} + } + + if len(gvks) == 0 { + return fmt.Errorf("no registered kinds for %v", obj) + } + for _, gvk := range gvks { + // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The + // actual registration in apiserver can specify arbitrary route for a + // gvk. If a test uses such objects, it cannot preset the tracker with + // objects via Add(). Instead, it should trigger the Create() function + // of the tracker, where an arbitrary gvr can be specified. + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + // Resource doesn't have the concept of "__internal" version, just set it to "". 
+ if gvr.Version == runtime.APIVersionInternal { + gvr.Version = "" + } + + err := t.add(gvr, obj, objMeta.GetNamespace(), false) + if err != nil { + return err + } + } + return nil +} + +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, false) +} + +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { + watches := []*watch.RaceFreeFakeWatcher{} + if t.watchers[gvr] != nil { + if w := t.watchers[gvr][ns]; w != nil { + watches = append(watches, w...) + } + if ns != metav1.NamespaceAll { + if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil { + watches = append(watches, w...) + } + } + } + return watches +} + +func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { + t.lock.Lock() + defer t.lock.Unlock() + + gr := gvr.GroupResource() + + // To avoid the object from being accidentally modified by caller + // after it's been added to the tracker, we always store the deep + // copy. + obj = obj.DeepCopyObject() + + newMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + + // Propagate namespace to the new object if hasn't already been set. 
+ if len(newMeta.GetNamespace()) == 0 { + newMeta.SetNamespace(ns) + } + + if ns != newMeta.GetNamespace() { + msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) + return errors.NewBadRequest(msg) + } + + for i, existingObj := range t.objects[gvr] { + oldMeta, err := meta.Accessor(existingObj) + if err != nil { + return err + } + if oldMeta.GetNamespace() == newMeta.GetNamespace() && oldMeta.GetName() == newMeta.GetName() { + if replaceExisting { + for _, w := range t.getWatches(gvr, ns) { + w.Modify(obj) + } + t.objects[gvr][i] = obj + return nil + } + return errors.NewAlreadyExists(gr, newMeta.GetName()) + } + } + + if replaceExisting { + // Tried to update but no matching object was found. + return errors.NewNotFound(gr, newMeta.GetName()) + } + + t.objects[gvr] = append(t.objects[gvr], obj) + + for _, w := range t.getWatches(gvr, ns) { + w.Add(obj) + } + + return nil +} + +func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { + list, err := meta.ExtractList(obj) + if err != nil { + return err + } + errs := runtime.DecodeList(list, t.decoder) + if len(errs) > 0 { + return errs[0] + } + for _, obj := range list { + if err := t.Add(obj); err != nil { + return err + } + } + return nil +} + +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { + t.lock.Lock() + defer t.lock.Unlock() + + found := false + + for i, existingObj := range t.objects[gvr] { + objMeta, err := meta.Accessor(existingObj) + if err != nil { + return err + } + if objMeta.GetNamespace() == ns && objMeta.GetName() == name { + obj := t.objects[gvr][i] + t.objects[gvr] = append(t.objects[gvr][:i], t.objects[gvr][i+1:]...) 
+ for _, w := range t.getWatches(gvr, ns) { + w.Delete(obj) + } + found = true + break + } + } + + if found { + return nil + } + + return errors.NewNotFound(gvr.GroupResource(), name) +} + +// filterByNamespace returns all objects in the collection that +// match provided namespace. Empty namespace matches +// non-namespaced objects. +func filterByNamespace(objs []runtime.Object, ns string) ([]runtime.Object, error) { + var res []runtime.Object + + for _, obj := range objs { + acc, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + if ns != "" && acc.GetNamespace() != ns { + continue + } + res = append(res, obj) + } + + return res, nil +} + +func DefaultWatchReactor(watchInterface watch.Interface, err error) WatchReactionFunc { + return func(action Action) (bool, watch.Interface, error) { + return true, watchInterface, err + } +} + +// SimpleReactor is a Reactor. Each reaction function is attached to a given verb,resource tuple. "*" in either field matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions +type SimpleReactor struct { + Verb string + Resource string + + Reaction ReactionFunc +} + +func (r *SimpleReactor) Handles(action Action) bool { + verbCovers := r.Verb == "*" || r.Verb == action.GetVerb() + if !verbCovers { + return false + } + resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource + if !resourceCovers { + return false + } + + return true +} + +func (r *SimpleReactor) React(action Action) (bool, runtime.Object, error) { + return r.Reaction(action) +} + +// SimpleWatchReactor is a WatchReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. 
This allows for easier composition of reaction functions +type SimpleWatchReactor struct { + Resource string + + Reaction WatchReactionFunc +} + +func (r *SimpleWatchReactor) Handles(action Action) bool { + resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource + if !resourceCovers { + return false + } + + return true +} + +func (r *SimpleWatchReactor) React(action Action) (bool, watch.Interface, error) { + return r.Reaction(action) +} + +// SimpleProxyReactor is a ProxyReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions. +type SimpleProxyReactor struct { + Resource string + + Reaction ProxyReactionFunc +} + +func (r *SimpleProxyReactor) Handles(action Action) bool { + resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource + if !resourceCovers { + return false + } + + return true +} + +func (r *SimpleProxyReactor) React(action Action) (bool, restclient.ResponseWrapper, error) { + return r.Reaction(action) +} diff --git a/vendor/k8s.io/code-generator/CONTRIBUTING.md b/vendor/k8s.io/code-generator/CONTRIBUTING.md new file mode 100644 index 0000000000..76625b7bc9 --- /dev/null +++ b/vendor/k8s.io/code-generator/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/code-generator](https://git.k8s.io/kubernetes/staging/src/k8s.io/code-generator) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). 
+ +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information diff --git a/vendor/k8s.io/code-generator/LICENSE b/vendor/k8s.io/code-generator/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/code-generator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS new file mode 100644 index 0000000000..6f7abe3edb --- /dev/null +++ b/vendor/k8s.io/code-generator/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- lavalamp +- wojtek-t +- sttts +reviewers: +- lavalamp +- wojtek-t +- sttts +labels: +- sig/api-machinery +- area/code-generation diff --git a/vendor/k8s.io/code-generator/README.md b/vendor/k8s.io/code-generator/README.md new file mode 100644 index 0000000000..e03c6bf55e --- /dev/null +++ b/vendor/k8s.io/code-generator/README.md @@ -0,0 +1,24 @@ +# code-generator + +Golang code-generators used to implement [Kubernetes-style API types](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md). + +## Purpose + +These code-generators can be used +- in the context of [CustomResourceDefinition](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) to build native, versioned clients, + informers and other helpers +- in the context of [User-provider API Servers](https://github.com/kubernetes/apiserver) to build conversions between internal and versioned types, defaulters, protobuf codecs, + internal and versioned clients and informers. 
+ +## Resources +- The example [sample controller](https://github.com/kubernetes/sample-controller) shows a code example of a controller that uses the clients, listers and informers generated by this library. +- The article [Kubernetes Deep Dive: Code Generation for CustomResources](https://blog.openshift.com/kubernetes-deep-dive-code-generation-customresources/) gives a step by step instruction on how to use this library. + +## Compatibility + +HEAD of this repo will match HEAD of k8s.io/apiserver, k8s.io/apimachinery, and k8s.io/client-go. + +## Where does it come from? + +`code-generator` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/code-generator. +Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here. diff --git a/vendor/k8s.io/code-generator/SECURITY_CONTACTS b/vendor/k8s.io/code-generator/SECURITY_CONTACTS new file mode 100644 index 0000000000..6df6a4d6a1 --- /dev/null +++ b/vendor/k8s.io/code-generator/SECURITY_CONTACTS @@ -0,0 +1,17 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cjcullen +joelsmith +liggitt +philips +tallclair diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS new file mode 100644 index 0000000000..62866d0b19 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- lavalamp +- wojtek-t +- caesarxuchao +reviewers: +- lavalamp +- wojtek-t +- caesarxuchao diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/README.md b/vendor/k8s.io/code-generator/cmd/client-gen/README.md new file mode 100644 index 0000000000..092a61151c --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/README.md @@ -0,0 +1,4 @@ +See [generating-clientset.md](https://git.k8s.io/community/contributors/devel/sig-api-machinery/generating-clientset.md) + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/staging/src/k8s.io/code-generator/client-gen/README.md?pixel)]() diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go new file mode 100644 index 0000000000..f45be1bb83 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package args + +import ( + "fmt" + "path" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + + "k8s.io/code-generator/cmd/client-gen/types" + codegenutil "k8s.io/code-generator/pkg/util" +) + +var DefaultInputDirs = []string{} + +// ClientGenArgs is a wrapper for arguments to client-gen. +type CustomArgs struct { + // A sorted list of group versions to generate. For each of them the package path is found + // in GroupVersionToInputPath. + Groups []types.GroupVersions + + // Overrides for which types should be included in the client. + IncludedTypesOverrides map[types.GroupVersion][]string + + // ClientsetName is the name of the clientset to be generated. It's + // populated from command-line arguments. + ClientsetName string + // ClientsetAPIPath is the default API HTTP path for generated clients. + ClientsetAPIPath string + // ClientsetOnly determines if we should generate the clients for groups and + // types along with the clientset. It's populated from command-line + // arguments. + ClientsetOnly bool + // FakeClient determines if client-gen generates the fake clients. + FakeClient bool +} + +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + ClientsetName: "internalclientset", + ClientsetAPIPath: "/apis", + ClientsetOnly: false, + FakeClient: true, + } + genericArgs.CustomArgs = customArgs + genericArgs.InputDirs = DefaultInputDirs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/clientset") + } + + return genericArgs, customArgs +} + +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet, inputBase string) { + gvsBuilder := NewGroupVersionsBuilder(&ca.Groups) + pflag.Var(NewGVPackagesValue(gvsBuilder, nil), "input", "group/versions that client-gen will generate clients for. At most one version per group is allowed. 
Specified in the format \"group1/version1,group2/version2...\".") + pflag.Var(NewGVTypesValue(&ca.IncludedTypesOverrides, []string{}), "included-types-overrides", "list of group/version/type for which client should be generated. By default, client is generated for all types which have genclient in types.go. This overrides that. For each groupVersion in this list, only the types mentioned here will be included. The default check of genclient will be used for other group versions.") + pflag.Var(NewInputBasePathValue(gvsBuilder, inputBase), "input-base", "base path to look for the api group.") + pflag.StringVarP(&ca.ClientsetName, "clientset-name", "n", ca.ClientsetName, "the name of the generated clientset package.") + pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", ca.ClientsetAPIPath, "the value of default API HTTP path, starting with / and without trailing /.") + pflag.BoolVar(&ca.ClientsetOnly, "clientset-only", ca.ClientsetOnly, "when set, client-gen only generates the clientset shell, without generating the individual typed clients") + pflag.BoolVar(&ca.FakeClient, "fake-clientset", ca.FakeClient, "when set, client-gen will generate the fake clientset that can be used in tests") + + // support old flags + fs.SetNormalizeFunc(mapFlagName("clientset-path", "output-package", fs.GetNormalizeFunc())) +} + +func Validate(genericArgs *args.GeneratorArgs) error { + customArgs := genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + if len(customArgs.ClientsetName) == 0 { + return fmt.Errorf("clientset name cannot be empty") + } + if len(customArgs.ClientsetAPIPath) == 0 { + return fmt.Errorf("clientset API path cannot be empty") + } + + return nil +} + +// GroupVersionPackages returns a map from GroupVersion to the package with the types.go. 
+func (ca *CustomArgs) GroupVersionPackages() map[types.GroupVersion]string { + res := map[types.GroupVersion]string{} + for _, pkg := range ca.Groups { + for _, v := range pkg.Versions { + res[types.GroupVersion{Group: pkg.Group, Version: v.Version}] = v.Package + } + } + return res +} + +func mapFlagName(from, to string, old func(fs *pflag.FlagSet, name string) pflag.NormalizedName) func(fs *pflag.FlagSet, name string) pflag.NormalizedName { + return func(fs *pflag.FlagSet, name string) pflag.NormalizedName { + if name == from { + name = to + } + return old(fs, name) + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go new file mode 100644 index 0000000000..8da71d6f9b --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go @@ -0,0 +1,183 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package args + +import ( + "bytes" + "encoding/csv" + "flag" + "path" + "sort" + "strings" + + "k8s.io/code-generator/cmd/client-gen/types" +) + +type inputBasePathValue struct { + builder *groupVersionsBuilder +} + +var _ flag.Value = &inputBasePathValue{} + +func NewInputBasePathValue(builder *groupVersionsBuilder, def string) *inputBasePathValue { + v := &inputBasePathValue{ + builder: builder, + } + v.Set(def) + return v +} + +func (s *inputBasePathValue) Set(val string) error { + s.builder.importBasePath = val + return s.builder.update() +} + +func (s *inputBasePathValue) Type() string { + return "string" +} + +func (s *inputBasePathValue) String() string { + return s.builder.importBasePath +} + +type gvPackagesValue struct { + builder *groupVersionsBuilder + groups []string + changed bool +} + +func NewGVPackagesValue(builder *groupVersionsBuilder, def []string) *gvPackagesValue { + gvp := new(gvPackagesValue) + gvp.builder = builder + if def != nil { + if err := gvp.set(def); err != nil { + panic(err) + } + } + return gvp +} + +var _ flag.Value = &gvPackagesValue{} + +func (s *gvPackagesValue) set(vs []string) error { + if s.changed { + s.groups = append(s.groups, vs...) + } else { + s.groups = append([]string(nil), vs...) 
+ } + + s.builder.groups = s.groups + return s.builder.update() +} + +func (s *gvPackagesValue) Set(val string) error { + vs, err := readAsCSV(val) + if err != nil { + return err + } + if err := s.set(vs); err != nil { + return err + } + s.changed = true + return nil +} + +func (s *gvPackagesValue) Type() string { + return "stringSlice" +} + +func (s *gvPackagesValue) String() string { + str, _ := writeAsCSV(s.groups) + return "[" + str + "]" +} + +type groupVersionsBuilder struct { + value *[]types.GroupVersions + groups []string + importBasePath string +} + +func NewGroupVersionsBuilder(groups *[]types.GroupVersions) *groupVersionsBuilder { + return &groupVersionsBuilder{ + value: groups, + } +} + +func (p *groupVersionsBuilder) update() error { + var seenGroups = make(map[types.Group]*types.GroupVersions) + for _, v := range p.groups { + pth, gvString := parsePathGroupVersion(v) + gv, err := types.ToGroupVersion(gvString) + if err != nil { + return err + } + + versionPkg := types.PackageVersion{Package: path.Join(p.importBasePath, pth, gv.Group.NonEmpty(), gv.Version.String()), Version: gv.Version} + if group, ok := seenGroups[gv.Group]; ok { + seenGroups[gv.Group].Versions = append(group.Versions, versionPkg) + } else { + seenGroups[gv.Group] = &types.GroupVersions{ + PackageName: gv.Group.NonEmpty(), + Group: gv.Group, + Versions: []types.PackageVersion{versionPkg}, + } + } + } + + var groupNames []string + for groupName := range seenGroups { + groupNames = append(groupNames, groupName.String()) + } + sort.Strings(groupNames) + *p.value = []types.GroupVersions{} + for _, groupName := range groupNames { + *p.value = append(*p.value, *seenGroups[types.Group(groupName)]) + } + + return nil +} + +func parsePathGroupVersion(pgvString string) (gvPath string, gvString string) { + subs := strings.Split(pgvString, "/") + length := len(subs) + switch length { + case 0, 1, 2: + return "", pgvString + default: + return strings.Join(subs[:length-2], "/"), 
strings.Join(subs[length-2:], "/") + } +} + +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +func writeAsCSV(vals []string) (string, error) { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(vals) + if err != nil { + return "", err + } + w.Flush() + return strings.TrimSuffix(b.String(), "\n"), nil +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/args/gvtype.go b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvtype.go new file mode 100644 index 0000000000..e4e3ccb536 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvtype.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package args + +import ( + "flag" + "fmt" + "strings" + + "k8s.io/code-generator/cmd/client-gen/types" +) + +type gvTypeValue struct { + gvToTypes *map[types.GroupVersion][]string + changed bool +} + +func NewGVTypesValue(gvToTypes *map[types.GroupVersion][]string, def []string) *gvTypeValue { + gvt := new(gvTypeValue) + gvt.gvToTypes = gvToTypes + if def != nil { + if err := gvt.set(def); err != nil { + panic(err) + } + } + return gvt +} + +var _ flag.Value = &gvTypeValue{} + +func (s *gvTypeValue) set(vs []string) error { + if !s.changed { + *s.gvToTypes = map[types.GroupVersion][]string{} + } + + for _, input := range vs { + gvString, typeStr, err := parseGroupVersionType(input) + if err != nil { + return err + } + gv, err := types.ToGroupVersion(gvString) + if err != nil { + return err + } + types, ok := (*s.gvToTypes)[gv] + if !ok { + types = []string{} + } + types = append(types, typeStr) + (*s.gvToTypes)[gv] = types + } + + return nil +} + +func (s *gvTypeValue) Set(val string) error { + vs, err := readAsCSV(val) + if err != nil { + return err + } + if err := s.set(vs); err != nil { + return err + } + s.changed = true + return nil +} + +func (s *gvTypeValue) Type() string { + return "stringSlice" +} + +func (s *gvTypeValue) String() string { + strs := make([]string, 0, len(*s.gvToTypes)) + for gv, ts := range *s.gvToTypes { + for _, t := range ts { + strs = append(strs, gv.Group.String()+"/"+gv.Version.String()+"/"+t) + } + } + str, _ := writeAsCSV(strs) + return "[" + str + "]" +} + +func parseGroupVersionType(gvtString string) (gvString string, typeStr string, err error) { + invalidFormatErr := fmt.Errorf("invalid value: %s, should be of the form group/version/type", gvtString) + subs := strings.Split(gvtString, "/") + length := len(subs) + switch length { + case 2: + // gvtString of the form group/type, e.g. 
api/Service,extensions/ReplicaSet + return subs[0] + "/", subs[1], nil + case 3: + return strings.Join(subs[:length-1], "/"), subs[length-1], nil + default: + return "", "", invalidFormatErr + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go new file mode 100644 index 0000000000..18980744f0 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -0,0 +1,403 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generators has the generators for the client-gen utility. +package generators + +import ( + "path/filepath" + "strings" + + clientgenargs "k8s.io/code-generator/cmd/client-gen/args" + "k8s.io/code-generator/cmd/client-gen/generators/fake" + "k8s.io/code-generator/cmd/client-gen/generators/scheme" + "k8s.io/code-generator/cmd/client-gen/generators/util" + "k8s.io/code-generator/cmd/client-gen/path" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + codegennamer "k8s.io/code-generator/pkg/namer" + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// NameSystems returns the name system used by the generators in this package. 
+func NameSystems() namer.NameSystems { + pluralExceptions := map[string]string{ + "Endpoints": "Endpoints", + } + lowercaseNamer := namer.NewAllLowercasePluralNamer(pluralExceptions) + + publicNamer := &ExceptionNamer{ + Exceptions: map[string]string{ + // these exceptions are used to deconflict the generated code + // you can put your fully qualified package like + // to generate a name that doesn't conflict with your group. + // "k8s.io/apis/events/v1beta1.Event": "EventResource" + }, + KeyFunc: func(t *types.Type) string { + return t.Name.Package + "." + t.Name.Name + }, + Delegate: namer.NewPublicNamer(0), + } + privateNamer := &ExceptionNamer{ + Exceptions: map[string]string{ + // these exceptions are used to deconflict the generated code + // you can put your fully qualified package like + // to generate a name that doesn't conflict with your group. + // "k8s.io/apis/events/v1beta1.Event": "eventResource" + }, + KeyFunc: func(t *types.Type) string { + return t.Name.Package + "." + t.Name.Name + }, + Delegate: namer.NewPrivateNamer(0), + } + publicPluralNamer := &ExceptionNamer{ + Exceptions: map[string]string{ + // these exceptions are used to deconflict the generated code + // you can put your fully qualified package like + // to generate a name that doesn't conflict with your group. + // "k8s.io/apis/events/v1beta1.Event": "EventResource" + }, + KeyFunc: func(t *types.Type) string { + return t.Name.Package + "." + t.Name.Name + }, + Delegate: namer.NewPublicPluralNamer(pluralExceptions), + } + privatePluralNamer := &ExceptionNamer{ + Exceptions: map[string]string{ + // you can put your fully qualified package like + // to generate a name that doesn't conflict with your group. 
+ // "k8s.io/apis/events/v1beta1.Event": "eventResource" + // these exceptions are used to deconflict the generated code + "k8s.io/apis/events/v1beta1.Event": "eventResources", + "k8s.io/kubernetes/pkg/apis/events.Event": "eventResources", + }, + KeyFunc: func(t *types.Type) string { + return t.Name.Package + "." + t.Name.Name + }, + Delegate: namer.NewPrivatePluralNamer(pluralExceptions), + } + + return namer.NameSystems{ + "singularKind": namer.NewPublicNamer(0), + "public": publicNamer, + "private": privateNamer, + "raw": namer.NewRawNamer("", nil), + "publicPlural": publicPluralNamer, + "privatePlural": privatePluralNamer, + "allLowercasePlural": lowercaseNamer, + "resource": codegennamer.NewTagOverrideNamer("resourceName", lowercaseNamer), + } +} + +// ExceptionNamer allows you specify exceptional cases with exact names. This allows you to have control +// for handling various conflicts, like group and resource names for instance. +type ExceptionNamer struct { + Exceptions map[string]string + KeyFunc func(*types.Type) string + + Delegate namer.Namer +} + +// Name provides the requested name for a type. +func (n *ExceptionNamer) Name(t *types.Type) string { + key := n.KeyFunc(t) + if exception, ok := n.Exceptions[key]; ok { + return exception + } + return n.Delegate.Name(t) +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. 
+func DefaultNameSystem() string { + return "public" +} + +func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, apiPath string, srcTreePath string, inputPackage string, boilerplate []byte) generator.Package { + groupVersionClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) + return &generator.DefaultPackage{ + PackageName: strings.ToLower(gv.Version.NonEmpty()), + PackagePath: groupVersionClientPackage, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// This package has the automatically generated typed clients. +`), + // GeneratorFunc returns a list of generators. Each generator makes a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. + generator.DefaultGen{OptionalName: "doc"}, + } + // Since we want a file per type that we generate a client for, we + // have to provide a function for this. 
+ for _, t := range typeList { + generators = append(generators, &genClientForType{ + DefaultGen: generator.DefaultGen{ + OptionalName: strings.ToLower(c.Namers["private"].Name(t)), + }, + outputPackage: groupVersionClientPackage, + clientsetPackage: clientsetPackage, + group: gv.Group.NonEmpty(), + version: gv.Version.String(), + groupGoName: groupGoName, + typeToMatch: t, + imports: generator.NewImportTracker(), + }) + } + + generators = append(generators, &genGroup{ + DefaultGen: generator.DefaultGen{ + OptionalName: groupPackageName + "_client", + }, + outputPackage: groupVersionClientPackage, + inputPackage: inputPackage, + clientsetPackage: clientsetPackage, + group: gv.Group.NonEmpty(), + version: gv.Version.String(), + groupGoName: groupGoName, + apiPath: apiPath, + types: typeList, + imports: generator.NewImportTracker(), + }) + + expansionFileName := "generated_expansion" + generators = append(generators, &genExpansion{ + groupPackagePath: filepath.Join(srcTreePath, groupVersionClientPackage), + DefaultGen: generator.DefaultGen{ + OptionalName: expansionFileName, + }, + types: typeList, + }) + + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient + }, + } +} + +func packageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { + return &generator.DefaultPackage{ + PackageName: customArgs.ClientsetName, + PackagePath: clientsetPackage, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// This package has the automatically generated clientset. +`), + // GeneratorFunc returns a list of generators. Each generator generates a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. 
+ generator.DefaultGen{OptionalName: "doc"}, + + &genClientset{ + DefaultGen: generator.DefaultGen{ + OptionalName: "clientset", + }, + groups: customArgs.Groups, + groupGoNames: groupGoNames, + clientsetPackage: clientsetPackage, + outputPackage: customArgs.ClientsetName, + imports: generator.NewImportTracker(), + }, + } + return generators + }, + } +} + +func packageForScheme(customArgs *clientgenargs.CustomArgs, clientsetPackage string, srcTreePath string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { + schemePackage := filepath.Join(clientsetPackage, "scheme") + + // create runtime.Registry for internal client because it has to know about group versions + internalClient := false +NextGroup: + for _, group := range customArgs.Groups { + for _, v := range group.Versions { + if v.String() == "" { + internalClient = true + break NextGroup + } + } + } + + return &generator.DefaultPackage{ + PackageName: "scheme", + PackagePath: schemePackage, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// This package contains the scheme of the automatically generated clientset. +`), + // GeneratorFunc returns a list of generators. Each generator generates a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. + generator.DefaultGen{OptionalName: "doc"}, + + &scheme.GenScheme{ + DefaultGen: generator.DefaultGen{ + OptionalName: "register", + }, + InputPackages: customArgs.GroupVersionPackages(), + OutputPackage: schemePackage, + OutputPath: filepath.Join(srcTreePath, schemePackage), + Groups: customArgs.Groups, + GroupGoNames: groupGoNames, + ImportTracker: generator.NewImportTracker(), + CreateRegistry: internalClient, + }, + } + return generators + }, + } +} + +// applyGroupOverrides applies group name overrides to each package, if applicable. 
If there is a +// comment of the form "// +groupName=somegroup" or "// +groupName=somegroup.foo.bar.io", use the +// first field (somegroup) as the name of the group in Go code, e.g. as the func name in a clientset. +// +// If the first field of the groupName is not unique within the clientset, use "// +groupName=unique +func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.CustomArgs) { + // Create a map from "old GV" to "new GV" so we know what changes we need to make. + changes := make(map[clientgentypes.GroupVersion]clientgentypes.GroupVersion) + for gv, inputDir := range customArgs.GroupVersionPackages() { + p := universe.Package(inputDir) + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + newGV := clientgentypes.GroupVersion{ + Group: clientgentypes.Group(override[0]), + Version: gv.Version, + } + changes[gv] = newGV + } + } + + // Modify customArgs.Groups based on the groupName overrides. + newGroups := make([]clientgentypes.GroupVersions, 0, len(customArgs.Groups)) + for _, gvs := range customArgs.Groups { + gv := clientgentypes.GroupVersion{ + Group: gvs.Group, + Version: gvs.Versions[0].Version, // we only need a version, and the first will do + } + if newGV, ok := changes[gv]; ok { + // There's an override, so use it. + newGVS := clientgentypes.GroupVersions{ + PackageName: gvs.PackageName, + Group: newGV.Group, + Versions: gvs.Versions, + } + newGroups = append(newGroups, newGVS) + } else { + // No override. + newGroups = append(newGroups, gvs) + } + } + customArgs.Groups = newGroups +} + +// Packages makes the client package definition. 
+func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + customArgs, ok := arguments.CustomArgs.(*clientgenargs.CustomArgs) + if !ok { + klog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs") + } + includedTypesOverrides := customArgs.IncludedTypesOverrides + + applyGroupOverrides(context.Universe, customArgs) + + gvToTypes := map[clientgentypes.GroupVersion][]*types.Type{} + groupGoNames := make(map[clientgentypes.GroupVersion]string) + for gv, inputDir := range customArgs.GroupVersionPackages() { + p := context.Universe.Package(path.Vendorless(inputDir)) + + // If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as + // the Go group identifier in CamelCase. It defaults + groupGoNames[gv] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) + if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { + groupGoNames[gv] = namer.IC(override[0]) + } + + // Package are indexed with the vendor prefix stripped + for n, t := range p.Types { + // filter out types which are not included in user specified overrides. + typesOverride, ok := includedTypesOverrides[gv] + if ok { + found := false + for _, typeStr := range typesOverride { + if typeStr == n { + found = true + break + } + } + if !found { + continue + } + } else { + // User has not specified any override for this group version. + // filter out types which dont have genclient. 
+ if tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)); !tags.GenerateClient { + continue + } + } + if _, found := gvToTypes[gv]; !found { + gvToTypes[gv] = []*types.Type{} + } + gvToTypes[gv] = append(gvToTypes[gv], t) + } + } + + var packageList []generator.Package + clientsetPackage := filepath.Join(arguments.OutputPackagePath, customArgs.ClientsetName) + + packageList = append(packageList, packageForClientset(customArgs, clientsetPackage, groupGoNames, boilerplate)) + packageList = append(packageList, packageForScheme(customArgs, clientsetPackage, arguments.OutputBase, groupGoNames, boilerplate)) + if customArgs.FakeClient { + packageList = append(packageList, fake.PackageForClientset(customArgs, clientsetPackage, groupGoNames, boilerplate)) + } + + // If --clientset-only=true, we don't regenerate the individual typed clients. + if customArgs.ClientsetOnly { + return generator.Packages(packageList) + } + + orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} + gvPackages := customArgs.GroupVersionPackages() + for _, group := range customArgs.Groups { + for _, version := range group.Versions { + gv := clientgentypes.GroupVersion{Group: group.Group, Version: version.Version} + types := gvToTypes[gv] + inputPath := gvPackages[gv] + packageList = append(packageList, packageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], customArgs.ClientsetAPIPath, arguments.OutputBase, inputPath, boilerplate)) + if customArgs.FakeClient { + packageList = append(packageList, fake.PackageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], inputPath, boilerplate)) + } + } + } + + return generator.Packages(packageList) +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go new file mode 100644 index 
0000000000..4b3854be6e --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go @@ -0,0 +1,130 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" + + clientgenargs "k8s.io/code-generator/cmd/client-gen/args" + scheme "k8s.io/code-generator/cmd/client-gen/generators/scheme" + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" +) + +func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, inputPackage string, boilerplate []byte) generator.Package { + outputPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty()), "fake") + // TODO: should make this a function, called by here and in client-generator.go + realClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) + return &generator.DefaultPackage{ + PackageName: "fake", + PackagePath: outputPackage, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// Package fake has the automatically generated clients. +`), + // GeneratorFunc returns a list of generators. Each generator makes a + // single file. 
+ GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. + generator.DefaultGen{OptionalName: "doc"}, + } + // Since we want a file per type that we generate a client for, we + // have to provide a function for this. + for _, t := range typeList { + generators = append(generators, &genFakeForType{ + DefaultGen: generator.DefaultGen{ + OptionalName: "fake_" + strings.ToLower(c.Namers["private"].Name(t)), + }, + outputPackage: outputPackage, + inputPackage: inputPackage, + group: gv.Group.NonEmpty(), + version: gv.Version.String(), + groupGoName: groupGoName, + typeToMatch: t, + imports: generator.NewImportTracker(), + }) + } + + generators = append(generators, &genFakeForGroup{ + DefaultGen: generator.DefaultGen{ + OptionalName: "fake_" + groupPackageName + "_client", + }, + outputPackage: outputPackage, + realClientPackage: realClientPackage, + group: gv.Group.NonEmpty(), + version: gv.Version.String(), + groupGoName: groupGoName, + types: typeList, + imports: generator.NewImportTracker(), + }) + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient + }, + } +} + +func PackageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { + return &generator.DefaultPackage{ + // TODO: we'll generate fake clientset for different release in the future. + // Package name and path are hard coded for now. + PackageName: "fake", + PackagePath: filepath.Join(clientsetPackage, "fake"), + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// This package has the automatically generated fake clientset. +`), + // GeneratorFunc returns a list of generators. Each generator generates a + // single file. 
+ GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. + generator.DefaultGen{OptionalName: "doc"}, + + &genClientset{ + DefaultGen: generator.DefaultGen{ + OptionalName: "clientset_generated", + }, + groups: customArgs.Groups, + groupGoNames: groupGoNames, + fakeClientsetPackage: clientsetPackage, + outputPackage: "fake", + imports: generator.NewImportTracker(), + realClientsetPackage: clientsetPackage, + }, + &scheme.GenScheme{ + DefaultGen: generator.DefaultGen{ + OptionalName: "register", + }, + InputPackages: customArgs.GroupVersionPackages(), + OutputPackage: clientsetPackage, + Groups: customArgs.Groups, + GroupGoNames: groupGoNames, + ImportTracker: generator.NewImportTracker(), + PrivateScheme: true, + }, + } + return generators + }, + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go new file mode 100644 index 0000000000..d23b8005f3 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -0,0 +1,167 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "fmt" + "io" + "path/filepath" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// genClientset generates a package for a clientset. +type genClientset struct { + generator.DefaultGen + groups []clientgentypes.GroupVersions + groupGoNames map[clientgentypes.GroupVersion]string + fakeClientsetPackage string + outputPackage string + imports namer.ImportTracker + clientsetGenerated bool + // the import path of the generated real clientset. + realClientsetPackage string +} + +var _ generator.Generator = &genClientset{} + +func (g *genClientset) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +// We only want to call GenerateType() once. +func (g *genClientset) Filter(c *generator.Context, t *types.Type) bool { + ret := !g.clientsetGenerated + g.clientsetGenerated = true + return ret +} + +func (g *genClientset) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) 
+ for _, group := range g.groups { + for _, version := range group.Versions { + groupClientPackage := filepath.Join(g.fakeClientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) + fakeGroupClientPackage := filepath.Join(groupClientPackage, "fake") + + groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), groupClientPackage)) + imports = append(imports, fmt.Sprintf("fake%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), fakeGroupClientPackage)) + } + } + // the package that has the clientset Interface + imports = append(imports, fmt.Sprintf("clientset \"%s\"", g.realClientsetPackage)) + // imports for the code in commonTemplate + imports = append(imports, + "k8s.io/client-go/testing", + "k8s.io/client-go/discovery", + "fakediscovery \"k8s.io/client-go/discovery/fake\"", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/watch", + ) + + return +} + +func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + // TODO: We actually don't need any type information to generate the clientset, + // perhaps we can adapt the go2ild framework to this kind of usage. + sw := generator.NewSnippetWriter(w, c, "$", "$") + + allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames) + + sw.Do(common, nil) + sw.Do(checkImpl, nil) + + for _, group := range allGroups { + m := map[string]interface{}{ + "group": group.Group, + "version": group.Version, + "PackageAlias": group.PackageAlias, + "GroupGoName": group.GroupGoName, + "Version": namer.IC(group.Version.String()), + } + + sw.Do(clientsetInterfaceImplTemplate, m) + } + + return sw.Error() +} + +// This part of code is version-independent, unchanging. 
+var common = ` +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} +` + +var checkImpl = ` +var _ clientset.Interface = &Clientset{} +` + +var clientsetInterfaceImplTemplate = ` +// $.GroupGoName$$.Version$ retrieves the $.GroupGoName$$.Version$Client +func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface { + return &fake$.PackageAlias$.Fake$.GroupGoName$$.Version${Fake: &c.Fake} +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go new file mode 100644 index 0000000000..8f4d5785ef --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go @@ -0,0 +1,130 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + "io" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// genFakeForGroup produces a file for a group client, e.g. ExtensionsClient for the extension group. 
+type genFakeForGroup struct { + generator.DefaultGen + outputPackage string + realClientPackage string + group string + version string + groupGoName string + // types in this group + types []*types.Type + imports namer.ImportTracker + // If the genGroup has been called. This generator should only execute once. + called bool +} + +var _ generator.Generator = &genFakeForGroup{} + +// We only want to call GenerateType() once per group. +func (g *genFakeForGroup) Filter(c *generator.Context, t *types.Type) bool { + if !g.called { + g.called = true + return true + } + return false +} + +func (g *genFakeForGroup) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genFakeForGroup) Imports(c *generator.Context) (imports []string) { + imports = g.imports.ImportLines() + if len(g.types) != 0 { + imports = append(imports, fmt.Sprintf("%s \"%s\"", strings.ToLower(filepath.Base(g.realClientPackage)), g.realClientPackage)) + } + return imports +} + +func (g *genFakeForGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + m := map[string]interface{}{ + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "Fake": c.Universe.Type(types.Name{Package: "k8s.io/client-go/testing", Name: "Fake"}), + "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "RESTClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClient"}), + } + + sw.Do(groupClientTemplate, m) + for _, t := range g.types { + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + wrapper := map[string]interface{}{ + "type": t, + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "realClientPackage": strings.ToLower(filepath.Base(g.realClientPackage)), + } + if 
tags.NonNamespaced { + sw.Do(getterImplNonNamespaced, wrapper) + continue + } + sw.Do(getterImplNamespaced, wrapper) + } + sw.Do(getRESTClient, m) + return sw.Error() +} + +var groupClientTemplate = ` +type Fake$.GroupGoName$$.Version$ struct { + *$.Fake|raw$ +} +` + +var getterImplNamespaced = ` +func (c *Fake$.GroupGoName$$.Version$) $.type|publicPlural$(namespace string) $.realClientPackage$.$.type|public$Interface { + return &Fake$.type|publicPlural${c, namespace} +} +` + +var getterImplNonNamespaced = ` +func (c *Fake$.GroupGoName$$.Version$) $.type|publicPlural$() $.realClientPackage$.$.type|public$Interface { + return &Fake$.type|publicPlural${c} +} +` + +var getRESTClient = ` +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *Fake$.GroupGoName$$.Version$) RESTClient() $.RESTClientInterface|raw$ { + var ret *$.RESTClient|raw$ + return ret +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go new file mode 100644 index 0000000000..f5888aef15 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -0,0 +1,479 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "io" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + "k8s.io/code-generator/cmd/client-gen/path" +) + +// genFakeForType produces a file for each top-level type. +type genFakeForType struct { + generator.DefaultGen + outputPackage string + group string + version string + groupGoName string + inputPackage string + typeToMatch *types.Type + imports namer.ImportTracker +} + +var _ generator.Generator = &genFakeForType{} + +// Filter ignores all but one type because we're making a single file per type. +func (g *genFakeForType) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } + +func (g *genFakeForType) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genFakeForType) Imports(c *generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +// Ideally, we'd like genStatus to return true if there is a subresource path +// registered for "status" in the API server, but we do not have that +// information, so genStatus returns true if the type has a status field. +func genStatus(t *types.Type) bool { + // Default to true if we have a Status member + hasStatus := false + for _, m := range t.Members { + if m.Name == "Status" { + hasStatus = true + break + } + } + + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return hasStatus && !tags.NoStatus +} + +// hasObjectMeta returns true if the type has a ObjectMeta field. +func hasObjectMeta(t *types.Type) bool { + for _, m := range t.Members { + if m.Embedded == true && m.Name == "ObjectMeta" { + return true + } + } + return false +} + +// GenerateType makes the body of a file implementing the individual typed client for type t. 
+func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + pkg := filepath.Base(t.Name.Package) + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + canonicalGroup := g.group + if canonicalGroup == "core" { + canonicalGroup = "" + } + + groupName := g.group + if g.group == "core" { + groupName = "" + } + + // allow user to define a group name that's different from the one parsed from the directory. + p := c.Universe.Package(path.Vendorless(g.inputPackage)) + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + groupName = override[0] + } + + const pkgClientGoTesting = "k8s.io/client-go/testing" + m := map[string]interface{}{ + "type": t, + "inputType": t, + "resultType": t, + "subresourcePath": "", + "package": pkg, + "Package": namer.IC(pkg), + "namespaced": !tags.NonNamespaced, + "Group": namer.IC(g.group), + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "group": canonicalGroup, + "groupName": groupName, + "version": g.version, + "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "Everything": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Everything"}), + "GroupVersionResource": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionResource"}), + "GroupVersionKind": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionKind"}), + "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: 
"PatchType"}), + "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), + + "NewRootListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootListAction"}), + "NewListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewListAction"}), + "NewRootGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetAction"}), + "NewGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetAction"}), + "NewRootDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteAction"}), + "NewDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteAction"}), + "NewRootDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteCollectionAction"}), + "NewDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteCollectionAction"}), + "NewRootUpdateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateAction"}), + "NewUpdateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateAction"}), + "NewRootCreateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateAction"}), + "NewCreateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateAction"}), + "NewRootWatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootWatchAction"}), + "NewWatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewWatchAction"}), + "NewCreateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateSubresourceAction"}), + "NewRootCreateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateSubresourceAction"}), + 
"NewUpdateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateSubresourceAction"}), + "NewGetSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetSubresourceAction"}), + "NewRootGetSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetSubresourceAction"}), + "NewRootUpdateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateSubresourceAction"}), + "NewRootPatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchAction"}), + "NewPatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchAction"}), + "NewRootPatchSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchSubresourceAction"}), + "NewPatchSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchSubresourceAction"}), + "ExtractFromListOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "ExtractFromListOptions"}), + } + + if tags.NonNamespaced { + sw.Do(structNonNamespaced, m) + } else { + sw.Do(structNamespaced, m) + } + + if tags.NoVerbs { + return sw.Error() + } + sw.Do(resource, m) + sw.Do(kind, m) + + if tags.HasVerb("get") { + sw.Do(getTemplate, m) + } + if tags.HasVerb("list") { + if hasObjectMeta(t) { + sw.Do(listUsingOptionsTemplate, m) + } else { + sw.Do(listTemplate, m) + } + } + if tags.HasVerb("watch") { + sw.Do(watchTemplate, m) + } + + if tags.HasVerb("create") { + sw.Do(createTemplate, m) + } + if tags.HasVerb("update") { + sw.Do(updateTemplate, m) + } + if tags.HasVerb("updateStatus") && genStatus(t) { + sw.Do(updateStatusTemplate, m) + } + if tags.HasVerb("delete") { + sw.Do(deleteTemplate, m) + } + if tags.HasVerb("deleteCollection") { + sw.Do(deleteCollectionTemplate, m) + } + if tags.HasVerb("patch") { + sw.Do(patchTemplate, m) + } + + // generate 
extended client methods + for _, e := range tags.Extensions { + inputType := *t + resultType := *t + if len(e.InputTypeOverride) > 0 { + if name, pkg := e.Input(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + inputType = *newType + } else { + inputType.Name.Name = e.InputTypeOverride + } + } + if len(e.ResultTypeOverride) > 0 { + if name, pkg := e.Result(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + resultType = *newType + } else { + resultType.Name.Name = e.ResultTypeOverride + } + } + m["inputType"] = &inputType + m["resultType"] = &resultType + m["subresourcePath"] = e.SubResourcePath + + if e.HasVerb("get") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, getSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, getTemplate), m) + } + } + + if e.HasVerb("list") { + + sw.Do(adjustTemplate(e.VerbName, e.VerbType, listTemplate), m) + } + + // TODO: Figure out schemantic for watching a sub-resource. + if e.HasVerb("watch") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, watchTemplate), m) + } + + if e.HasVerb("create") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, createSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, createTemplate), m) + } + } + + if e.HasVerb("update") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateTemplate), m) + } + } + + // TODO: Figure out schemantic for deleting a sub-resource (what arguments + // are passed, does it need two names? etc. 
+ if e.HasVerb("delete") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, deleteTemplate), m) + } + + if e.HasVerb("patch") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, patchTemplate), m) + } + } + + return sw.Error() +} + +// adjustTemplate adjust the origin verb template using the expansion name. +// TODO: Make the verbs in templates parametrized so the strings.Replace() is +// not needed. +func adjustTemplate(name, verbType, template string) string { + return strings.Replace(template, " "+strings.Title(verbType), " "+name, -1) +} + +// template for the struct that implements the type's interface +var structNamespaced = ` +// Fake$.type|publicPlural$ implements $.type|public$Interface +type Fake$.type|publicPlural$ struct { + Fake *Fake$.GroupGoName$$.Version$ + ns string +} +` + +// template for the struct that implements the type's interface +var structNonNamespaced = ` +// Fake$.type|publicPlural$ implements $.type|public$Interface +type Fake$.type|publicPlural$ struct { + Fake *Fake$.GroupGoName$$.Version$ +} +` + +var resource = ` +var $.type|allLowercasePlural$Resource = $.GroupVersionResource|raw${Group: "$.groupName$", Version: "$.version$", Resource: "$.type|resource$"} +` + +var kind = ` +var $.type|allLowercasePlural$Kind = $.GroupVersionKind|raw${Group: "$.groupName$", Version: "$.version$", Kind: "$.type|singularKind$"} +` + +var listTemplate = ` +// List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors. +func (c *Fake$.type|publicPlural$) List(opts $.ListOptions|raw$) (result *$.type|raw$List, err error) { + obj, err := c.Fake. 
+ $if .namespaced$Invokes($.NewListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), &$.type|raw$List{}) + $else$Invokes($.NewRootListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), &$.type|raw$List{})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.type|raw$List), err +} +` + +var listUsingOptionsTemplate = ` +// List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors. +func (c *Fake$.type|publicPlural$) List(opts $.ListOptions|raw$) (result *$.type|raw$List, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), &$.type|raw$List{}) + $else$Invokes($.NewRootListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), &$.type|raw$List{})$end$ + if obj == nil { + return nil, err + } + + label, _, _ := $.ExtractFromListOptions|raw$(opts) + if label == nil { + label = $.Everything|raw$() + } + list := &$.type|raw$List{ListMeta: obj.(*$.type|raw$List).ListMeta} + for _, item := range obj.(*$.type|raw$List).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} +` + +var getTemplate = ` +// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. +func (c *Fake$.type|publicPlural$) Get(name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. 
+ $if .namespaced$Invokes($.NewGetAction|raw$($.type|allLowercasePlural$Resource, c.ns, name), &$.resultType|raw${}) + $else$Invokes($.NewRootGetAction|raw$($.type|allLowercasePlural$Resource, name), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var getSubresourceTemplate = ` +// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. +func (c *Fake$.type|publicPlural$) Get($.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${}) + $else$Invokes($.NewRootGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var deleteTemplate = ` +// Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs. +func (c *Fake$.type|publicPlural$) Delete(name string, options *$.DeleteOptions|raw$) error { + _, err := c.Fake. + $if .namespaced$Invokes($.NewDeleteAction|raw$($.type|allLowercasePlural$Resource, c.ns, name), &$.type|raw${}) + $else$Invokes($.NewRootDeleteAction|raw$($.type|allLowercasePlural$Resource, name), &$.type|raw${})$end$ + return err +} +` + +var deleteCollectionTemplate = ` +// DeleteCollection deletes a collection of objects. 
+func (c *Fake$.type|publicPlural$) DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error { + $if .namespaced$action := $.NewDeleteCollectionAction|raw$($.type|allLowercasePlural$Resource, c.ns, listOptions) + $else$action := $.NewRootDeleteCollectionAction|raw$($.type|allLowercasePlural$Resource, listOptions) + $end$ + _, err := c.Fake.Invokes(action, &$.type|raw$List{}) + return err +} +` +var createTemplate = ` +// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *Fake$.type|publicPlural$) Create($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewCreateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${}) + $else$Invokes($.NewRootCreateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var createSubresourceTemplate = ` +// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *Fake$.type|publicPlural$) Create($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. 
+ $if .namespaced$Invokes($.NewCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", c.ns, $.inputType|private$), &$.resultType|raw${}) + $else$Invokes($.NewRootCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var updateTemplate = ` +// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *Fake$.type|publicPlural$) Update($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewUpdateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${}) + $else$Invokes($.NewRootUpdateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var updateSubresourceTemplate = ` +// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *Fake$.type|publicPlural$) Update($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. 
+ $if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", c.ns, $.inputType|private$), &$.inputType|raw${}) + $else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` + +var updateStatusTemplate = ` +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *Fake$.type|publicPlural$) UpdateStatus($.type|private$ *$.type|raw$) (*$.type|raw$, error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", c.ns, $.type|private$), &$.type|raw${}) + $else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", $.type|private$), &$.type|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.type|raw$), err +} +` + +var watchTemplate = ` +// Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. +func (c *Fake$.type|publicPlural$) Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { + return c.Fake. + $if .namespaced$InvokesWatch($.NewWatchAction|raw$($.type|allLowercasePlural$Resource, c.ns, opts)) + $else$InvokesWatch($.NewRootWatchAction|raw$($.type|allLowercasePlural$Resource, opts))$end$ +} +` + +var patchTemplate = ` +// Patch applies the patch and returns the patched $.resultType|private$. +func (c *Fake$.type|publicPlural$) Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) { + obj, err := c.Fake. + $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, name, pt, data, subresources... 
), &$.resultType|raw${}) + $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, name, pt, data, subresources...), &$.resultType|raw${})$end$ + if obj == nil { + return nil, err + } + return obj.(*$.resultType|raw$), err +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go new file mode 100644 index 0000000000..f7254343bd --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -0,0 +1,183 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "io" + "path/filepath" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// genClientset generates a package for a clientset. +type genClientset struct { + generator.DefaultGen + groups []clientgentypes.GroupVersions + groupGoNames map[clientgentypes.GroupVersion]string + clientsetPackage string + outputPackage string + imports namer.ImportTracker + clientsetGenerated bool +} + +var _ generator.Generator = &genClientset{} + +func (g *genClientset) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +// We only want to call GenerateType() once. 
+func (g *genClientset) Filter(c *generator.Context, t *types.Type) bool { + ret := !g.clientsetGenerated + g.clientsetGenerated = true + return ret +} + +func (g *genClientset) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + for _, group := range g.groups { + for _, version := range group.Versions { + typedClientPath := filepath.Join(g.clientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) + groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), typedClientPath)) + } + } + return +} + +func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + // TODO: We actually don't need any type information to generate the clientset, + // perhaps we can adapt the go2ild framework to this kind of usage. 
+ sw := generator.NewSnippetWriter(w, c, "$", "$") + + allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames) + m := map[string]interface{}{ + "allGroups": allGroups, + "Config": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), + "DefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), + "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "DiscoveryInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryInterface"}), + "DiscoveryClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryClient"}), + "NewDiscoveryClientForConfig": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfig"}), + "NewDiscoveryClientForConfigOrDie": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfigOrDie"}), + "NewDiscoveryClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClient"}), + "flowcontrolNewTokenBucketRateLimiter": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/flowcontrol", Name: "NewTokenBucketRateLimiter"}), + } + sw.Do(clientsetInterface, m) + sw.Do(clientsetTemplate, m) + for _, g := range allGroups { + sw.Do(clientsetInterfaceImplTemplate, g) + } + sw.Do(getDiscoveryTemplate, m) + sw.Do(newClientsetForConfigTemplate, m) + sw.Do(newClientsetForConfigOrDieTemplate, m) + sw.Do(newClientsetForRESTClientTemplate, m) + + return sw.Error() +} + +var clientsetInterface = ` +type Interface interface { + Discovery() $.DiscoveryInterface|raw$ + $range .allGroups$$.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface + $end$ +} +` + +var clientsetTemplate = ` +// Clientset contains the clients for groups. 
Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *$.DiscoveryClient|raw$ + $range .allGroups$$.LowerCaseGroupGoName$$.Version$ *$.PackageAlias$.$.GroupGoName$$.Version$Client + $end$ +} +` + +var clientsetInterfaceImplTemplate = ` +// $.GroupGoName$$.Version$ retrieves the $.GroupGoName$$.Version$Client +func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface { + return c.$.LowerCaseGroupGoName$$.Version$ +} +` + +var getDiscoveryTemplate = ` +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() $.DiscoveryInterface|raw$ { + if c == nil { + return nil + } + return c.DiscoveryClient +} +` + +var newClientsetForConfigTemplate = ` +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *$.Config|raw$) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = $.flowcontrolNewTokenBucketRateLimiter|raw$(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error +$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$, err =$.PackageAlias$.NewForConfig(&configShallowCopy) + if err!=nil { + return nil, err + } +$end$ + cs.DiscoveryClient, err = $.NewDiscoveryClientForConfig|raw$(&configShallowCopy) + if err!=nil { + return nil, err + } + return &cs, nil +} +` + +var newClientsetForConfigOrDieTemplate = ` +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *$.Config|raw$) *Clientset { + var cs Clientset +$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$ =$.PackageAlias$.NewForConfigOrDie(c) +$end$ + cs.DiscoveryClient = $.NewDiscoveryClientForConfigOrDie|raw$(c) + return &cs +} +` + +var newClientsetForRESTClientTemplate = ` +// New creates a new Clientset for the given RESTClient. +func New(c $.RESTClientInterface|raw$) *Clientset { + var cs Clientset +$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$ =$.PackageAlias$.New(c) +$end$ + cs.DiscoveryClient = $.NewDiscoveryClient|raw$(c) + return &cs +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go new file mode 100644 index 0000000000..f47c079e02 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "os" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" +) + +// genExpansion produces a file for a group client, e.g. ExtensionsClient for the extension group. +type genExpansion struct { + generator.DefaultGen + groupPackagePath string + // types in a group + types []*types.Type +} + +// We only want to call GenerateType() once per group. 
+func (g *genExpansion) Filter(c *generator.Context, t *types.Type) bool { + return len(g.types) == 0 || t == g.types[0] +} + +func (g *genExpansion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + for _, t := range g.types { + if _, err := os.Stat(filepath.Join(g.groupPackagePath, strings.ToLower(t.Name.Name+"_expansion.go"))); os.IsNotExist(err) { + sw.Do(expansionInterfaceTemplate, t) + } + } + return sw.Error() +} + +var expansionInterfaceTemplate = ` +type $.|public$Expansion interface {} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go new file mode 100644 index 0000000000..215a0171ca --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go @@ -0,0 +1,246 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path/filepath" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + "k8s.io/code-generator/cmd/client-gen/path" +) + +// genGroup produces a file for a group client, e.g. ExtensionsClient for the extension group. 
+type genGroup struct { + generator.DefaultGen + outputPackage string + group string + version string + groupGoName string + apiPath string + // types in this group + types []*types.Type + imports namer.ImportTracker + inputPackage string + clientsetPackage string + // If the genGroup has been called. This generator should only execute once. + called bool +} + +var _ generator.Generator = &genGroup{} + +// We only want to call GenerateType() once per group. +func (g *genGroup) Filter(c *generator.Context, t *types.Type) bool { + if !g.called { + g.called = true + return true + } + return false +} + +func (g *genGroup) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genGroup) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + imports = append(imports, filepath.Join(g.clientsetPackage, "scheme")) + return +} + +func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + apiPath := func(group string) string { + if group == "core" { + return `"/api"` + } + return `"` + g.apiPath + `"` + } + + groupName := g.group + if g.group == "core" { + groupName = "" + } + // allow user to define a group name that's different from the one parsed from the directory. 
+ p := c.Universe.Package(path.Vendorless(g.inputPackage)) + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + groupName = override[0] + } + + m := map[string]interface{}{ + "group": g.group, + "version": g.version, + "groupName": groupName, + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "types": g.types, + "apiPath": apiPath(g.group), + "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), + "runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}), + "restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), + "restDefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), + "restRESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "restRESTClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientFor"}), + "SchemeGroupVersion": c.Universe.Variable(types.Name{Package: path.Vendorless(g.inputPackage), Name: "SchemeGroupVersion"}), + } + sw.Do(groupInterfaceTemplate, m) + sw.Do(groupClientTemplate, m) + for _, t := range g.types { + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + wrapper := map[string]interface{}{ + "type": t, + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + } + if tags.NonNamespaced { + sw.Do(getterImplNonNamespaced, wrapper) + } else { + sw.Do(getterImplNamespaced, wrapper) + } + } + sw.Do(newClientForConfigTemplate, m) + sw.Do(newClientForConfigOrDieTemplate, m) + sw.Do(newClientForRESTClientTemplate, m) + if g.version == "" { + sw.Do(setInternalVersionClientDefaultsTemplate, m) + } else { + sw.Do(setClientDefaultsTemplate, m) + } + 
sw.Do(getRESTClient, m) + + return sw.Error() +} + +var groupInterfaceTemplate = ` +type $.GroupGoName$$.Version$Interface interface { + RESTClient() $.restRESTClientInterface|raw$ + $range .types$ $.|publicPlural$Getter + $end$ +} +` + +var groupClientTemplate = ` +// $.GroupGoName$$.Version$Client is used to interact with features provided by the $.groupName$ group. +type $.GroupGoName$$.Version$Client struct { + restClient $.restRESTClientInterface|raw$ +} +` + +var getterImplNamespaced = ` +func (c *$.GroupGoName$$.Version$Client) $.type|publicPlural$(namespace string) $.type|public$Interface { + return new$.type|publicPlural$(c, namespace) +} +` + +var getterImplNonNamespaced = ` +func (c *$.GroupGoName$$.Version$Client) $.type|publicPlural$() $.type|public$Interface { + return new$.type|publicPlural$(c) +} +` + +var newClientForConfigTemplate = ` +// NewForConfig creates a new $.GroupGoName$$.Version$Client for the given config. +func NewForConfig(c *$.restConfig|raw$) (*$.GroupGoName$$.Version$Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := $.restRESTClientFor|raw$(&config) + if err != nil { + return nil, err + } + return &$.GroupGoName$$.Version$Client{client}, nil +} +` + +var newClientForConfigOrDieTemplate = ` +// NewForConfigOrDie creates a new $.GroupGoName$$.Version$Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *$.restConfig|raw$) *$.GroupGoName$$.Version$Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} +` + +var getRESTClient = ` +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *$.GroupGoName$$.Version$Client) RESTClient() $.restRESTClientInterface|raw$ { + if c == nil { + return nil + } + return c.restClient +} +` + +var newClientForRESTClientTemplate = ` +// New creates a new $.GroupGoName$$.Version$Client for the given RESTClient. +func New(c $.restRESTClientInterface|raw$) *$.GroupGoName$$.Version$Client { + return &$.GroupGoName$$.Version$Client{c} +} +` + +var setInternalVersionClientDefaultsTemplate = ` +func setConfigDefaults(config *$.restConfig|raw$) error { + config.APIPath = $.apiPath$ + if config.UserAgent == "" { + config.UserAgent = $.restDefaultKubernetesUserAgent|raw$() + } + if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("$.groupName$")[0].Group { + gv := scheme.Scheme.PrioritizedVersionsForGroup("$.groupName$")[0] + config.GroupVersion = &gv + } + config.NegotiatedSerializer = scheme.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + + return nil +} +` + +var setClientDefaultsTemplate = ` +func setConfigDefaults(config *$.restConfig|raw$) error { + gv := $.SchemeGroupVersion|raw$ + config.GroupVersion = &gv + config.APIPath = $.apiPath$ + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = $.restDefaultKubernetesUserAgent|raw$() + } + + return nil +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go new file mode 100644 index 0000000000..3e8fc7c4c6 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go @@ -0,0 +1,599 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// genClientForType produces a file for each top-level type. +type genClientForType struct { + generator.DefaultGen + outputPackage string + clientsetPackage string + group string + version string + groupGoName string + typeToMatch *types.Type + imports namer.ImportTracker +} + +var _ generator.Generator = &genClientForType{} + +// Filter ignores all but one type because we're making a single file per type. +func (g *genClientForType) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } + +func (g *genClientForType) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genClientForType) Imports(c *generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +// Ideally, we'd like genStatus to return true if there is a subresource path +// registered for "status" in the API server, but we do not have that +// information, so genStatus returns true if the type has a status field. 
+func genStatus(t *types.Type) bool { + // Default to true if we have a Status member + hasStatus := false + for _, m := range t.Members { + if m.Name == "Status" { + hasStatus = true + break + } + } + return hasStatus && !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).NoStatus +} + +// GenerateType makes the body of a file implementing the individual typed client for type t. +func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + pkg := filepath.Base(t.Name.Package) + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + type extendedInterfaceMethod struct { + template string + args map[string]interface{} + } + extendedMethods := []extendedInterfaceMethod{} + for _, e := range tags.Extensions { + inputType := *t + resultType := *t + // TODO: Extract this to some helper method as this code is copied into + // 2 other places. 
+ if len(e.InputTypeOverride) > 0 { + if name, pkg := e.Input(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + inputType = *newType + } else { + inputType.Name.Name = e.InputTypeOverride + } + } + if len(e.ResultTypeOverride) > 0 { + if name, pkg := e.Result(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + resultType = *newType + } else { + resultType.Name.Name = e.ResultTypeOverride + } + } + var updatedVerbtemplate string + if _, exists := subresourceDefaultVerbTemplates[e.VerbType]; e.IsSubresource() && exists { + updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(subresourceDefaultVerbTemplates[e.VerbType], strings.Title(e.VerbType)+"(") + } else { + updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(defaultVerbTemplates[e.VerbType], strings.Title(e.VerbType)+"(") + } + extendedMethods = append(extendedMethods, extendedInterfaceMethod{ + template: updatedVerbtemplate, + args: map[string]interface{}{ + "type": t, + "inputType": &inputType, + "resultType": &resultType, + "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), + }, + }) + } + m := map[string]interface{}{ + "type": t, + "inputType": t, + "resultType": t, + "package": pkg, + "Package": namer.IC(pkg), + "namespaced": !tags.NonNamespaced, + "Group": namer.IC(g.group), + "subresource": false, + "subresourcePath": "", + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "ListOptions": 
c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), + "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), + "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "schemeParameterCodec": c.Universe.Variable(types.Name{Package: filepath.Join(g.clientsetPackage, "scheme"), Name: "ParameterCodec"}), + } + + sw.Do(getterComment, m) + if tags.NonNamespaced { + sw.Do(getterNonNamespaced, m) + } else { + sw.Do(getterNamespaced, m) + } + + sw.Do(interfaceTemplate1, m) + if !tags.NoVerbs { + if !genStatus(t) { + tags.SkipVerbs = append(tags.SkipVerbs, "updateStatus") + } + interfaceSuffix := "" + if len(extendedMethods) > 0 { + interfaceSuffix = "\n" + } + sw.Do("\n"+generateInterface(tags)+interfaceSuffix, m) + // add extended verbs into interface + for _, v := range extendedMethods { + sw.Do(v.template+interfaceSuffix, v.args) + } + + } + sw.Do(interfaceTemplate4, m) + + if tags.NonNamespaced { + sw.Do(structNonNamespaced, m) + sw.Do(newStructNonNamespaced, m) + } else { + sw.Do(structNamespaced, m) + sw.Do(newStructNamespaced, m) + } + + if tags.NoVerbs { + return sw.Error() + } + + if tags.HasVerb("get") { + sw.Do(getTemplate, m) + } + if tags.HasVerb("list") { + sw.Do(listTemplate, m) + } + if tags.HasVerb("watch") { + sw.Do(watchTemplate, m) + } + + if tags.HasVerb("create") { + sw.Do(createTemplate, m) + } + if tags.HasVerb("update") { + sw.Do(updateTemplate, m) + } + if tags.HasVerb("updateStatus") { + sw.Do(updateStatusTemplate, m) + } + if tags.HasVerb("delete") { + sw.Do(deleteTemplate, m) + } + if tags.HasVerb("deleteCollection") { + sw.Do(deleteCollectionTemplate, m) + } + if 
tags.HasVerb("patch") { + sw.Do(patchTemplate, m) + } + + // generate expansion methods + for _, e := range tags.Extensions { + inputType := *t + resultType := *t + if len(e.InputTypeOverride) > 0 { + if name, pkg := e.Input(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + inputType = *newType + } else { + inputType.Name.Name = e.InputTypeOverride + } + } + if len(e.ResultTypeOverride) > 0 { + if name, pkg := e.Result(); len(pkg) > 0 { + newType := c.Universe.Type(types.Name{Package: pkg, Name: name}) + resultType = *newType + } else { + resultType.Name.Name = e.ResultTypeOverride + } + } + m["inputType"] = &inputType + m["resultType"] = &resultType + m["subresourcePath"] = e.SubResourcePath + + if e.HasVerb("get") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, getSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, getTemplate), m) + } + } + + if e.HasVerb("list") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, listSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, listTemplate), m) + } + } + + // TODO: Figure out schemantic for watching a sub-resource. + if e.HasVerb("watch") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, watchTemplate), m) + } + + if e.HasVerb("create") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, createSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, createTemplate), m) + } + } + + if e.HasVerb("update") { + if e.IsSubresource() { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateSubresourceTemplate), m) + } else { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateTemplate), m) + } + } + + // TODO: Figure out schemantic for deleting a sub-resource (what arguments + // are passed, does it need two names? etc. 
+ if e.HasVerb("delete") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, deleteTemplate), m) + } + + if e.HasVerb("patch") { + sw.Do(adjustTemplate(e.VerbName, e.VerbType, patchTemplate), m) + } + } + + return sw.Error() +} + +// adjustTemplate adjust the origin verb template using the expansion name. +// TODO: Make the verbs in templates parametrized so the strings.Replace() is +// not needed. +func adjustTemplate(name, verbType, template string) string { + return strings.Replace(template, " "+strings.Title(verbType), " "+name, -1) +} + +func generateInterface(tags util.Tags) string { + // need an ordered list here to guarantee order of generated methods. + out := []string{} + for _, m := range util.SupportedVerbs { + if tags.HasVerb(m) { + out = append(out, defaultVerbTemplates[m]) + } + } + return strings.Join(out, "\n") +} + +var subresourceDefaultVerbTemplates = map[string]string{ + "create": `Create($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (*$.resultType|raw$, error)`, + "list": `List($.type|private$Name string, opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, + "update": `Update($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (*$.resultType|raw$, error)`, + "get": `Get($.type|private$Name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`, +} + +var defaultVerbTemplates = map[string]string{ + "create": `Create(*$.inputType|raw$) (*$.resultType|raw$, error)`, + "update": `Update(*$.inputType|raw$) (*$.resultType|raw$, error)`, + "updateStatus": `UpdateStatus(*$.type|raw$) (*$.type|raw$, error)`, + "delete": `Delete(name string, options *$.DeleteOptions|raw$) error`, + "deleteCollection": `DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error`, + "get": `Get(name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`, + "list": `List(opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`, + "watch": `Watch(opts $.ListOptions|raw$) 
($.watchInterface|raw$, error)`, + "patch": `Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error)`, +} + +// group client will implement this interface. +var getterComment = ` +// $.type|publicPlural$Getter has a method to return a $.type|public$Interface. +// A group's client should implement this interface.` + +var getterNamespaced = ` +type $.type|publicPlural$Getter interface { + $.type|publicPlural$(namespace string) $.type|public$Interface +} +` + +var getterNonNamespaced = ` +type $.type|publicPlural$Getter interface { + $.type|publicPlural$() $.type|public$Interface +} +` + +// this type's interface, typed client will implement this interface. +var interfaceTemplate1 = ` +// $.type|public$Interface has methods to work with $.type|public$ resources. +type $.type|public$Interface interface {` + +var interfaceTemplate4 = ` + $.type|public$Expansion +} +` + +// template for the struct that implements the type's interface +var structNamespaced = ` +// $.type|privatePlural$ implements $.type|public$Interface +type $.type|privatePlural$ struct { + client $.RESTClientInterface|raw$ + ns string +} +` + +// template for the struct that implements the type's interface +var structNonNamespaced = ` +// $.type|privatePlural$ implements $.type|public$Interface +type $.type|privatePlural$ struct { + client $.RESTClientInterface|raw$ +} +` + +var newStructNamespaced = ` +// new$.type|publicPlural$ returns a $.type|publicPlural$ +func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { + return &$.type|privatePlural${ + client: c.RESTClient(), + ns: namespace, + } +} +` + +var newStructNonNamespaced = ` +// new$.type|publicPlural$ returns a $.type|publicPlural$ +func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { + return &$.type|privatePlural${ + client: c.RESTClient(), + } +} +` +var listTemplate = ` +// List takes label and 
field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. +func (c *$.type|privatePlural$) List(opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &$.resultType|raw$List{} + err = c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). + Timeout(timeout). + Do(). + Into(result) + return +} +` + +var listSubresourceTemplate = ` +// List takes $.type|raw$ name, label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. +func (c *$.type|privatePlural$) List($.type|private$Name string, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &$.resultType|raw$List{} + err = c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$Name). + SubResource("$.subresourcePath$"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). + Timeout(timeout). + Do(). + Into(result) + return +} +` + +var getTemplate = ` +// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. +func (c *$.type|privatePlural$) Get(name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name(name). + VersionedParams(&options, $.schemeParameterCodec|raw$). + Do(). + Into(result) + return +} +` + +var getSubresourceTemplate = ` +// Get takes name of the $.type|private$, and returns the corresponding $.resultType|raw$ object, and an error if there is any. 
+func (c *$.type|privatePlural$) Get($.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$Name). + SubResource("$.subresourcePath$"). + VersionedParams(&options, $.schemeParameterCodec|raw$). + Do(). + Into(result) + return +} +` + +var deleteTemplate = ` +// Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs. +func (c *$.type|privatePlural$) Delete(name string, options *$.DeleteOptions|raw$) error { + return c.client.Delete(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name(name). + Body(options). + Do(). + Error() +} +` + +var deleteCollectionTemplate = ` +// DeleteCollection deletes a collection of objects. +func (c *$.type|privatePlural$) DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil{ + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + VersionedParams(&listOptions, $.schemeParameterCodec|raw$). + Timeout(timeout). + Body(options). + Do(). + Error() +} +` + +var createSubresourceTemplate = ` +// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) Create($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Post(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$Name). + SubResource("$.subresourcePath$"). + Body($.inputType|private$). + Do(). 
+ Into(result) + return +} +` + +var createTemplate = ` +// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) Create($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Post(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Body($.inputType|private$). + Do(). + Into(result) + return +} +` + +var updateSubresourceTemplate = ` +// Update takes the top resource name and the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) Update($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Put(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$Name). + SubResource("$.subresourcePath$"). + Body($.inputType|private$). + Do(). + Into(result) + return +} +` + +var updateTemplate = ` +// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. +func (c *$.type|privatePlural$) Update($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Put(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.inputType|private$.Name). + Body($.inputType|private$). + Do(). + Into(result) + return +} +` + +var updateStatusTemplate = ` +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *$.type|privatePlural$) UpdateStatus($.type|private$ *$.type|raw$) (result *$.type|raw$, err error) { + result = &$.type|raw${} + err = c.client.Put(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + Name($.type|private$.Name). + SubResource("status"). + Body($.type|private$). + Do(). + Into(result) + return +} +` + +var watchTemplate = ` +// Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. +func (c *$.type|privatePlural$) Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + VersionedParams(&opts, $.schemeParameterCodec|raw$). + Timeout(timeout). + Watch() +} +` + +var patchTemplate = ` +// Patch applies the patch and returns the patched $.resultType|private$. +func (c *$.type|privatePlural$) Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) { + result = &$.resultType|raw${} + err = c.client.Patch(pt). + $if .namespaced$Namespace(c.ns).$end$ + Resource("$.type|resource$"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go new file mode 100644 index 0000000000..a698a28b68 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go @@ -0,0 +1,186 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "k8s.io/code-generator/cmd/client-gen/path" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// GenScheme produces a package for a clientset with the scheme, codecs and parameter codecs. +type GenScheme struct { + generator.DefaultGen + OutputPackage string + Groups []clientgentypes.GroupVersions + GroupGoNames map[clientgentypes.GroupVersion]string + InputPackages map[clientgentypes.GroupVersion]string + OutputPath string + ImportTracker namer.ImportTracker + PrivateScheme bool + CreateRegistry bool + schemeGenerated bool +} + +func (g *GenScheme) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.OutputPackage, g.ImportTracker), + } +} + +// We only want to call GenerateType() once. +func (g *GenScheme) Filter(c *generator.Context, t *types.Type) bool { + ret := !g.schemeGenerated + g.schemeGenerated = true + return ret +} + +func (g *GenScheme) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.ImportTracker.ImportLines()...) 
+ for _, group := range g.Groups { + for _, version := range group.Versions { + packagePath := g.InputPackages[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}] + groupAlias := strings.ToLower(g.GroupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) + if g.CreateRegistry { + // import the install package for internal clientsets instead of the type package with register.go + if version.Version != "" { + packagePath = filepath.Dir(packagePath) + } + packagePath = filepath.Join(packagePath, "install") + + imports = append(imports, fmt.Sprintf("%s \"%s\"", groupAlias, path.Vendorless(packagePath))) + break + } else { + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.Version.NonEmpty()), path.Vendorless(packagePath))) + } + } + } + return +} + +func (g *GenScheme) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + allGroupVersions := clientgentypes.ToGroupVersionInfo(g.Groups, g.GroupGoNames) + allInstallGroups := clientgentypes.ToGroupInstallPackages(g.Groups, g.GroupGoNames) + + m := map[string]interface{}{ + "allGroupVersions": allGroupVersions, + "allInstallGroups": allInstallGroups, + "customRegister": false, + "runtimeNewParameterCodec": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "NewParameterCodec"}), + "runtimeNewScheme": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "NewScheme"}), + "serializerNewCodecFactory": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/serializer", Name: "NewCodecFactory"}), + "runtimeScheme": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "Scheme"}), + "runtimeSchemeBuilder": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "SchemeBuilder"}), + "runtimeUtilMust": c.Universe.Function(types.Name{Package: 
"k8s.io/apimachinery/pkg/util/runtime", Name: "Must"}), + "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), + "metav1AddToGroupVersion": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "AddToGroupVersion"}), + } + globals := map[string]string{ + "Scheme": "Scheme", + "Codecs": "Codecs", + "ParameterCodec": "ParameterCodec", + "Registry": "Registry", + } + for k, v := range globals { + if g.PrivateScheme { + m[k] = strings.ToLower(v[0:1]) + v[1:] + } else { + m[k] = v + } + } + + sw.Do(globalsTemplate, m) + + if g.OutputPath != "" { + if _, err := os.Stat(filepath.Join(g.OutputPath, strings.ToLower("register_custom.go"))); err == nil { + m["customRegister"] = true + } + } + + if g.CreateRegistry { + sw.Do(registryRegistration, m) + } else { + sw.Do(simpleRegistration, m) + } + + return sw.Error() +} + +var globalsTemplate = ` +var $.Scheme$ = $.runtimeNewScheme|raw$() +var $.Codecs$ = $.serializerNewCodecFactory|raw$($.Scheme$) +var $.ParameterCodec$ = $.runtimeNewParameterCodec|raw$($.Scheme$)` + +var registryRegistration = ` + +func init() { + $.metav1AddToGroupVersion|raw$($.Scheme$, $.schemaGroupVersion|raw${Version: "v1"}) + Install($.Scheme$) +} + +// Install registers the API group and adds types to a scheme +func Install(scheme *$.runtimeScheme|raw$) { + $- range .allInstallGroups$ + $.InstallPackageAlias$.Install(scheme) + $- end$ + $if .customRegister$ + ExtraInstall(scheme) + $end -$ +} +` + +var simpleRegistration = ` +var localSchemeBuilder = $.runtimeSchemeBuilder|raw${ + $- range .allGroupVersions$ + $.PackageAlias$.AddToScheme, + $- end$ + $if .customRegister$ + ExtraAddToScheme, + $end -$ +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + $.metav1AddToGroupVersion|raw$($.Scheme$, $.schemaGroupVersion|raw${Version: "v1"}) + $.runtimeUtilMust|raw$(AddToScheme($.Scheme$)) +} +` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go new file mode 100644 index 0000000000..426b392731 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go @@ -0,0 +1,341 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "errors" + "fmt" + "strings" + + "k8s.io/gengo/types" +) + +var supportedTags = []string{ + "genclient", + "genclient:nonNamespaced", + "genclient:noVerbs", + "genclient:onlyVerbs", + "genclient:skipVerbs", + "genclient:noStatus", + "genclient:readonly", + "genclient:method", +} + +// SupportedVerbs is a list of supported verbs for +onlyVerbs and +skipVerbs. 
+var SupportedVerbs = []string{ + "create", + "update", + "updateStatus", + "delete", + "deleteCollection", + "get", + "list", + "watch", + "patch", +} + +// ReadonlyVerbs represents a list of read-only verbs. +var ReadonlyVerbs = []string{ + "get", + "list", + "watch", +} + +// genClientPrefix is the default prefix for all genclient tags. +const genClientPrefix = "genclient:" + +// unsupportedExtensionVerbs is a list of verbs we don't support generating +// extension client functions for. +var unsupportedExtensionVerbs = []string{ + "updateStatus", + "deleteCollection", + "watch", + "delete", +} + +// inputTypeSupportedVerbs is a list of verb types that supports overriding the +// input argument type. +var inputTypeSupportedVerbs = []string{ + "create", + "update", +} + +// resultTypeSupportedVerbs is a list of verb types that supports overriding the +// resulting type. +var resultTypeSupportedVerbs = []string{ + "create", + "update", + "get", + "list", + "patch", +} + +// Extensions allows to extend the default set of client verbs +// (CRUD+watch+patch+list+deleteCollection) for a given type with custom defined +// verbs. Custom verbs can have custom input and result types and also allow to +// use a sub-resource in a request instead of top-level resource type. +// +// Example: +// +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +// type ReplicaSet struct { ... } +// +// The 'method=UpdateScale' is the name of the client function. +// The 'verb=update' here means the client function will use 'PUT' action. +// The 'subresource=scale' means we will use SubResource template to generate this client function. +// The 'input' is the input type used for creation (function argument). +// The 'result' (not needed in this case) is the result type returned from the +// client function. +// +type extension struct { + // VerbName is the name of the custom verb (Scale, Instantiate, etc..) 
+ VerbName string + // VerbType is the type of the verb (only verbs from SupportedVerbs are + // supported) + VerbType string + // SubResourcePath defines a path to a sub-resource to use in the request. + // (optional) + SubResourcePath string + // InputTypeOverride overrides the input parameter type for the verb. By + // default the original type is used. Overriding the input type only works for + // "create" and "update" verb types. The given type must exists in the same + // package as the original type. + // (optional) + InputTypeOverride string + // ResultTypeOverride overrides the resulting object type for the verb. By + // default the original type is used. Overriding the result type works. + // (optional) + ResultTypeOverride string +} + +// IsSubresource indicates if this extension should generate the sub-resource. +func (e *extension) IsSubresource() bool { + return len(e.SubResourcePath) > 0 +} + +// HasVerb checks if the extension matches the given verb. +func (e *extension) HasVerb(verb string) bool { + return e.VerbType == verb +} + +// Input returns the input override package path and the type. +func (e *extension) Input() (string, string) { + parts := strings.Split(e.InputTypeOverride, ".") + return parts[len(parts)-1], strings.Join(parts[0:len(parts)-1], ".") +} + +// Result returns the result override package path and the type. +func (e *extension) Result() (string, string) { + parts := strings.Split(e.ResultTypeOverride, ".") + return parts[len(parts)-1], strings.Join(parts[0:len(parts)-1], ".") +} + +// Tags represents a genclient configuration for a single type. 
+type Tags struct { + // +genclient + GenerateClient bool + // +genclient:nonNamespaced + NonNamespaced bool + // +genclient:noStatus + NoStatus bool + // +genclient:noVerbs + NoVerbs bool + // +genclient:skipVerbs=get,update + // +genclient:onlyVerbs=create,delete + SkipVerbs []string + // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale + Extensions []extension +} + +// HasVerb returns true if we should include the given verb in final client interface and +// generate the function for it. +func (t Tags) HasVerb(verb string) bool { + if len(t.SkipVerbs) == 0 { + return true + } + for _, s := range t.SkipVerbs { + if verb == s { + return false + } + } + return true +} + +// MustParseClientGenTags calls ParseClientGenTags but instead of returning error it panics. +func MustParseClientGenTags(lines []string) Tags { + tags, err := ParseClientGenTags(lines) + if err != nil { + panic(err.Error()) + } + return tags +} + +// ParseClientGenTags parse the provided genclient tags and validates that no unknown +// tags are provided. 
+func ParseClientGenTags(lines []string) (Tags, error) { + ret := Tags{} + values := types.ExtractCommentTags("+", lines) + var value []string + value, ret.GenerateClient = values["genclient"] + // Check the old format and error when used to avoid generating client when //+genclient=false + if len(value) > 0 && len(value[0]) > 0 { + return ret, fmt.Errorf("+genclient=%s is invalid, use //+genclient if you want to generate client or omit it when you want to disable generation", value) + } + _, ret.NonNamespaced = values[genClientPrefix+"nonNamespaced"] + // Check the old format and error when used + if value := values["nonNamespaced"]; len(value) > 0 && len(value[0]) > 0 { + return ret, fmt.Errorf("+nonNamespaced=%s is invalid, use //+genclient:nonNamespaced instead", value[0]) + } + _, ret.NoVerbs = values[genClientPrefix+"noVerbs"] + _, ret.NoStatus = values[genClientPrefix+"noStatus"] + onlyVerbs := []string{} + if _, isReadonly := values[genClientPrefix+"readonly"]; isReadonly { + onlyVerbs = ReadonlyVerbs + } + // Check the old format and error when used + if value := values["readonly"]; len(value) > 0 && len(value[0]) > 0 { + return ret, fmt.Errorf("+readonly=%s is invalid, use //+genclient:readonly instead", value[0]) + } + if v, exists := values[genClientPrefix+"skipVerbs"]; exists { + ret.SkipVerbs = strings.Split(v[0], ",") + } + if v, exists := values[genClientPrefix+"onlyVerbs"]; exists || len(onlyVerbs) > 0 { + if len(v) > 0 { + onlyVerbs = append(onlyVerbs, strings.Split(v[0], ",")...) 
+ } + skipVerbs := []string{} + for _, m := range SupportedVerbs { + skip := true + for _, o := range onlyVerbs { + if o == m { + skip = false + break + } + } + // Check for conflicts + for _, v := range skipVerbs { + if v == m { + return ret, fmt.Errorf("verb %q used both in genclient:skipVerbs and genclient:onlyVerbs", v) + } + } + if skip { + skipVerbs = append(skipVerbs, m) + } + } + ret.SkipVerbs = skipVerbs + } + var err error + if ret.Extensions, err = parseClientExtensions(values); err != nil { + return ret, err + } + return ret, validateClientGenTags(values) +} + +func parseClientExtensions(tags map[string][]string) ([]extension, error) { + var ret []extension + for name, values := range tags { + if !strings.HasPrefix(name, genClientPrefix+"method") { + continue + } + for _, value := range values { + // the value comes in this form: "Foo,verb=create" + ext := extension{} + parts := strings.Split(value, ",") + if len(parts) == 0 { + return nil, fmt.Errorf("invalid of empty extension verb name: %q", value) + } + // The first part represents the name of the extension + ext.VerbName = parts[0] + if len(ext.VerbName) == 0 { + return nil, fmt.Errorf("must specify a verb name (// +genclient:method=Foo,verb=create)") + } + // Parse rest of the arguments + params := parts[1:] + for _, p := range params { + parts := strings.Split(p, "=") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid extension tag specification %q", p) + } + key, val := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + if len(val) == 0 { + return nil, fmt.Errorf("empty value of %q for %q extension", key, ext.VerbName) + } + switch key { + case "verb": + ext.VerbType = val + case "subresource": + ext.SubResourcePath = val + case "input": + ext.InputTypeOverride = val + case "result": + ext.ResultTypeOverride = val + default: + return nil, fmt.Errorf("unknown extension configuration key %q", key) + } + } + // Validate resulting extension configuration + if len(ext.VerbType) == 0 { + 
return nil, fmt.Errorf("verb type must be specified (use '// +genclient:method=%s,verb=create')", ext.VerbName) + } + if len(ext.ResultTypeOverride) > 0 { + supported := false + for _, v := range resultTypeSupportedVerbs { + if ext.VerbType == v { + supported = true + break + } + } + if !supported { + return nil, fmt.Errorf("%s: result type is not supported for %q verbs (supported verbs: %#v)", ext.VerbName, ext.VerbType, resultTypeSupportedVerbs) + } + } + if len(ext.InputTypeOverride) > 0 { + supported := false + for _, v := range inputTypeSupportedVerbs { + if ext.VerbType == v { + supported = true + break + } + } + if !supported { + return nil, fmt.Errorf("%s: input type is not supported for %q verbs (supported verbs: %#v)", ext.VerbName, ext.VerbType, inputTypeSupportedVerbs) + } + } + for _, t := range unsupportedExtensionVerbs { + if ext.VerbType == t { + return nil, fmt.Errorf("verb %q is not supported by extension generator", ext.VerbType) + } + } + ret = append(ret, ext) + } + } + return ret, nil +} + +// validateTags validates that only supported genclient tags were provided. +func validateClientGenTags(values map[string][]string) error { + for _, k := range supportedTags { + delete(values, k) + } + for key := range values { + if strings.HasPrefix(key, strings.TrimSuffix(genClientPrefix, ":")) { + return errors.New("unknown tag detected: " + key) + } + } + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/main.go b/vendor/k8s.io/code-generator/cmd/client-gen/main.go new file mode 100644 index 0000000000..6e0d187f5c --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/main.go @@ -0,0 +1,66 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// client-gen makes the individual typed clients using gengo. +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/client-gen/args" + "k8s.io/code-generator/cmd/client-gen/generators" + "k8s.io/code-generator/pkg/util" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. + // TODO: move this out of client-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/clientset_generated/" + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of client-gen + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + // add group version package as input dirs for gengo + for _, pkg := range customArgs.Groups { + for _, v := range pkg.Versions { + genericArgs.InputDirs = append(genericArgs.InputDirs, v.Package) + } + } + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/path/path.go b/vendor/k8s.io/code-generator/cmd/client-gen/path/path.go new file mode 100644 index 
0000000000..19b269bdf2 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/path/path.go @@ -0,0 +1,31 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path + +import "strings" + +// Vendorless removes the longest match of "*/vendor/" from the front of p. +// It is useful if a package locates in vendor/, e.g., +// k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1, because gengo +// indexes the package with its import path, e.g., +// k8s.io/apimachinery/pkg/apis/meta/v1, +func Vendorless(p string) string { + if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { + return p[pos+len("/vendor/"):] + } + return p +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go new file mode 100644 index 0000000000..59f2fd4449 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go @@ -0,0 +1,121 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "fmt" + "regexp" + "sort" + "strings" + + "k8s.io/gengo/namer" +) + +// ToGroupVersion turns "group/version" string into a GroupVersion struct. It reports error +// if it cannot parse the string. +func ToGroupVersion(gv string) (GroupVersion, error) { + // this can be the internal version for the legacy kube types + // TODO once we've cleared the last uses as strings, this special case should be removed. + if (len(gv) == 0) || (gv == "/") { + return GroupVersion{}, nil + } + + switch strings.Count(gv, "/") { + case 0: + return GroupVersion{Group(gv), ""}, nil + case 1: + i := strings.Index(gv, "/") + return GroupVersion{Group(gv[:i]), Version(gv[i+1:])}, nil + default: + return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) + } +} + +type sortableSliceOfVersions []string + +func (a sortableSliceOfVersions) Len() int { return len(a) } +func (a sortableSliceOfVersions) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a sortableSliceOfVersions) Less(i, j int) bool { + vi, vj := strings.TrimLeft(a[i], "v"), strings.TrimLeft(a[j], "v") + major := regexp.MustCompile("^[0-9]+") + viMajor, vjMajor := major.FindString(vi), major.FindString(vj) + viRemaining, vjRemaining := strings.TrimLeft(vi, viMajor), strings.TrimLeft(vj, vjMajor) + switch { + case len(viRemaining) == 0 && len(vjRemaining) == 0: + return viMajor < vjMajor + case len(viRemaining) == 0 && len(vjRemaining) != 0: + // stable version is greater than unstable version + return false + case len(viRemaining) != 0 && len(vjRemaining) == 0: + // stable version is greater than unstable version + return true + } + // neither are stable versions + if viMajor != vjMajor { + return viMajor < vjMajor + } + // assuming at most we have one alpha or one beta version, so if vi contains "alpha", it's the lesser one. 
+ return strings.Contains(viRemaining, "alpha") +} + +// Determine the default version among versions. If a user calls a group client +// without specifying the version (e.g., c.CoreV1(), instead of c.CoreV1()), the +// default version will be returned. +func defaultVersion(versions []PackageVersion) Version { + var versionStrings []string + for _, version := range versions { + versionStrings = append(versionStrings, version.Version.String()) + } + sort.Sort(sortableSliceOfVersions(versionStrings)) + return Version(versionStrings[len(versionStrings)-1]) +} + +// ToGroupVersionInfo is a helper function used by generators for groups. +func ToGroupVersionInfo(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupVersionInfo { + var groupVersionPackages []GroupVersionInfo + for _, group := range groups { + for _, version := range group.Versions { + groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: version.Version}] + groupVersionPackages = append(groupVersionPackages, GroupVersionInfo{ + Group: Group(namer.IC(group.Group.NonEmpty())), + Version: Version(namer.IC(version.Version.String())), + PackageAlias: strings.ToLower(groupGoName + version.Version.NonEmpty()), + GroupGoName: groupGoName, + LowerCaseGroupGoName: namer.IL(groupGoName), + }) + } + } + return groupVersionPackages +} + +func ToGroupInstallPackages(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupInstallPackage { + var groupInstallPackages []GroupInstallPackage + for _, group := range groups { + defaultVersion := defaultVersion(group.Versions) + groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: defaultVersion}] + groupInstallPackages = append(groupInstallPackages, GroupInstallPackage{ + Group: Group(namer.IC(group.Group.NonEmpty())), + InstallPackageAlias: strings.ToLower(groupGoName), + }) + } + return groupInstallPackages +} + +// NormalizeGroupVersion calls normalizes the GroupVersion. 
+//func NormalizeGroupVersion(gv GroupVersion) GroupVersion { +// return GroupVersion{Group: gv.Group.NonEmpty(), Version: gv.Version, NonEmptyVersion: normalization.Version(gv.Version)} +//} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go new file mode 100644 index 0000000000..7d1606c508 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go @@ -0,0 +1,75 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +type Version string + +func (v Version) String() string { + return string(v) +} + +func (v Version) NonEmpty() string { + if v == "" { + return "internalVersion" + } + return v.String() +} + +type Group string + +func (g Group) String() string { + return string(g) +} + +func (g Group) NonEmpty() string { + if g == "api" { + return "core" + } + return string(g) +} + +type PackageVersion struct { + Version + // The fully qualified package, e.g. k8s.io/kubernetes/pkg/apis/apps, where the types.go is found. + Package string +} + +type GroupVersion struct { + Group Group + Version Version +} + +type GroupVersions struct { + // The name of the package for this group, e.g. apps. + PackageName string + Group Group + Versions []PackageVersion +} + +// GroupVersionInfo contains all the info around a group version. 
+type GroupVersionInfo struct { + Group Group + Version Version + PackageAlias string + GroupGoName string + LowerCaseGroupGoName string +} + +type GroupInstallPackage struct { + Group Group + InstallPackageAlias string +} diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go new file mode 100644 index 0000000000..07ce6e72bf --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" +) + +// DefaultBasePeerDirs are the peer-dirs nearly everybody will use, i.e. those coming from +// apimachinery. +var DefaultBasePeerDirs = []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/conversion", + "k8s.io/apimachinery/pkg/runtime", +} + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct { + // Base peer dirs which nearly everybody will use, i.e. outside of Kubernetes core. Peer dirs + // are declared to make the generator pick up manually written conversion funcs from external + // packages. + BasePeerDirs []string + + // Custom peer dirs which are application specific. Peer dirs are declared to make the + // generator pick up manually written conversion funcs from external packages. 
+ ExtraPeerDirs []string + + // SkipUnsafe indicates whether to generate unsafe conversions to improve the efficiency + // of these operations. The unsafe operation is a direct pointer assignment via unsafe + // (within the allowed uses of unsafe) and is equivalent to a proposed Golang change to + // allow structs that are identical to be assigned to each other. + SkipUnsafe bool +} + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + BasePeerDirs: DefaultBasePeerDirs, + SkipUnsafe: false, + } + genericArgs.CustomArgs = customArgs + genericArgs.OutputFileBaseName = "conversion_generated" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + pflag.CommandLine.StringSliceVar(&ca.BasePeerDirs, "base-peer-dirs", ca.BasePeerDirs, + "Comma-separated list of apimachinery import paths which are considered, after tag-specified peers, for conversions. Only change these if you have very good reasons.") + pflag.CommandLine.StringSliceVar(&ca.ExtraPeerDirs, "extra-peer-dirs", ca.ExtraPeerDirs, + "Application specific comma-separated list of import paths which are considered, after tag-specified peers and base-peer-dirs, for conversions.") + pflag.CommandLine.BoolVar(&ca.SkipUnsafe, "skip-unsafe", ca.SkipUnsafe, + "If true, will not generate code using unsafe pointer conversions; resulting code may be slower.") +} + +// Validate checks the given arguments. 
+func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go new file mode 100644 index 0000000000..832b1ceece --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go @@ -0,0 +1,1195 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "reflect" + "sort" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" + + conversionargs "k8s.io/code-generator/cmd/conversion-gen/args" +) + +// These are the comment tags that carry parameters for conversion generation. +const ( + // e.g., "+k8s:conversion-gen=" in doc.go, where is the + // import path of the package the peer types are defined in. + // e.g., "+k8s:conversion-gen=false" in a type's comment will let + // conversion-gen skip that type. + tagName = "k8s:conversion-gen" + // e.g. "+k8s:conversion-gen:explicit-from=net/url.Values" in the type comment + // will result in generating conversion from net/url.Values. 
+ explicitFromTagName = "k8s:conversion-gen:explicit-from" + // e.g., "+k8s:conversion-gen-external-types=" in doc.go, where + // is the relative path to the package the types are defined in. + externalTypesTagName = "k8s:conversion-gen-external-types" +) + +func extractTag(comments []string) []string { + return types.ExtractCommentTags("+", comments)[tagName] +} + +func extractExplicitFromTag(comments []string) []string { + return types.ExtractCommentTags("+", comments)[explicitFromTagName] +} + +func extractExternalTypesTag(comments []string) []string { + return types.ExtractCommentTags("+", comments)[externalTypesTagName] +} + +func isCopyOnly(comments []string) bool { + values := types.ExtractCommentTags("+", comments)["k8s:conversion-fn"] + return len(values) == 1 && values[0] == "copy-only" +} + +func isDrop(comments []string) bool { + values := types.ExtractCommentTags("+", comments)["k8s:conversion-fn"] + return len(values) == 1 && values[0] == "drop" +} + +// TODO: This is created only to reduce number of changes in a single PR. +// Remove it and use PublicNamer instead. +func conversionNamer() *namer.NameStrategy { + return &namer.NameStrategy{ + Join: func(pre string, in []string, post string) string { + return strings.Join(in, "_") + }, + PrependPackageNames: 1, + } +} + +func defaultFnNamer() *namer.NameStrategy { + return &namer.NameStrategy{ + Prefix: "SetDefaults_", + Join: func(pre string, in []string, post string) string { + return pre + strings.Join(in, "_") + post + }, + } +} + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": conversionNamer(), + "raw": namer.NewRawNamer("", nil), + "defaultfn": defaultFnNamer(), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. 
+func DefaultNameSystem() string { + return "public" +} + +func getPeerTypeFor(context *generator.Context, t *types.Type, potenialPeerPkgs []string) *types.Type { + for _, ppp := range potenialPeerPkgs { + p := context.Universe.Package(ppp) + if p == nil { + continue + } + if p.Has(t.Name.Name) { + return p.Type(t.Name.Name) + } + } + return nil +} + +type conversionPair struct { + inType *types.Type + outType *types.Type +} + +// All of the types in conversions map are of type "DeclarationOf" with +// the underlying type being "Func". +type conversionFuncMap map[conversionPair]*types.Type + +// Returns all manually-defined conversion functions in the package. +func getManualConversionFunctions(context *generator.Context, pkg *types.Package, manualMap conversionFuncMap) { + if pkg == nil { + klog.Warningf("Skipping nil package passed to getManualConversionFunctions") + return + } + klog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name) + + scopeName := types.Ref(conversionPackagePath, "Scope").Name + errorName := types.Ref("", "error").Name + buffer := &bytes.Buffer{} + sw := generator.NewSnippetWriter(buffer, context, "$", "$") + + for _, f := range pkg.Functions { + if f.Underlying == nil || f.Underlying.Kind != types.Func { + klog.Errorf("Malformed function: %#v", f) + continue + } + if f.Underlying.Signature == nil { + klog.Errorf("Function without signature: %#v", f) + continue + } + klog.V(8).Infof("Considering function %s", f.Name) + signature := f.Underlying.Signature + // Check whether the function is conversion function. 
+ // Note that all of them have signature: + // func Convert_inType_To_outType(inType, outType, conversion.Scope) error + if signature.Receiver != nil { + klog.V(8).Infof("%s has a receiver", f.Name) + continue + } + if len(signature.Parameters) != 3 || signature.Parameters[2].Name != scopeName { + klog.V(8).Infof("%s has wrong parameters", f.Name) + continue + } + if len(signature.Results) != 1 || signature.Results[0].Name != errorName { + klog.V(8).Infof("%s has wrong results", f.Name) + continue + } + inType := signature.Parameters[0] + outType := signature.Parameters[1] + if inType.Kind != types.Pointer || outType.Kind != types.Pointer { + klog.V(8).Infof("%s has wrong parameter types", f.Name) + continue + } + // Now check if the name satisfies the convention. + // TODO: This should call the Namer directly. + args := argsFromType(inType.Elem, outType.Elem) + sw.Do("Convert_$.inType|public$_To_$.outType|public$", args) + if f.Name.Name == buffer.String() { + klog.V(4).Infof("Found conversion function %s", f.Name) + key := conversionPair{inType.Elem, outType.Elem} + // We might scan the same package twice, and that's OK. 
+ if v, ok := manualMap[key]; ok && v != nil && v.Name.Package != pkg.Path { + panic(fmt.Sprintf("duplicate static conversion defined: %s -> %s from:\n%s.%s\n%s.%s", key.inType, key.outType, v.Name.Package, v.Name.Name, f.Name.Package, f.Name.Name)) + } + manualMap[key] = f + } else { + // prevent user error when they don't get the correct conversion signature + if strings.HasPrefix(f.Name.Name, "Convert_") { + klog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) + } + klog.V(8).Infof("%s has wrong name", f.Name) + } + buffer.Reset() + } +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + packages := generator.Packages{} + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + + // Accumulate pre-existing conversion functions. + // TODO: This is too ad-hoc. We need a better way. + manualConversions := conversionFuncMap{} + + // Record types that are memory equivalent. A type is memory equivalent + // if it has the same memory layout and no nested manual conversion is + // defined. + // TODO: in the future, relax the nested manual conversion requirement + // if we can show that a large enough types are memory identical but + // have non-trivial conversion + memoryEquivalentTypes := equalMemoryTypes{} + + // We are generating conversions only for packages that are explicitly + // passed as InputDir. + processed := map[string]bool{} + for _, i := range context.Inputs { + // skip duplicates + if processed[i] { + continue + } + processed[i] = true + + klog.V(5).Infof("considering pkg %q", i) + pkg := context.Universe[i] + // typesPkg is where the versioned types are defined. Sometimes it is + // different from pkg. 
For example, kubernetes core/v1 types are defined + // in vendor/k8s.io/api/core/v1, while pkg is at pkg/api/v1. + typesPkg := pkg + if pkg == nil { + // If the input had no Go files, for example. + continue + } + + // Add conversion and defaulting functions. + getManualConversionFunctions(context, pkg, manualConversions) + + // Only generate conversions for packages which explicitly request it + // by specifying one or more "+k8s:conversion-gen=" + // in their doc.go file. + peerPkgs := extractTag(pkg.Comments) + if peerPkgs != nil { + klog.V(5).Infof(" tags: %q", peerPkgs) + if len(peerPkgs) == 1 && peerPkgs[0] == "false" { + // If a single +k8s:conversion-gen=false tag is defined, we still want + // the generator to fire for this package for explicit conversions, but + // we are clearing the peerPkgs to not generate any standard conversions. + peerPkgs = nil + } + } else { + klog.V(5).Infof(" no tag") + continue + } + skipUnsafe := false + if customArgs, ok := arguments.CustomArgs.(*conversionargs.CustomArgs); ok { + if len(peerPkgs) > 0 { + peerPkgs = append(peerPkgs, customArgs.BasePeerDirs...) + peerPkgs = append(peerPkgs, customArgs.ExtraPeerDirs...) 
+ } + skipUnsafe = customArgs.SkipUnsafe + } + + // if the external types are not in the same package where the conversion functions to be generated + externalTypesValues := extractExternalTypesTag(pkg.Comments) + if externalTypesValues != nil { + if len(externalTypesValues) != 1 { + klog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) + } + externalTypes := externalTypesValues[0] + klog.V(5).Infof(" external types tags: %q", externalTypes) + var err error + typesPkg, err = context.AddDirectory(externalTypes) + if err != nil { + klog.Fatalf("cannot import package %s", externalTypes) + } + // update context.Order to the latest context.Universe + orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} + context.Order = orderer.OrderUniverse(context.Universe) + } + + // if the source path is within a /vendor/ directory (for example, + // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow + // generation to output to the proper relative path (under vendor). + // Otherwise, the generator will create the file in the wrong location + // in the output directory. + // TODO: build a more fundamental concept in gengo for dealing with modifications + // to vendored packages. + vendorless := func(pkg string) string { + if pos := strings.LastIndex(pkg, "/vendor/"); pos != -1 { + return pkg[pos+len("/vendor/"):] + } + return pkg + } + for i := range peerPkgs { + peerPkgs[i] = vendorless(peerPkgs[i]) + } + + // Make sure our peer-packages are added and fully parsed. 
+ for _, pp := range peerPkgs { + context.AddDir(pp) + p := context.Universe[pp] + if nil == p { + klog.Fatalf("failed to find pkg: %s", pp) + } + getManualConversionFunctions(context, p, manualConversions) + } + + unsafeEquality := TypesEqual(memoryEquivalentTypes) + if skipUnsafe { + unsafeEquality = noEquality{} + } + + path := pkg.Path + // if the source path is within a /vendor/ directory (for example, + // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow + // generation to output to the proper relative path (under vendor). + // Otherwise, the generator will create the file in the wrong location + // in the output directory. + // TODO: build a more fundamental concept in gengo for dealing with modifications + // to vendored packages. + if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) { + expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase) + if strings.Contains(expandedPath, "/vendor/") { + path = expandedPath + } + } + packages = append(packages, + &generator.DefaultPackage{ + PackageName: filepath.Base(pkg.Path), + PackagePath: path, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + NewGenConversion(arguments.OutputFileBaseName, typesPkg.Path, pkg.Path, manualConversions, peerPkgs, unsafeEquality), + } + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return t.Name.Package == typesPkg.Path + }, + }) + } + + // If there is a manual conversion defined between two types, exclude it + // from being a candidate for unsafe conversion + for k, v := range manualConversions { + if isCopyOnly(v.CommentLines) { + klog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) + continue + } + // this type should be excluded from all equivalence, because the converter must be called. 
+ memoryEquivalentTypes.Skip(k.inType, k.outType) + } + + return packages +} + +type equalMemoryTypes map[conversionPair]bool + +func (e equalMemoryTypes) Skip(a, b *types.Type) { + e[conversionPair{a, b}] = false + e[conversionPair{b, a}] = false +} + +func (e equalMemoryTypes) Equal(a, b *types.Type) bool { + // alreadyVisitedTypes holds all the types that have already been checked in the structural type recursion. + alreadyVisitedTypes := make(map[*types.Type]bool) + return e.cachingEqual(a, b, alreadyVisitedTypes) +} + +func (e equalMemoryTypes) cachingEqual(a, b *types.Type, alreadyVisitedTypes map[*types.Type]bool) bool { + if a == b { + return true + } + if equal, ok := e[conversionPair{a, b}]; ok { + return equal + } + if equal, ok := e[conversionPair{b, a}]; ok { + return equal + } + result := e.equal(a, b, alreadyVisitedTypes) + e[conversionPair{a, b}] = result + e[conversionPair{b, a}] = result + return result +} + +func (e equalMemoryTypes) equal(a, b *types.Type, alreadyVisitedTypes map[*types.Type]bool) bool { + in, out := unwrapAlias(a), unwrapAlias(b) + switch { + case in == out: + return true + case in.Kind == out.Kind: + // if the type exists already, return early to avoid recursion + if alreadyVisitedTypes[in] { + return true + } + alreadyVisitedTypes[in] = true + + switch in.Kind { + case types.Struct: + if len(in.Members) != len(out.Members) { + return false + } + for i, inMember := range in.Members { + outMember := out.Members[i] + if !e.cachingEqual(inMember.Type, outMember.Type, alreadyVisitedTypes) { + return false + } + } + return true + case types.Pointer: + return e.cachingEqual(in.Elem, out.Elem, alreadyVisitedTypes) + case types.Map: + return e.cachingEqual(in.Key, out.Key, alreadyVisitedTypes) && e.cachingEqual(in.Elem, out.Elem, alreadyVisitedTypes) + case types.Slice: + return e.cachingEqual(in.Elem, out.Elem, alreadyVisitedTypes) + case types.Interface: + // TODO: determine whether the interfaces are actually equivalent - for now, 
they must have the
+			// same type.
+			return false
+		case types.Builtin:
+			return in.Name.Name == out.Name.Name
+		}
+	}
+	return false
+}
+
+func findMember(t *types.Type, name string) (types.Member, bool) {
+	if t.Kind != types.Struct {
+		return types.Member{}, false
+	}
+	for _, member := range t.Members {
+		if member.Name == name {
+			return member, true
+		}
+	}
+	return types.Member{}, false
+}
+
+// unwrapAlias recurses down aliased types to find the bedrock type.
+func unwrapAlias(in *types.Type) *types.Type {
+	for in.Kind == types.Alias {
+		in = in.Underlying
+	}
+	return in
+}
+
+const (
+	runtimePackagePath    = "k8s.io/apimachinery/pkg/runtime"
+	conversionPackagePath = "k8s.io/apimachinery/pkg/conversion"
+)
+
+type noEquality struct{}
+
+func (noEquality) Equal(_, _ *types.Type) bool { return false }
+
+type TypesEqual interface {
+	Equal(a, b *types.Type) bool
+}
+
+// genConversion produces a file with autogenerated conversions.
+type genConversion struct {
+	generator.DefaultGen
+	// the package that contains the types that conversion funcs are going to be
+	// generated for
+	typesPackage string
+	// the package that the conversion funcs are going to be output to
+	outputPackage string
+	// packages that contain the peer of types in typesPackage
+	peerPackages        []string
+	manualConversions   conversionFuncMap
+	imports             namer.ImportTracker
+	types               []*types.Type
+	explicitConversions []conversionPair
+	skippedFields       map[*types.Type][]string
+	useUnsafe           TypesEqual
+}
+
+func NewGenConversion(sanitizedName, typesPackage, outputPackage string, manualConversions conversionFuncMap, peerPkgs []string, useUnsafe TypesEqual) generator.Generator {
+	return &genConversion{
+		DefaultGen: generator.DefaultGen{
+			OptionalName: sanitizedName,
+		},
+		typesPackage:      typesPackage,
+		outputPackage:     outputPackage,
+		peerPackages:      peerPkgs,
+		manualConversions: manualConversions,
+		imports:           generator.NewImportTracker(),
+		types:             []*types.Type{},
+		explicitConversions:
[]conversionPair{}, + skippedFields: map[*types.Type][]string{}, + useUnsafe: useUnsafe, + } +} + +func (g *genConversion) Namers(c *generator.Context) namer.NameSystems { + // Have the raw namer for this file track what it imports. + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "publicIT": &namerPlusImportTracking{ + delegate: conversionNamer(), + tracker: g.imports, + }, + } +} + +type namerPlusImportTracking struct { + delegate namer.Namer + tracker namer.ImportTracker +} + +func (n *namerPlusImportTracking) Name(t *types.Type) string { + n.tracker.AddType(t) + return n.delegate.Name(t) +} + +func (g *genConversion) convertibleOnlyWithinPackage(inType, outType *types.Type) bool { + var t *types.Type + var other *types.Type + if inType.Name.Package == g.typesPackage { + t, other = inType, outType + } else { + t, other = outType, inType + } + + if t.Name.Package != g.typesPackage { + return false + } + // If the type has opted out, skip it. + tagvals := extractTag(t.CommentLines) + if tagvals != nil { + if tagvals[0] != "false" { + klog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) + } + klog.V(5).Infof("type %v requests no conversion generation, skipping", t) + return false + } + // TODO: Consider generating functions for other kinds too. + if t.Kind != types.Struct { + return false + } + // Also, filter out private types. + if namer.IsPrivateGoName(other.Name.Name) { + return false + } + return true +} + +func getExplicitFromTypes(t *types.Type) []types.Name { + comments := append(t.SecondClosestCommentLines, t.CommentLines...) 
+ paths := extractExplicitFromTag(comments) + result := []types.Name{} + for _, path := range paths { + items := strings.Split(path, ".") + if len(items) != 2 { + klog.Errorf("Unexpected k8s:conversion-gen:explicit-from tag: %s", path) + continue + } + switch { + case items[0] == "net/url" && items[1] == "Values": + default: + klog.Fatalf("Not supported k8s:conversion-gen:explicit-from tag: %s", path) + } + result = append(result, types.Name{Package: items[0], Name: items[1]}) + } + return result +} + +func (g *genConversion) Filter(c *generator.Context, t *types.Type) bool { + convertibleWithPeer := func() bool { + peerType := getPeerTypeFor(c, t, g.peerPackages) + if peerType == nil { + return false + } + if !g.convertibleOnlyWithinPackage(t, peerType) { + return false + } + g.types = append(g.types, t) + return true + }() + + explicitlyConvertible := func() bool { + inTypes := getExplicitFromTypes(t) + if len(inTypes) == 0 { + return false + } + for i := range inTypes { + pair := conversionPair{ + inType: &types.Type{Name: inTypes[i]}, + outType: t, + } + g.explicitConversions = append(g.explicitConversions, pair) + } + return true + }() + + return convertibleWithPeer || explicitlyConvertible +} + +func (g *genConversion) isOtherPackage(pkg string) bool { + if pkg == g.outputPackage { + return false + } + if strings.HasSuffix(pkg, `"`+g.outputPackage+`"`) { + return false + } + return true +} + +func (g *genConversion) Imports(c *generator.Context) (imports []string) { + var importLines []string + for _, singleImport := range g.imports.ImportLines() { + if g.isOtherPackage(singleImport) { + importLines = append(importLines, singleImport) + } + } + return importLines +} + +func argsFromType(inType, outType *types.Type) generator.Args { + return generator.Args{ + "inType": inType, + "outType": outType, + } +} + +const nameTmpl = "Convert_$.inType|publicIT$_To_$.outType|publicIT$" + +func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, bool) 
{ + function, ok := g.manualConversions[conversionPair{inType, outType}] + return function, ok +} + +func (g *genConversion) Init(c *generator.Context, w io.Writer) error { + if klog.V(5) { + if m, ok := g.useUnsafe.(equalMemoryTypes); ok { + var result []string + klog.Infof("All objects without identical memory layout:") + for k, v := range m { + if v { + continue + } + result = append(result, fmt.Sprintf(" %s -> %s = %t", k.inType, k.outType, v)) + } + sort.Strings(result) + for _, s := range result { + klog.Infof(s) + } + } + } + sw := generator.NewSnippetWriter(w, c, "$", "$") + sw.Do("func init() {\n", nil) + sw.Do("localSchemeBuilder.Register(RegisterConversions)\n", nil) + sw.Do("}\n", nil) + + scheme := c.Universe.Type(types.Name{Package: runtimePackagePath, Name: "Scheme"}) + schemePtr := &types.Type{ + Kind: types.Pointer, + Elem: scheme, + } + sw.Do("// RegisterConversions adds conversion functions to the given scheme.\n", nil) + sw.Do("// Public to allow building arbitrary schemes.\n", nil) + sw.Do("func RegisterConversions(s $.|raw$) error {\n", schemePtr) + for _, t := range g.types { + peerType := getPeerTypeFor(c, t, g.peerPackages) + if _, found := g.preexists(t, peerType); !found { + args := argsFromType(t, peerType).With("Scope", types.Ref(conversionPackagePath, "Scope")) + sw.Do("if err := s.AddGeneratedConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return "+nameTmpl+"(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) + } + if _, found := g.preexists(peerType, t); !found { + args := argsFromType(peerType, t).With("Scope", types.Ref(conversionPackagePath, "Scope")) + sw.Do("if err := s.AddGeneratedConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return "+nameTmpl+"(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) + } + } + + for i := range 
g.explicitConversions { + args := argsFromType(g.explicitConversions[i].inType, g.explicitConversions[i].outType).With("Scope", types.Ref(conversionPackagePath, "Scope")) + sw.Do("if err := s.AddGeneratedConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return "+nameTmpl+"(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) + } + + var pairs []conversionPair + for pair, t := range g.manualConversions { + if t.Name.Package != g.outputPackage { + continue + } + pairs = append(pairs, pair) + } + // sort by name of the conversion function + sort.Slice(pairs, func(i, j int) bool { + if g.manualConversions[pairs[i]].Name.Name < g.manualConversions[pairs[j]].Name.Name { + return true + } + return false + }) + for _, pair := range pairs { + args := argsFromType(pair.inType, pair.outType).With("Scope", types.Ref(conversionPackagePath, "Scope")).With("fn", g.manualConversions[pair]) + sw.Do("if err := s.AddConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return $.fn|raw$(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) + } + + sw.Do("return nil\n", nil) + sw.Do("}\n\n", nil) + return sw.Error() +} + +func (g *genConversion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + klog.V(5).Infof("generating for type %v", t) + sw := generator.NewSnippetWriter(w, c, "$", "$") + + if peerType := getPeerTypeFor(c, t, g.peerPackages); peerType != nil { + g.generateConversion(t, peerType, sw) + g.generateConversion(peerType, t, sw) + } + + for _, inTypeName := range getExplicitFromTypes(t) { + inPkg, ok := c.Universe[inTypeName.Package] + if !ok { + klog.Errorf("Unrecognized package: %s", inTypeName.Package) + continue + } + inType, ok := inPkg.Types[inTypeName.Name] + if !ok { + klog.Errorf("Unrecognized type in package %s: %s", inTypeName.Package, inTypeName.Name) 
+ continue + } + switch { + case inType.Name.Package == "net/url" && inType.Name.Name == "Values": + g.generateFromUrlValues(inType, t, sw) + default: + klog.Errorf("Not supported input type: %#v", inType.Name) + } + } + + return sw.Error() +} + +func (g *genConversion) generateConversion(inType, outType *types.Type, sw *generator.SnippetWriter) { + args := argsFromType(inType, outType). + With("Scope", types.Ref(conversionPackagePath, "Scope")) + + sw.Do("func auto"+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) + g.generateFor(inType, outType, sw) + sw.Do("return nil\n", nil) + sw.Do("}\n\n", nil) + + if _, found := g.preexists(inType, outType); found { + // There is a public manual Conversion method: use it. + } else if skipped := g.skippedFields[inType]; len(skipped) != 0 { + // The inType had some fields we could not generate. + klog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType) + klog.Errorf(" the following fields need manual conversion:") + for _, f := range skipped { + klog.Errorf(" - %v", f) + } + } else { + // Emit a public conversion function. + sw.Do("// "+nameTmpl+" is an autogenerated conversion function.\n", args) + sw.Do("func "+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) + sw.Do("return auto"+nameTmpl+"(in, out, s)\n", args) + sw.Do("}\n\n", nil) + } +} + +// we use the system of shadowing 'in' and 'out' so that the same code is valid +// at any nesting level. This makes the autogenerator easy to understand, and +// the compiler shouldn't care. 
+func (g *genConversion) generateFor(inType, outType *types.Type, sw *generator.SnippetWriter) { + klog.V(5).Infof("generating %v -> %v", inType, outType) + var f func(*types.Type, *types.Type, *generator.SnippetWriter) + + switch inType.Kind { + case types.Builtin: + f = g.doBuiltin + case types.Map: + f = g.doMap + case types.Slice: + f = g.doSlice + case types.Struct: + f = g.doStruct + case types.Pointer: + f = g.doPointer + case types.Alias: + f = g.doAlias + default: + f = g.doUnknown + } + + f(inType, outType, sw) +} + +func (g *genConversion) doBuiltin(inType, outType *types.Type, sw *generator.SnippetWriter) { + if inType == outType { + sw.Do("*out = *in\n", nil) + } else { + sw.Do("*out = $.|raw$(*in)\n", outType) + } +} + +func (g *genConversion) doMap(inType, outType *types.Type, sw *generator.SnippetWriter) { + sw.Do("*out = make($.|raw$, len(*in))\n", outType) + if isDirectlyAssignable(inType.Key, outType.Key) { + sw.Do("for key, val := range *in {\n", nil) + if isDirectlyAssignable(inType.Elem, outType.Elem) { + if inType.Key == outType.Key { + sw.Do("(*out)[key] = ", nil) + } else { + sw.Do("(*out)[$.|raw$(key)] = ", outType.Key) + } + if inType.Elem == outType.Elem { + sw.Do("val\n", nil) + } else { + sw.Do("$.|raw$(val)\n", outType.Elem) + } + } else { + sw.Do("newVal := new($.|raw$)\n", outType.Elem) + if function, ok := g.preexists(inType.Elem, outType.Elem); ok { + sw.Do("if err := $.|raw$(&val, newVal, s); err != nil {\n", function) + } else if g.convertibleOnlyWithinPackage(inType.Elem, outType.Elem) { + sw.Do("if err := "+nameTmpl+"(&val, newVal, s); err != nil {\n", argsFromType(inType.Elem, outType.Elem)) + } else { + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&val, newVal, 0); err != nil {\n", nil) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + if inType.Key == outType.Key { + sw.Do("(*out)[key] = *newVal\n", nil) + } else { + sw.Do("(*out)[$.|raw$(key)] = *newVal\n", 
outType.Key) + } + } + } else { + // TODO: Implement it when necessary. + sw.Do("for range *in {\n", nil) + sw.Do("// FIXME: Converting unassignable keys unsupported $.|raw$\n", inType.Key) + } + sw.Do("}\n", nil) +} + +func (g *genConversion) doSlice(inType, outType *types.Type, sw *generator.SnippetWriter) { + sw.Do("*out = make($.|raw$, len(*in))\n", outType) + if inType.Elem == outType.Elem && inType.Elem.Kind == types.Builtin { + sw.Do("copy(*out, *in)\n", nil) + } else { + sw.Do("for i := range *in {\n", nil) + if isDirectlyAssignable(inType.Elem, outType.Elem) { + if inType.Elem == outType.Elem { + sw.Do("(*out)[i] = (*in)[i]\n", nil) + } else { + sw.Do("(*out)[i] = $.|raw$((*in)[i])\n", outType.Elem) + } + } else { + if function, ok := g.preexists(inType.Elem, outType.Elem); ok { + sw.Do("if err := $.|raw$(&(*in)[i], &(*out)[i], s); err != nil {\n", function) + } else if g.convertibleOnlyWithinPackage(inType.Elem, outType.Elem) { + sw.Do("if err := "+nameTmpl+"(&(*in)[i], &(*out)[i], s); err != nil {\n", argsFromType(inType.Elem, outType.Elem)) + } else { + // TODO: This triggers on metav1.ObjectMeta <-> metav1.ObjectMeta and + // similar because neither package is the target package, and + // we really don't know which package will have the conversion + // function defined. This fires on basically every object + // conversion outside of pkg/api/v1. + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil {\n", nil) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + } + sw.Do("}\n", nil) + } +} + +func (g *genConversion) doStruct(inType, outType *types.Type, sw *generator.SnippetWriter) { + for _, inMember := range inType.Members { + if tagvals := extractTag(inMember.CommentLines); tagvals != nil && tagvals[0] == "false" { + // This field is excluded from conversion. 
+ sw.Do("// INFO: in."+inMember.Name+" opted out of conversion generation\n", nil) + continue + } + outMember, found := findMember(outType, inMember.Name) + if !found { + // This field doesn't exist in the peer. + sw.Do("// WARNING: in."+inMember.Name+" requires manual conversion: does not exist in peer-type\n", nil) + g.skippedFields[inType] = append(g.skippedFields[inType], inMember.Name) + continue + } + + inMemberType, outMemberType := inMember.Type, outMember.Type + // create a copy of both underlying types but give them the top level alias name (since aliases + // are assignable) + if underlying := unwrapAlias(inMemberType); underlying != inMemberType { + copied := *underlying + copied.Name = inMemberType.Name + inMemberType = &copied + } + if underlying := unwrapAlias(outMemberType); underlying != outMemberType { + copied := *underlying + copied.Name = outMemberType.Name + outMemberType = &copied + } + + args := argsFromType(inMemberType, outMemberType).With("name", inMember.Name) + + // try a direct memory copy for any type that has exactly equivalent values + if g.useUnsafe.Equal(inMemberType, outMemberType) { + args = args. + With("Pointer", types.Ref("unsafe", "Pointer")). + With("SliceHeader", types.Ref("reflect", "SliceHeader")) + switch inMemberType.Kind { + case types.Pointer: + sw.Do("out.$.name$ = ($.outType|raw$)($.Pointer|raw$(in.$.name$))\n", args) + continue + case types.Map: + sw.Do("out.$.name$ = *(*$.outType|raw$)($.Pointer|raw$(&in.$.name$))\n", args) + continue + case types.Slice: + sw.Do("out.$.name$ = *(*$.outType|raw$)($.Pointer|raw$(&in.$.name$))\n", args) + continue + } + } + + // check based on the top level name, not the underlying names + if function, ok := g.preexists(inMember.Type, outMember.Type); ok { + if isDrop(function.CommentLines) { + continue + } + // copy-only functions that are directly assignable can be inlined instead of invoked. 
+ // As an example, conversion functions exist that allow types with private fields to be + // correctly copied between types. These functions are equivalent to a memory assignment, + // and are necessary for the reflection path, but should not block memory conversion. + // Convert_unversioned_Time_to_unversioned_Time is an example of this logic. + if !isCopyOnly(function.CommentLines) || !g.isFastConversion(inMemberType, outMemberType) { + args["function"] = function + sw.Do("if err := $.function|raw$(&in.$.name$, &out.$.name$, s); err != nil {\n", args) + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + continue + } + klog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) + } + + // If we can't auto-convert, punt before we emit any code. + if inMemberType.Kind != outMemberType.Kind { + sw.Do("// WARNING: in."+inMember.Name+" requires manual conversion: inconvertible types ("+ + inMemberType.String()+" vs "+outMemberType.String()+")\n", nil) + g.skippedFields[inType] = append(g.skippedFields[inType], inMember.Name) + continue + } + + switch inMemberType.Kind { + case types.Builtin: + if inMemberType == outMemberType { + sw.Do("out.$.name$ = in.$.name$\n", args) + } else { + sw.Do("out.$.name$ = $.outType|raw$(in.$.name$)\n", args) + } + case types.Map, types.Slice, types.Pointer: + if g.isDirectlyAssignable(inMemberType, outMemberType) { + sw.Do("out.$.name$ = in.$.name$\n", args) + continue + } + + sw.Do("if in.$.name$ != nil {\n", args) + sw.Do("in, out := &in.$.name$, &out.$.name$\n", args) + g.generateFor(inMemberType, outMemberType, sw) + sw.Do("} else {\n", nil) + sw.Do("out.$.name$ = nil\n", args) + sw.Do("}\n", nil) + case types.Struct: + if g.isDirectlyAssignable(inMemberType, outMemberType) { + sw.Do("out.$.name$ = in.$.name$\n", args) + continue + } + if g.convertibleOnlyWithinPackage(inMemberType, outMemberType) { + sw.Do("if err := "+nameTmpl+"(&in.$.name$, &out.$.name$, s); err != nil {\n", 
args) + } else { + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&in.$.name$, &out.$.name$, 0); err != nil {\n", args) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + case types.Alias: + if isDirectlyAssignable(inMemberType, outMemberType) { + if inMemberType == outMemberType { + sw.Do("out.$.name$ = in.$.name$\n", args) + } else { + sw.Do("out.$.name$ = $.outType|raw$(in.$.name$)\n", args) + } + } else { + if g.convertibleOnlyWithinPackage(inMemberType, outMemberType) { + sw.Do("if err := "+nameTmpl+"(&in.$.name$, &out.$.name$, s); err != nil {\n", args) + } else { + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&in.$.name$, &out.$.name$, 0); err != nil {\n", args) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + } + default: + if g.convertibleOnlyWithinPackage(inMemberType, outMemberType) { + sw.Do("if err := "+nameTmpl+"(&in.$.name$, &out.$.name$, s); err != nil {\n", args) + } else { + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(&in.$.name$, &out.$.name$, 0); err != nil {\n", args) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + } + } +} + +func (g *genConversion) isFastConversion(inType, outType *types.Type) bool { + switch inType.Kind { + case types.Builtin: + return true + case types.Map, types.Slice, types.Pointer, types.Struct, types.Alias: + return g.isDirectlyAssignable(inType, outType) + default: + return false + } +} + +func (g *genConversion) isDirectlyAssignable(inType, outType *types.Type) bool { + return unwrapAlias(inType) == unwrapAlias(outType) +} + +func (g *genConversion) doPointer(inType, outType *types.Type, sw *generator.SnippetWriter) { + sw.Do("*out = new($.Elem|raw$)\n", outType) + if isDirectlyAssignable(inType.Elem, outType.Elem) { + if inType.Elem == outType.Elem { + sw.Do("**out = **in\n", nil) + } else { + sw.Do("**out = $.|raw$(**in)\n", outType.Elem) + } + 
} else { + if function, ok := g.preexists(inType.Elem, outType.Elem); ok { + sw.Do("if err := $.|raw$(*in, *out, s); err != nil {\n", function) + } else if g.convertibleOnlyWithinPackage(inType.Elem, outType.Elem) { + sw.Do("if err := "+nameTmpl+"(*in, *out, s); err != nil {\n", argsFromType(inType.Elem, outType.Elem)) + } else { + sw.Do("// TODO: Inefficient conversion - can we improve it?\n", nil) + sw.Do("if err := s.Convert(*in, *out, 0); err != nil {\n", nil) + } + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + } +} + +func (g *genConversion) doAlias(inType, outType *types.Type, sw *generator.SnippetWriter) { + // TODO: Add support for aliases. + g.doUnknown(inType, outType, sw) +} + +func (g *genConversion) doUnknown(inType, outType *types.Type, sw *generator.SnippetWriter) { + sw.Do("// FIXME: Type $.|raw$ is unsupported.\n", inType) +} + +func (g *genConversion) generateFromUrlValues(inType, outType *types.Type, sw *generator.SnippetWriter) { + args := generator.Args{ + "inType": inType, + "outType": outType, + "Scope": types.Ref(conversionPackagePath, "Scope"), + } + sw.Do("func auto"+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) + for _, outMember := range outType.Members { + if tagvals := extractTag(outMember.CommentLines); tagvals != nil && tagvals[0] == "false" { + // This field is excluded from conversion. 
+ sw.Do("// INFO: in."+outMember.Name+" opted out of conversion generation\n", nil) + continue + } + jsonTag := reflect.StructTag(outMember.Tags).Get("json") + index := strings.Index(jsonTag, ",") + if index == -1 { + index = len(jsonTag) + } + if index == 0 { + memberArgs := generator.Args{ + "name": outMember.Name, + } + sw.Do("// WARNING: Field $.name$ does not have json tag, skipping.\n\n", memberArgs) + continue + } + memberArgs := generator.Args{ + "name": outMember.Name, + "tag": jsonTag[:index], + } + sw.Do("if values, ok := map[string][]string(*in)[\"$.tag$\"]; ok && len(values) > 0 {\n", memberArgs) + g.fromValuesEntry(inType.Underlying.Elem, outMember, sw) + sw.Do("} else {\n", nil) + g.setZeroValue(outMember, sw) + sw.Do("}\n", nil) + } + sw.Do("return nil\n", nil) + sw.Do("}\n\n", nil) + + if _, found := g.preexists(inType, outType); found { + // There is a public manual Conversion method: use it. + } else { + // Emit a public conversion function. + sw.Do("// "+nameTmpl+" is an autogenerated conversion function.\n", args) + sw.Do("func "+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) + sw.Do("return auto"+nameTmpl+"(in, out, s)\n", args) + sw.Do("}\n\n", nil) + } +} + +func (g *genConversion) fromValuesEntry(inType *types.Type, outMember types.Member, sw *generator.SnippetWriter) { + memberArgs := generator.Args{ + "name": outMember.Name, + "type": outMember.Type, + } + if function, ok := g.preexists(inType, outMember.Type); ok { + args := memberArgs.With("function", function) + sw.Do("if err := $.function|raw$(&values, &out.$.name$, s); err != nil {\n", args) + sw.Do("return err\n", nil) + sw.Do("}\n", nil) + return + } + switch { + case outMember.Type == types.String: + sw.Do("out.$.name$ = values[0]\n", memberArgs) + case g.useUnsafe.Equal(inType, outMember.Type): + args := memberArgs.With("Pointer", types.Ref("unsafe", "Pointer")) + switch inType.Kind { + case types.Pointer: + sw.Do("out.$.name$ = 
($.type|raw$)($.Pointer|raw$(&values))\n", args) + case types.Map, types.Slice: + sw.Do("out.$.name$ = *(*$.type|raw$)($.Pointer|raw$(&values))\n", args) + default: + // TODO: Support other types to allow more auto-conversions. + sw.Do("// FIXME: out.$.name$ is of not yet supported type and requires manual conversion\n", memberArgs) + } + default: + // TODO: Support other types to allow more auto-conversions. + sw.Do("// FIXME: out.$.name$ is of not yet supported type and requires manual conversion\n", memberArgs) + } +} + +func (g *genConversion) setZeroValue(outMember types.Member, sw *generator.SnippetWriter) { + outMemberType := unwrapAlias(outMember.Type) + memberArgs := generator.Args{ + "name": outMember.Name, + "alias": outMember.Type, + "type": outMemberType, + } + + switch outMemberType.Kind { + case types.Builtin: + switch outMemberType { + case types.String: + sw.Do("out.$.name$ = \"\"\n", memberArgs) + case types.Int64, types.Int32, types.Int16, types.Int, types.Uint64, types.Uint32, types.Uint16, types.Uint: + sw.Do("out.$.name$ = 0\n", memberArgs) + case types.Uintptr, types.Byte: + sw.Do("out.$.name$ = 0\n", memberArgs) + case types.Float64, types.Float32, types.Float: + sw.Do("out.$.name$ = 0\n", memberArgs) + case types.Bool: + sw.Do("out.$.name$ = false\n", memberArgs) + default: + sw.Do("// FIXME: out.$.name$ is of unsupported type and requires manual conversion\n", memberArgs) + } + case types.Struct: + if outMemberType == outMember.Type { + sw.Do("out.$.name$ = $.type|raw${}\n", memberArgs) + } else { + sw.Do("out.$.name$ = $.alias|raw$($.type|raw${})\n", memberArgs) + } + case types.Map, types.Slice, types.Pointer: + sw.Do("out.$.name$ = nil\n", memberArgs) + case types.Alias: + // outMemberType was already unwrapped from aliases - so that should never happen. 
+ sw.Do("// FIXME: unexpected error for out.$.name$\n", memberArgs) + case types.Interface, types.Array: + sw.Do("out.$.name$ = nil\n", memberArgs) + default: + sw.Do("// FIXME: out.$.name$ is of unsupported type and requires manual conversion\n", memberArgs) + } +} + +func isDirectlyAssignable(inType, outType *types.Type) bool { + // TODO: This should maybe check for actual assignability between the two + // types, rather than superficial traits that happen to indicate it is + // assignable in the ways we currently use this code. + return inType.IsAssignable() && (inType.IsPrimitive() || isSamePackage(inType, outType)) +} + +func isSamePackage(inType, outType *types.Type) bool { + return inType.Name.Package == outType.Name.Package +} diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go new file mode 100644 index 0000000000..7c63418015 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -0,0 +1,125 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// conversion-gen is a tool for auto-generating functions that convert +// between internal and external types. 
A general conversion code +// generation task involves three sets of packages: (1) a set of +// packages containing internal types, (2) a single package containing +// the external types, and (3) a single destination package (i.e., +// where the generated conversion functions go, and where the +// developer-authored conversion functions are). The packages +// containing the internal types play the role known as "peer +// packages" in the general code-generation framework of Kubernetes. +// +// For each conversion task, `conversion-gen` will generate functions +// that efficiently convert between same-name types in the two +// (internal, external) packages. The generated functions include +// ones named +// autoConvert___To__ +// for each such pair of types --- both with (pkg1,pkg2) = +// (internal,external) and (pkg1,pkg2) = (external,internal). +// Additionally: if the destination package does not contain one in a +// non-generated file then a function named +// Convert___To__ +// is also generated and it simply calls the `autoConvert...` +// function. The generated conversion functions use standard value +// assignment wherever possible. For compound types, the generated +// conversion functions call the `Convert...` functions for the +// subsidiary types. Thus developers can override the behavior for +// selected types. For a top-level object type (i.e., the type of an +// object that will be input to an apiserver), for such an override to +// be used by the apiserver the developer-maintained conversion +// functions must also be registered by invoking the +// `AddConversionFunc`/`AddGeneratedConversionFunc` method of the +// relevant `Scheme` object from k8s.io/apimachinery/pkg/runtime. +// +// `conversion-gen` will scan its `--input-dirs`, looking at the +// package defined in each of those directories for comment tags that +// define a conversion code generation task. 
A package requests +// conversion code generation by including one or more comment in the +// package's `doc.go` file (currently anywhere in that file is +// acceptable, but the recommended location is above the `package` +// statement), of the form: +// // +k8s:conversion-gen= +// This introduces a conversion task, for which the destination +// package is the one containing the file with the tag and the tag +// identifies a package containing internal types. If there is also a +// tag of the form +// // +k8s:conversion-gen-external-types= +// then it identifies the package containing the external types; +// otherwise they are in the destination package. +// +// For each conversion code generation task, the full set of internal +// packages (AKA peer packages) consists of the ones specified in the +// `k8s:conversion-gen` tags PLUS any specified in the +// `--base-peer-dirs` and `--extra-peer-dirs` flags on the command +// line. +// +// When generating for a package, individual types or fields of structs may opt +// out of Conversion generation by specifying a comment on the of the form: +// // +k8s:conversion-gen=false +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" + "k8s.io/code-generator/cmd/conversion-gen/generators" + "k8s.io/code-generator/pkg/util" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. + // TODO: move this out of conversion-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + // k8s.io/apimachinery/pkg/runtime contains a number of manual conversions, + // that we need to generate conversions. 
+ // Packages being dependencies of explicitly requested packages are only + // partially scanned - only types explicitly used are being traversed. + // Not used functions or types are omitted. + // Adding this explicitly to InputDirs ensures that the package is fully + // scanned and all functions are parsed and processed. + genericArgs.InputDirs = append(genericArgs.InputDirs, "k8s.io/apimachinery/pkg/runtime") + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go new file mode 100644 index 0000000000..789713012a --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/deepcopy-gen/generators" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs generators.CustomArgs + +// NewDefaults returns default arguments for the generator. 
+func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = (*generators.CustomArgs)(customArgs) // convert to upstream type to make type-casts work there + genericArgs.OutputFileBaseName = "deepcopy_generated" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + pflag.CommandLine.StringSliceVar(&ca.BoundingDirs, "bounding-dirs", ca.BoundingDirs, + "Comma-separated list of import paths which bound the types for which deep-copies will be generated.") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*generators.CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go new file mode 100644 index 0000000000..96fb298734 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -0,0 +1,85 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// deepcopy-gen is a tool for auto-generating DeepCopy functions. +// +// Given a list of input directories, it will generate functions that +// efficiently perform a full deep-copy of each type. 
For any type that +// offers a `.DeepCopy()` method, it will simply call that. Otherwise it will +// use standard value assignment whenever possible. If that is not possible it +// will try to call its own generated copy function for the type, if the type is +// within the allowed root packages. Failing that, it will fall back on +// `conversion.Cloner.DeepCopy(val)` to make the copy. The resulting file will +// be stored in the same directory as the processed source package. +// +// Generation is governed by comment tags in the source. Any package may +// request DeepCopy generation by including a comment in the file-comments of +// one file, of the form: +// // +k8s:deepcopy-gen=package +// +// DeepCopy functions can be generated for individual types, rather than the +// entire package by specifying a comment on the type definion of the form: +// // +k8s:deepcopy-gen=true +// +// When generating for a whole package, individual types may opt out of +// DeepCopy generation by specifying a comment on the of the form: +// // +k8s:deepcopy-gen=false +// +// Note that registration is a whole-package option, and is not available for +// individual types. +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/deepcopy-gen/generators" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args" + "k8s.io/code-generator/pkg/util" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. 
+ // TODO: move this out of deepcopy-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go new file mode 100644 index 0000000000..3c5a042c7c --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/defaulter-gen/generators" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs generators.CustomArgs + +// NewDefaults returns default arguments for the generator. 
+func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = (*generators.CustomArgs)(customArgs) // convert to upstream type to make type-casts work there + genericArgs.OutputFileBaseName = "zz_generated.defaults" + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + pflag.CommandLine.StringSliceVar(&ca.ExtraPeerDirs, "extra-peer-dirs", ca.ExtraPeerDirs, + "Comma-separated list of import paths which are considered, after tag-specified peers, for conversions.") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*generators.CustomArgs) + + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go new file mode 100644 index 0000000000..40bb875e52 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -0,0 +1,84 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// defaulter-gen is a tool for auto-generating Defaulter functions. 
+// +// Given a list of input directories, it will scan for top level types +// and generate efficient defaulters for an entire object from the sum +// of the SetDefault_* methods contained in the object tree. +// +// Generation is governed by comment tags in the source. Any package may +// request defaulter generation by including one or more comment tags at +// the package comment level: +// +// // +k8s:defaulter-gen= +// +// which will create defaulters for any type that contains the provided +// field name (if the type has defaulters). Any type may request explicit +// defaulting by providing the comment tag: +// +// // +k8s:defaulter-gen=true|false +// +// An existing defaulter method (`SetDefaults_TYPE`) can provide the +// comment tag: +// +// // +k8s:defaulter-gen=covers +// +// to indicate that the defaulter does not or should not call any nested +// defaulters. +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/defaulter-gen/generators" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/defaulter-gen/args" + "k8s.io/code-generator/pkg/util" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. + // TODO: move this out of defaulter-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. 
+ if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore new file mode 100644 index 0000000000..0e9aa466bb --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore @@ -0,0 +1 @@ +go-to-protobuf diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS new file mode 100644 index 0000000000..613659162a --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- smarterclayton +reviewers: +- smarterclayton diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go new file mode 100644 index 0000000000..847a6a5a02 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// go-to-protobuf generates a Protobuf IDL from a Go struct, respecting any +// existing IDL tags on the Go struct. 
+package main + +import ( + goflag "flag" + + flag "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/go-to-protobuf/protobuf" +) + +var g = protobuf.New() + +func init() { + g.BindFlags(flag.CommandLine) + goflag.Set("logtostderr", "true") + flag.CommandLine.AddGoFlagSet(goflag.CommandLine) +} + +func main() { + flag.Parse() + protobuf.Run(g) +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go new file mode 100644 index 0000000000..8a7be5260c --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go @@ -0,0 +1,428 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// go-to-protobuf generates a Protobuf IDL from a Go struct, respecting any +// existing IDL tags on the Go struct. 
+package protobuf + +import ( + "bytes" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + + flag "github.com/spf13/pflag" + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/simple" + "gonum.org/v1/gonum/graph/topo" + + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/parser" + "k8s.io/gengo/types" +) + +type Generator struct { + Common args.GeneratorArgs + APIMachineryPackages string + Packages string + OutputBase string + VendorOutputBase string + ProtoImport []string + Conditional string + Clean bool + OnlyIDL bool + KeepGogoproto bool + SkipGeneratedRewrite bool + DropEmbeddedFields string +} + +func New() *Generator { + sourceTree := args.DefaultSourceTree() + common := args.GeneratorArgs{ + OutputBase: sourceTree, + GoHeaderFilePath: filepath.Join(sourceTree, util.BoilerplatePath()), + } + defaultProtoImport := filepath.Join(sourceTree, "k8s.io", "kubernetes", "vendor", "github.com", "gogo", "protobuf", "protobuf") + cwd, err := os.Getwd() + if err != nil { + log.Fatalf("Cannot get current directory.") + } + return &Generator{ + Common: common, + OutputBase: sourceTree, + VendorOutputBase: filepath.Join(cwd, "vendor"), + ProtoImport: []string{defaultProtoImport}, + APIMachineryPackages: strings.Join([]string{ + `+k8s.io/apimachinery/pkg/util/intstr`, + `+k8s.io/apimachinery/pkg/api/resource`, + `+k8s.io/apimachinery/pkg/runtime/schema`, + `+k8s.io/apimachinery/pkg/runtime`, + `k8s.io/apimachinery/pkg/apis/meta/v1`, + `k8s.io/apimachinery/pkg/apis/meta/v1beta1`, + `k8s.io/apimachinery/pkg/apis/testapigroup/v1`, + }, ","), + Packages: "", + DropEmbeddedFields: "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta", + } +} + +func (g *Generator) BindFlags(flag *flag.FlagSet) { + flag.StringVarP(&g.Common.GoHeaderFilePath, "go-header-file", "h", g.Common.GoHeaderFilePath, "File containing boilerplate header text. 
The string YEAR will be replaced with the current 4-digit year.") + flag.BoolVar(&g.Common.VerifyOnly, "verify-only", g.Common.VerifyOnly, "If true, only verify existing output, do not write anything.") + flag.StringVarP(&g.Packages, "packages", "p", g.Packages, "comma-separated list of directories to get input types from. Directories prefixed with '-' are not generated, directories prefixed with '+' only create types with explicit IDL instructions.") + flag.StringVar(&g.APIMachineryPackages, "apimachinery-packages", g.APIMachineryPackages, "comma-separated list of directories to get apimachinery input types from which are needed by any API. Directories prefixed with '-' are not generated, directories prefixed with '+' only create types with explicit IDL instructions.") + flag.StringVarP(&g.OutputBase, "output-base", "o", g.OutputBase, "Output base; defaults to $GOPATH/src/") + flag.StringVar(&g.VendorOutputBase, "vendor-output-base", g.VendorOutputBase, "The vendor/ directory to look for packages in; defaults to $PWD/vendor/.") + flag.StringSliceVar(&g.ProtoImport, "proto-import", g.ProtoImport, "The search path for the core protobuf .protos, required; defaults $GOPATH/src/k8s.io/kubernetes/vendor/github.com/gogo/protobuf/protobuf.") + flag.StringVar(&g.Conditional, "conditional", g.Conditional, "An optional Golang build tag condition to add to the generated Go code") + flag.BoolVar(&g.Clean, "clean", g.Clean, "If true, remove all generated files for the specified Packages.") + flag.BoolVar(&g.OnlyIDL, "only-idl", g.OnlyIDL, "If true, only generate the IDL for each package.") + flag.BoolVar(&g.KeepGogoproto, "keep-gogoproto", g.KeepGogoproto, "If true, the generated IDL will contain gogoprotobuf extensions which are normally removed") + flag.BoolVar(&g.SkipGeneratedRewrite, "skip-generated-rewrite", g.SkipGeneratedRewrite, "If true, skip fixing up the generated.pb.go file (debugging only).") + flag.StringVar(&g.DropEmbeddedFields, "drop-embedded-fields", 
g.DropEmbeddedFields, "Comma-delimited list of embedded Go types to omit from generated protobufs") +} + +func Run(g *Generator) { + if g.Common.VerifyOnly { + g.OnlyIDL = true + g.Clean = false + } + + b := parser.New() + b.AddBuildTags("proto") + + omitTypes := map[types.Name]struct{}{} + for _, t := range strings.Split(g.DropEmbeddedFields, ",") { + name := types.Name{} + if i := strings.LastIndex(t, "."); i != -1 { + name.Package, name.Name = t[:i], t[i+1:] + } else { + name.Name = t + } + if len(name.Name) == 0 { + log.Fatalf("--drop-embedded-types requires names in the form of [GOPACKAGE.]TYPENAME: %v", t) + } + omitTypes[name] = struct{}{} + } + + boilerplate, err := g.Common.LoadGoBoilerplate() + if err != nil { + log.Fatalf("Failed loading boilerplate (consider using the go-header-file flag): %v", err) + } + + protobufNames := NewProtobufNamer() + outputPackages := generator.Packages{} + nonOutputPackages := map[string]struct{}{} + + var packages []string + if len(g.APIMachineryPackages) != 0 { + packages = append(packages, strings.Split(g.APIMachineryPackages, ",")...) + } + if len(g.Packages) != 0 { + packages = append(packages, strings.Split(g.Packages, ",")...) + } + if len(packages) == 0 { + log.Fatalf("Both apimachinery-packages and packages are empty. At least one package must be specified.") + } + + for _, d := range packages { + generateAllTypes, outputPackage := true, true + switch { + case strings.HasPrefix(d, "+"): + d = d[1:] + generateAllTypes = false + case strings.HasPrefix(d, "-"): + d = d[1:] + outputPackage = false + } + name := protoSafePackage(d) + parts := strings.SplitN(d, "=", 2) + if len(parts) > 1 { + d = parts[0] + name = parts[1] + } + p := newProtobufPackage(d, name, generateAllTypes, omitTypes) + header := append([]byte{}, boilerplate...) + header = append(header, p.HeaderText...) 
+ p.HeaderText = header + protobufNames.Add(p) + if outputPackage { + outputPackages = append(outputPackages, p) + } else { + nonOutputPackages[name] = struct{}{} + } + } + + if !g.Common.VerifyOnly { + for _, p := range outputPackages { + if err := p.(*protobufPackage).Clean(g.OutputBase); err != nil { + log.Fatalf("Unable to clean package %s: %v", p.Name(), err) + } + } + } + + if g.Clean { + return + } + + for _, p := range protobufNames.List() { + if err := b.AddDir(p.Path()); err != nil { + log.Fatalf("Unable to add directory %q: %v", p.Path(), err) + } + } + + c, err := generator.NewContext( + b, + namer.NameSystems{ + "public": namer.NewPublicNamer(3), + "proto": protobufNames, + }, + "public", + ) + if err != nil { + log.Fatalf("Failed making a context: %v", err) + } + + c.Verify = g.Common.VerifyOnly + c.FileTypes["protoidl"] = NewProtoFile() + + // order package by imports, importees first + deps := deps(c, protobufNames.packages) + order, err := importOrder(deps) + if err != nil { + log.Fatalf("Failed to order packages by imports: %v", err) + } + topologicalPos := map[string]int{} + for i, p := range order { + topologicalPos[p] = i + } + sort.Sort(positionOrder{topologicalPos, protobufNames.packages}) + + var vendoredOutputPackages, localOutputPackages generator.Packages + for _, p := range protobufNames.packages { + if _, ok := nonOutputPackages[p.Name()]; ok { + // if we're not outputting the package, don't include it in either package list + continue + } + p.Vendored = strings.Contains(c.Universe[p.PackagePath].SourcePath, "/vendor/") + if p.Vendored { + vendoredOutputPackages = append(vendoredOutputPackages, p) + } else { + localOutputPackages = append(localOutputPackages, p) + } + } + + if err := protobufNames.AssignTypesToPackages(c); err != nil { + log.Fatalf("Failed to identify Common types: %v", err) + } + + if err := c.ExecutePackages(g.VendorOutputBase, vendoredOutputPackages); err != nil { + log.Fatalf("Failed executing vendor generator: %v", 
err) + } + if err := c.ExecutePackages(g.OutputBase, localOutputPackages); err != nil { + log.Fatalf("Failed executing local generator: %v", err) + } + + if g.OnlyIDL { + return + } + + if _, err := exec.LookPath("protoc"); err != nil { + log.Fatalf("Unable to find 'protoc': %v", err) + } + + searchArgs := []string{"-I", ".", "-I", g.OutputBase} + if len(g.ProtoImport) != 0 { + for _, s := range g.ProtoImport { + searchArgs = append(searchArgs, "-I", s) + } + } + args := append(searchArgs, fmt.Sprintf("--gogo_out=%s", g.OutputBase)) + + buf := &bytes.Buffer{} + if len(g.Conditional) > 0 { + fmt.Fprintf(buf, "// +build %s\n\n", g.Conditional) + } + buf.Write(boilerplate) + + for _, outputPackage := range outputPackages { + p := outputPackage.(*protobufPackage) + + path := filepath.Join(g.OutputBase, p.ImportPath()) + outputPath := filepath.Join(g.OutputBase, p.OutputPath()) + if p.Vendored { + path = filepath.Join(g.VendorOutputBase, p.ImportPath()) + outputPath = filepath.Join(g.VendorOutputBase, p.OutputPath()) + } + + // generate the gogoprotobuf protoc + cmd := exec.Command("protoc", append(args, path)...) 
+ out, err := cmd.CombinedOutput() + if len(out) > 0 { + log.Print(string(out)) + } + if err != nil { + log.Println(strings.Join(cmd.Args, " ")) + log.Fatalf("Unable to generate protoc on %s: %v", p.PackageName, err) + } + + if g.SkipGeneratedRewrite { + continue + } + + // alter the generated protobuf file to remove the generated types (but leave the serializers) and rewrite the + // package statement to match the desired package name + if err := RewriteGeneratedGogoProtobufFile(outputPath, p.ExtractGeneratedType, p.OptionalTypeName, buf.Bytes()); err != nil { + log.Fatalf("Unable to rewrite generated %s: %v", outputPath, err) + } + + // sort imports + cmd = exec.Command("goimports", "-w", outputPath) + out, err = cmd.CombinedOutput() + if len(out) > 0 { + log.Print(string(out)) + } + if err != nil { + log.Println(strings.Join(cmd.Args, " ")) + log.Fatalf("Unable to rewrite imports for %s: %v", p.PackageName, err) + } + + // format and simplify the generated file + cmd = exec.Command("gofmt", "-s", "-w", outputPath) + out, err = cmd.CombinedOutput() + if len(out) > 0 { + log.Print(string(out)) + } + if err != nil { + log.Println(strings.Join(cmd.Args, " ")) + log.Fatalf("Unable to apply gofmt for %s: %v", p.PackageName, err) + } + } + + if g.SkipGeneratedRewrite { + return + } + + if !g.KeepGogoproto { + // generate, but do so without gogoprotobuf extensions + for _, outputPackage := range outputPackages { + p := outputPackage.(*protobufPackage) + p.OmitGogo = true + } + if err := c.ExecutePackages(g.VendorOutputBase, vendoredOutputPackages); err != nil { + log.Fatalf("Failed executing vendor generator: %v", err) + } + if err := c.ExecutePackages(g.OutputBase, localOutputPackages); err != nil { + log.Fatalf("Failed executing local generator: %v", err) + } + } + + for _, outputPackage := range outputPackages { + p := outputPackage.(*protobufPackage) + + if len(p.StructTags) == 0 { + continue + } + + pattern := filepath.Join(g.OutputBase, p.PackagePath, "*.go") + if 
p.Vendored { + pattern = filepath.Join(g.VendorOutputBase, p.PackagePath, "*.go") + } + files, err := filepath.Glob(pattern) + if err != nil { + log.Fatalf("Can't glob pattern %q: %v", pattern, err) + } + + for _, s := range files { + if strings.HasSuffix(s, "_test.go") { + continue + } + if err := RewriteTypesWithProtobufStructTags(s, p.StructTags); err != nil { + log.Fatalf("Unable to rewrite with struct tags %s: %v", s, err) + } + } + } +} + +func deps(c *generator.Context, pkgs []*protobufPackage) map[string][]string { + ret := map[string][]string{} + for _, p := range pkgs { + for _, d := range c.Universe[p.PackagePath].Imports { + ret[p.PackagePath] = append(ret[p.PackagePath], d.Path) + } + } + return ret +} + +func importOrder(deps map[string][]string) ([]string, error) { + nodes := map[string]graph.Node{} + names := map[int64]string{} + g := simple.NewDirectedGraph() + for pkg, imports := range deps { + for _, imp := range imports { + if _, found := nodes[pkg]; !found { + n := g.NewNode() + g.AddNode(n) + nodes[pkg] = n + names[n.ID()] = pkg + } + if _, found := nodes[imp]; !found { + n := g.NewNode() + g.AddNode(n) + nodes[imp] = n + names[n.ID()] = imp + } + g.SetEdge(g.NewEdge(nodes[imp], nodes[pkg])) + } + } + + ret := []string{} + sorted, err := topo.Sort(g) + if err != nil { + return nil, err + } + for _, n := range sorted { + ret = append(ret, names[n.ID()]) + fmt.Println("topological order", names[n.ID()]) + } + return ret, nil +} + +type positionOrder struct { + pos map[string]int + elements []*protobufPackage +} + +func (o positionOrder) Len() int { + return len(o.elements) +} + +func (o positionOrder) Less(i, j int) bool { + return o.pos[o.elements[i].PackagePath] < o.pos[o.elements[j].PackagePath] +} + +func (o positionOrder) Swap(i, j int) { + x := o.elements[i] + o.elements[i] = o.elements[j] + o.elements[j] = x +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go 
b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go new file mode 100644 index 0000000000..1a9803dc88 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go @@ -0,0 +1,773 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "fmt" + "io" + "log" + "reflect" + "sort" + "strconv" + "strings" + + "k8s.io/klog" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// genProtoIDL produces a .proto IDL. 
+type genProtoIDL struct { + generator.DefaultGen + localPackage types.Name + localGoPackage types.Name + imports namer.ImportTracker + + generateAll bool + omitGogo bool + omitFieldTypes map[types.Name]struct{} +} + +func (g *genProtoIDL) PackageVars(c *generator.Context) []string { + if g.omitGogo { + return []string{ + fmt.Sprintf("option go_package = %q;", g.localGoPackage.Name), + } + } + return []string{ + "option (gogoproto.marshaler_all) = true;", + "option (gogoproto.stable_marshaler_all) = true;", + "option (gogoproto.sizer_all) = true;", + "option (gogoproto.goproto_stringer_all) = false;", + "option (gogoproto.stringer_all) = true;", + "option (gogoproto.unmarshaler_all) = true;", + "option (gogoproto.goproto_unrecognized_all) = false;", + "option (gogoproto.goproto_enum_prefix_all) = false;", + "option (gogoproto.goproto_getters_all) = false;", + fmt.Sprintf("option go_package = %q;", g.localGoPackage.Name), + } +} +func (g *genProtoIDL) Filename() string { return g.OptionalName + ".proto" } +func (g *genProtoIDL) FileType() string { return "protoidl" } +func (g *genProtoIDL) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + // The local namer returns the correct protobuf name for a proto type + // in the context of a package + "local": localNamer{g.localPackage}, + } +} + +// Filter ignores types that are identified as not exportable. +func (g *genProtoIDL) Filter(c *generator.Context, t *types.Type) bool { + tagVals := types.ExtractCommentTags("+", t.CommentLines)["protobuf"] + if tagVals != nil { + if tagVals[0] == "false" { + // Type specified "false". + return false + } + if tagVals[0] == "true" { + // Type specified "true". + return true + } + klog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0]) + } + if !g.generateAll { + // We're not generating everything. 
+ return false + } + seen := map[*types.Type]bool{} + ok := isProtoable(seen, t) + return ok +} + +func isProtoable(seen map[*types.Type]bool, t *types.Type) bool { + if seen[t] { + // be optimistic in the case of type cycles. + return true + } + seen[t] = true + switch t.Kind { + case types.Builtin: + return true + case types.Alias: + return isProtoable(seen, t.Underlying) + case types.Slice, types.Pointer: + return isProtoable(seen, t.Elem) + case types.Map: + return isProtoable(seen, t.Key) && isProtoable(seen, t.Elem) + case types.Struct: + if len(t.Members) == 0 { + return true + } + for _, m := range t.Members { + if isProtoable(seen, m.Type) { + return true + } + } + return false + case types.Func, types.Chan: + return false + case types.DeclarationOf, types.Unknown, types.Unsupported: + return false + case types.Interface: + return false + default: + log.Printf("WARNING: type %q is not portable: %s", t.Kind, t.Name) + return false + } +} + +// isOptionalAlias should return true if the specified type has an underlying type +// (is an alias) of a map or slice and has the comment tag protobuf.nullable=true, +// indicating that the type should be nullable in protobuf. +func isOptionalAlias(t *types.Type) bool { + if t.Underlying == nil || (t.Underlying.Kind != types.Map && t.Underlying.Kind != types.Slice) { + return false + } + if extractBoolTagOrDie("protobuf.nullable", t.CommentLines) == false { + return false + } + return true +} + +func (g *genProtoIDL) Imports(c *generator.Context) (imports []string) { + lines := []string{} + // TODO: this could be expressed more cleanly + for _, line := range g.imports.ImportLines() { + if g.omitGogo && line == "github.com/gogo/protobuf/gogoproto/gogo.proto" { + continue + } + lines = append(lines, line) + } + return lines +} + +// GenerateType makes the body of a file implementing a set for type t. 
+func (g *genProtoIDL) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + b := bodyGen{ + locator: &protobufLocator{ + namer: c.Namers["proto"].(ProtobufFromGoNamer), + tracker: g.imports, + universe: c.Universe, + + localGoPackage: g.localGoPackage.Package, + }, + localPackage: g.localPackage, + + omitGogo: g.omitGogo, + omitFieldTypes: g.omitFieldTypes, + + t: t, + } + switch t.Kind { + case types.Alias: + return b.doAlias(sw) + case types.Struct: + return b.doStruct(sw) + default: + return b.unknown(sw) + } +} + +// ProtobufFromGoNamer finds the protobuf name of a type (and its package, and +// the package path) from its Go name. +type ProtobufFromGoNamer interface { + GoNameToProtoName(name types.Name) types.Name +} + +type ProtobufLocator interface { + ProtoTypeFor(t *types.Type) (*types.Type, error) + GoTypeForName(name types.Name) *types.Type + CastTypeName(name types.Name) string +} + +type protobufLocator struct { + namer ProtobufFromGoNamer + tracker namer.ImportTracker + universe types.Universe + + localGoPackage string +} + +// CastTypeName returns the cast type name of a Go type +// TODO: delegate to a new localgo namer? +func (p protobufLocator) CastTypeName(name types.Name) string { + if name.Package == p.localGoPackage { + return name.Name + } + return name.String() +} + +func (p protobufLocator) GoTypeForName(name types.Name) *types.Type { + if len(name.Package) == 0 { + name.Package = p.localGoPackage + } + return p.universe.Type(name) +} + +// ProtoTypeFor locates a Protobuf type for the provided Go type (if possible). 
+func (p protobufLocator) ProtoTypeFor(t *types.Type) (*types.Type, error) { + switch { + // we've already converted the type, or it's a map + case t.Kind == types.Protobuf || t.Kind == types.Map: + p.tracker.AddType(t) + return t, nil + } + // it's a fundamental type + if t, ok := isFundamentalProtoType(t); ok { + p.tracker.AddType(t) + return t, nil + } + // it's a message + if t.Kind == types.Struct || isOptionalAlias(t) { + t := &types.Type{ + Name: p.namer.GoNameToProtoName(t.Name), + Kind: types.Protobuf, + + CommentLines: t.CommentLines, + } + p.tracker.AddType(t) + return t, nil + } + return nil, errUnrecognizedType +} + +type bodyGen struct { + locator ProtobufLocator + localPackage types.Name + omitGogo bool + omitFieldTypes map[types.Name]struct{} + + t *types.Type +} + +func (b bodyGen) unknown(sw *generator.SnippetWriter) error { + return fmt.Errorf("not sure how to generate: %#v", b.t) +} + +func (b bodyGen) doAlias(sw *generator.SnippetWriter) error { + if !isOptionalAlias(b.t) { + return nil + } + + var kind string + switch b.t.Underlying.Kind { + case types.Map: + kind = "map" + default: + kind = "slice" + } + optional := &types.Type{ + Name: b.t.Name, + Kind: types.Struct, + + CommentLines: b.t.CommentLines, + SecondClosestCommentLines: b.t.SecondClosestCommentLines, + Members: []types.Member{ + { + Name: "Items", + CommentLines: []string{fmt.Sprintf("items, if empty, will result in an empty %s\n", kind)}, + Type: b.t.Underlying, + }, + }, + } + nested := b + nested.t = optional + return nested.doStruct(sw) +} + +func (b bodyGen) doStruct(sw *generator.SnippetWriter) error { + if len(b.t.Name.Name) == 0 { + return nil + } + if namer.IsPrivateGoName(b.t.Name.Name) { + return nil + } + + var alias *types.Type + var fields []protoField + options := []string{} + allOptions := types.ExtractCommentTags("+", b.t.CommentLines) + for k, v := range allOptions { + switch { + case strings.HasPrefix(k, "protobuf.options."): + key := strings.TrimPrefix(k, 
"protobuf.options.") + switch key { + case "marshal": + if v[0] == "false" { + if !b.omitGogo { + options = append(options, + "(gogoproto.marshaler) = false", + "(gogoproto.unmarshaler) = false", + "(gogoproto.sizer) = false", + ) + } + } + default: + if !b.omitGogo || !strings.HasPrefix(key, "(gogoproto.") { + if key == "(gogoproto.goproto_stringer)" && v[0] == "false" { + options = append(options, "(gogoproto.stringer) = false") + } + options = append(options, fmt.Sprintf("%s = %s", key, v[0])) + } + } + // protobuf.as allows a type to have the same message contents as another Go type + case k == "protobuf.as": + fields = nil + if alias = b.locator.GoTypeForName(types.Name{Name: v[0]}); alias == nil { + return fmt.Errorf("type %v references alias %q which does not exist", b.t, v[0]) + } + // protobuf.embed instructs the generator to use the named type in this package + // as an embedded message. + case k == "protobuf.embed": + fields = []protoField{ + { + Tag: 1, + Name: v[0], + Type: &types.Type{ + Name: types.Name{ + Name: v[0], + Package: b.localPackage.Package, + Path: b.localPackage.Path, + }, + }, + }, + } + } + } + if alias == nil { + alias = b.t + } + + // If we don't explicitly embed anything, generate fields by traversing fields. 
+ if fields == nil { + memberFields, err := membersToFields(b.locator, alias, b.localPackage, b.omitFieldTypes) + if err != nil { + return fmt.Errorf("type %v cannot be converted to protobuf: %v", b.t, err) + } + fields = memberFields + } + + out := sw.Out() + genComment(out, b.t.CommentLines, "") + sw.Do(`message $.Name.Name$ { +`, b.t) + + if len(options) > 0 { + sort.Sort(sort.StringSlice(options)) + for _, s := range options { + fmt.Fprintf(out, " option %s;\n", s) + } + fmt.Fprintln(out) + } + + for i, field := range fields { + genComment(out, field.CommentLines, " ") + fmt.Fprintf(out, " ") + switch { + case field.Map: + case field.Repeated: + fmt.Fprintf(out, "repeated ") + case field.Required: + fmt.Fprintf(out, "required ") + default: + fmt.Fprintf(out, "optional ") + } + sw.Do(`$.Type|local$ $.Name$ = $.Tag$`, field) + if len(field.Extras) > 0 { + extras := []string{} + for k, v := range field.Extras { + if b.omitGogo && strings.HasPrefix(k, "(gogoproto.") { + continue + } + extras = append(extras, fmt.Sprintf("%s = %s", k, v)) + } + sort.Sort(sort.StringSlice(extras)) + if len(extras) > 0 { + fmt.Fprintf(out, " [") + fmt.Fprint(out, strings.Join(extras, ", ")) + fmt.Fprintf(out, "]") + } + } + fmt.Fprintf(out, ";\n") + if i != len(fields)-1 { + fmt.Fprintf(out, "\n") + } + } + fmt.Fprintf(out, "}\n\n") + return nil +} + +type protoField struct { + LocalPackage types.Name + + Tag int + Name string + Type *types.Type + Map bool + Repeated bool + Optional bool + Required bool + Nullable bool + Extras map[string]string + + CommentLines []string +} + +var ( + errUnrecognizedType = fmt.Errorf("did not recognize the provided type") +) + +func isFundamentalProtoType(t *types.Type) (*types.Type, bool) { + // TODO: when we enable proto3, also include other fundamental types in the google.protobuf package + // switch { + // case t.Kind == types.Struct && t.Name == types.Name{Package: "time", Name: "Time"}: + // return &types.Type{ + // Kind: types.Protobuf, + // 
Name: types.Name{Path: "google/protobuf/timestamp.proto", Package: "google.protobuf", Name: "Timestamp"}, + // }, true + // } + switch t.Kind { + case types.Slice: + if t.Elem.Name.Name == "byte" && len(t.Elem.Name.Package) == 0 { + return &types.Type{Name: types.Name{Name: "bytes"}, Kind: types.Protobuf}, true + } + case types.Builtin: + switch t.Name.Name { + case "string", "uint32", "int32", "uint64", "int64", "bool": + return &types.Type{Name: types.Name{Name: t.Name.Name}, Kind: types.Protobuf}, true + case "int": + return &types.Type{Name: types.Name{Name: "int64"}, Kind: types.Protobuf}, true + case "uint": + return &types.Type{Name: types.Name{Name: "uint64"}, Kind: types.Protobuf}, true + case "float64", "float": + return &types.Type{Name: types.Name{Name: "double"}, Kind: types.Protobuf}, true + case "float32": + return &types.Type{Name: types.Name{Name: "float"}, Kind: types.Protobuf}, true + case "uintptr": + return &types.Type{Name: types.Name{Name: "uint64"}, Kind: types.Protobuf}, true + } + // TODO: complex? + } + return t, false +} + +func memberTypeToProtobufField(locator ProtobufLocator, field *protoField, t *types.Type) error { + var err error + switch t.Kind { + case types.Protobuf: + field.Type, err = locator.ProtoTypeFor(t) + case types.Builtin: + field.Type, err = locator.ProtoTypeFor(t) + case types.Map: + valueField := &protoField{} + if err := memberTypeToProtobufField(locator, valueField, t.Elem); err != nil { + return err + } + keyField := &protoField{} + if err := memberTypeToProtobufField(locator, keyField, t.Key); err != nil { + return err + } + // All other protobuf types have kind types.Protobuf, so setting types.Map + // here would be very misleading. 
+ field.Type = &types.Type{ + Kind: types.Protobuf, + Key: keyField.Type, + Elem: valueField.Type, + } + if !strings.HasPrefix(t.Name.Name, "map[") { + field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) + } + if k, ok := keyField.Extras["(gogoproto.casttype)"]; ok { + field.Extras["(gogoproto.castkey)"] = k + } + if v, ok := valueField.Extras["(gogoproto.casttype)"]; ok { + field.Extras["(gogoproto.castvalue)"] = v + } + field.Map = true + case types.Pointer: + if err := memberTypeToProtobufField(locator, field, t.Elem); err != nil { + return err + } + field.Nullable = true + case types.Alias: + if isOptionalAlias(t) { + field.Type, err = locator.ProtoTypeFor(t) + field.Nullable = true + } else { + if err := memberTypeToProtobufField(locator, field, t.Underlying); err != nil { + log.Printf("failed to alias: %s %s: err %v", t.Name, t.Underlying.Name, err) + return err + } + // If this is not an alias to a slice, cast to the alias + if !field.Repeated { + if field.Extras == nil { + field.Extras = make(map[string]string) + } + field.Extras["(gogoproto.casttype)"] = strconv.Quote(locator.CastTypeName(t.Name)) + } + } + case types.Slice: + if t.Elem.Name.Name == "byte" && len(t.Elem.Name.Package) == 0 { + field.Type = &types.Type{Name: types.Name{Name: "bytes"}, Kind: types.Protobuf} + return nil + } + if err := memberTypeToProtobufField(locator, field, t.Elem); err != nil { + return err + } + field.Repeated = true + case types.Struct: + if len(t.Name.Name) == 0 { + return errUnrecognizedType + } + field.Type, err = locator.ProtoTypeFor(t) + field.Nullable = false + default: + return errUnrecognizedType + } + return err +} + +// protobufTagToField extracts information from an existing protobuf tag +func protobufTagToField(tag string, field *protoField, m types.Member, t *types.Type, localPackage types.Name) error { + if len(tag) == 0 || tag == "-" { + return nil + } + + // 
protobuf:"bytes,3,opt,name=Id,customtype=github.com/gogo/protobuf/test.Uuid" + parts := strings.Split(tag, ",") + if len(parts) < 3 { + return fmt.Errorf("member %q of %q malformed 'protobuf' tag, not enough segments\n", m.Name, t.Name) + } + protoTag, err := strconv.Atoi(parts[1]) + if err != nil { + return fmt.Errorf("member %q of %q malformed 'protobuf' tag, field ID is %q which is not an integer: %v\n", m.Name, t.Name, parts[1], err) + } + field.Tag = protoTag + + // In general there is doesn't make sense to parse the protobuf tags to get the type, + // as all auto-generated once will have wire type "bytes", "varint" or "fixed64". + // However, sometimes we explicitly set them to have a custom serialization, e.g.: + // type Time struct { + // time.Time `protobuf:"Timestamp,1,req,name=time"` + // } + // to force the generator to use a given type (that we manually wrote serialization & + // deserialization methods for). + switch parts[0] { + case "varint", "fixed32", "fixed64", "bytes", "group": + default: + name := types.Name{} + if last := strings.LastIndex(parts[0], "."); last != -1 { + prefix := parts[0][:last] + name = types.Name{ + Name: parts[0][last+1:], + Package: prefix, + Path: strings.Replace(prefix, ".", "/", -1), + } + } else { + name = types.Name{ + Name: parts[0], + Package: localPackage.Package, + Path: localPackage.Path, + } + } + field.Type = &types.Type{ + Name: name, + Kind: types.Protobuf, + } + } + + protoExtra := make(map[string]string) + for i, extra := range parts[3:] { + parts := strings.SplitN(extra, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("member %q of %q malformed 'protobuf' tag, tag %d should be key=value, got %q\n", m.Name, t.Name, i+4, extra) + } + switch parts[0] { + case "name": + protoExtra[parts[0]] = parts[1] + case "casttype", "castkey", "castvalue": + parts[0] = fmt.Sprintf("(gogoproto.%s)", parts[0]) + protoExtra[parts[0]] = strconv.Quote(parts[1]) + } + } + + field.Extras = protoExtra + if name, ok := 
protoExtra["name"]; ok { + field.Name = name + delete(protoExtra, "name") + } + + return nil +} + +func membersToFields(locator ProtobufLocator, t *types.Type, localPackage types.Name, omitFieldTypes map[types.Name]struct{}) ([]protoField, error) { + fields := []protoField{} + + for _, m := range t.Members { + if namer.IsPrivateGoName(m.Name) { + // skip private fields + continue + } + if _, ok := omitFieldTypes[types.Name{Name: m.Type.Name.Name, Package: m.Type.Name.Package}]; ok { + continue + } + tags := reflect.StructTag(m.Tags) + field := protoField{ + LocalPackage: localPackage, + + Tag: -1, + Extras: make(map[string]string), + } + + protobufTag := tags.Get("protobuf") + if protobufTag == "-" { + continue + } + + if err := protobufTagToField(protobufTag, &field, m, t, localPackage); err != nil { + return nil, err + } + + // extract information from JSON field tag + if tag := tags.Get("json"); len(tag) > 0 { + parts := strings.Split(tag, ",") + if len(field.Name) == 0 && len(parts[0]) != 0 { + field.Name = parts[0] + } + if field.Tag == -1 && field.Name == "-" { + continue + } + } + + if field.Type == nil { + if err := memberTypeToProtobufField(locator, &field, m.Type); err != nil { + return nil, fmt.Errorf("unable to embed type %q as field %q in %q: %v", m.Type, field.Name, t.Name, err) + } + } + if len(field.Name) == 0 { + field.Name = namer.IL(m.Name) + } + + if field.Map && field.Repeated { + // maps cannot be repeated + field.Repeated = false + field.Nullable = true + } + + if !field.Nullable { + field.Extras["(gogoproto.nullable)"] = "false" + } + if (field.Type.Name.Name == "bytes" && field.Type.Name.Package == "") || (field.Repeated && field.Type.Name.Package == "" && namer.IsPrivateGoName(field.Type.Name.Name)) { + delete(field.Extras, "(gogoproto.nullable)") + } + if field.Name != m.Name { + field.Extras["(gogoproto.customname)"] = strconv.Quote(m.Name) + } + field.CommentLines = m.CommentLines + fields = append(fields, field) + } + + // assign tags 
+ highest := 0 + byTag := make(map[int]*protoField) + // fields are in Go struct order, which we preserve + for i := range fields { + field := &fields[i] + tag := field.Tag + if tag != -1 { + if existing, ok := byTag[tag]; ok { + return nil, fmt.Errorf("field %q and %q both have tag %d", field.Name, existing.Name, tag) + } + byTag[tag] = field + } + if tag > highest { + highest = tag + } + } + // starting from the highest observed tag, assign new field tags + for i := range fields { + field := &fields[i] + if field.Tag != -1 { + continue + } + highest++ + field.Tag = highest + byTag[field.Tag] = field + } + return fields, nil +} + +func genComment(out io.Writer, lines []string, indent string) { + for { + l := len(lines) + if l == 0 || len(lines[l-1]) != 0 { + break + } + lines = lines[:l-1] + } + for _, c := range lines { + if len(c) == 0 { + fmt.Fprintf(out, "%s//\n", indent) // avoid trailing whitespace + continue + } + fmt.Fprintf(out, "%s// %s\n", indent, c) + } +} + +func formatProtoFile(source []byte) ([]byte, error) { + // TODO; Is there any protobuf formatter? 
+ return source, nil +} + +func assembleProtoFile(w io.Writer, f *generator.File) { + w.Write(f.Header) + + fmt.Fprint(w, "syntax = 'proto2';\n\n") + + if len(f.PackageName) > 0 { + fmt.Fprintf(w, "package %s;\n\n", f.PackageName) + } + + if len(f.Imports) > 0 { + imports := []string{} + for i := range f.Imports { + imports = append(imports, i) + } + sort.Strings(imports) + for _, s := range imports { + fmt.Fprintf(w, "import %q;\n", s) + } + fmt.Fprint(w, "\n") + } + + if f.Vars.Len() > 0 { + fmt.Fprintf(w, "%s\n", f.Vars.String()) + } + + w.Write(f.Body.Bytes()) +} + +func NewProtoFile() *generator.DefaultFileType { + return &generator.DefaultFileType{ + Format: formatProtoFile, + Assemble: assembleProtoFile, + } +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go new file mode 100644 index 0000000000..08a991b155 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package protobuf + +import ( + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +type ImportTracker struct { + namer.DefaultImportTracker +} + +func NewImportTracker(local types.Name, typesToAdd ...*types.Type) *ImportTracker { + tracker := namer.NewDefaultImportTracker(local) + tracker.IsInvalidType = func(t *types.Type) bool { return t.Kind != types.Protobuf } + tracker.LocalName = func(name types.Name) string { return name.Package } + tracker.PrintImport = func(path, name string) string { return path } + + tracker.AddTypes(typesToAdd...) + return &ImportTracker{ + DefaultImportTracker: tracker, + } +} + +// AddNullable ensures that support for the nullable Gogo-protobuf extension is added. +func (tracker *ImportTracker) AddNullable() { + tracker.AddType(&types.Type{ + Kind: types.Protobuf, + Name: types.Name{ + Name: "nullable", + Package: "gogoproto", + Path: "github.com/gogo/protobuf/gogoproto/gogo.proto", + }, + }) +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go new file mode 100644 index 0000000000..e3b21c6703 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go @@ -0,0 +1,208 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package protobuf + +import ( + "fmt" + "reflect" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +type localNamer struct { + localPackage types.Name +} + +func (n localNamer) Name(t *types.Type) string { + if t.Key != nil && t.Elem != nil { + return fmt.Sprintf("map<%s, %s>", n.Name(t.Key), n.Name(t.Elem)) + } + if len(n.localPackage.Package) != 0 && n.localPackage.Package == t.Name.Package { + return t.Name.Name + } + return t.Name.String() +} + +type protobufNamer struct { + packages []*protobufPackage + packagesByPath map[string]*protobufPackage +} + +func NewProtobufNamer() *protobufNamer { + return &protobufNamer{ + packagesByPath: make(map[string]*protobufPackage), + } +} + +func (n *protobufNamer) Name(t *types.Type) string { + if t.Kind == types.Map { + return fmt.Sprintf("map<%s, %s>", n.Name(t.Key), n.Name(t.Elem)) + } + return t.Name.String() +} + +func (n *protobufNamer) List() []generator.Package { + packages := make([]generator.Package, 0, len(n.packages)) + for i := range n.packages { + packages = append(packages, n.packages[i]) + } + return packages +} + +func (n *protobufNamer) Add(p *protobufPackage) { + if _, ok := n.packagesByPath[p.PackagePath]; !ok { + n.packagesByPath[p.PackagePath] = p + n.packages = append(n.packages, p) + } +} + +func (n *protobufNamer) GoNameToProtoName(name types.Name) types.Name { + if p, ok := n.packagesByPath[name.Package]; ok { + return types.Name{ + Name: name.Name, + Package: p.PackageName, + Path: p.ImportPath(), + } + } + for _, p := range n.packages { + if _, ok := p.FilterTypes[name]; ok { + return types.Name{ + Name: name.Name, + Package: p.PackageName, + Path: p.ImportPath(), + } + } + } + return types.Name{Name: name.Name} +} + +func protoSafePackage(name string) string { + pkg := strings.Replace(name, "/", ".", -1) + return strings.Replace(pkg, "-", "_", -1) +} + +type typeNameSet map[types.Name]*protobufPackage + +// assignGoTypeToProtoPackage looks for Go 
and Protobuf types that are referenced by a type in +// a package. It will not recurse into protobuf types. +func assignGoTypeToProtoPackage(p *protobufPackage, t *types.Type, local, global typeNameSet, optional map[types.Name]struct{}) { + newT, isProto := isFundamentalProtoType(t) + if isProto { + t = newT + } + if otherP, ok := global[t.Name]; ok { + if _, ok := local[t.Name]; !ok { + p.Imports.AddType(&types.Type{ + Kind: types.Protobuf, + Name: otherP.ProtoTypeName(), + }) + } + return + } + if t.Name.Package == p.PackagePath { + // Associate types only to their own package + global[t.Name] = p + } + if _, ok := local[t.Name]; ok { + return + } + // don't recurse into existing proto types + if isProto { + p.Imports.AddType(t) + return + } + + local[t.Name] = p + for _, m := range t.Members { + if namer.IsPrivateGoName(m.Name) { + continue + } + field := &protoField{} + tag := reflect.StructTag(m.Tags).Get("protobuf") + if tag == "-" { + continue + } + if err := protobufTagToField(tag, field, m, t, p.ProtoTypeName()); err == nil && field.Type != nil { + assignGoTypeToProtoPackage(p, field.Type, local, global, optional) + continue + } + assignGoTypeToProtoPackage(p, m.Type, local, global, optional) + } + // TODO: should methods be walked? + if t.Elem != nil { + assignGoTypeToProtoPackage(p, t.Elem, local, global, optional) + } + if t.Key != nil { + assignGoTypeToProtoPackage(p, t.Key, local, global, optional) + } + if t.Underlying != nil { + if t.Kind == types.Alias && isOptionalAlias(t) { + optional[t.Name] = struct{}{} + } + assignGoTypeToProtoPackage(p, t.Underlying, local, global, optional) + } +} + +// isTypeApplicableToProtobuf checks to see if a type is relevant for protobuf processing. +// Currently, it filters out functions and private types. 
+func isTypeApplicableToProtobuf(t *types.Type) bool { + // skip functions -- we don't care about them for protobuf + if t.Kind == types.Func || (t.Kind == types.DeclarationOf && t.Underlying.Kind == types.Func) { + return false + } + // skip private types + if namer.IsPrivateGoName(t.Name.Name) { + return false + } + + return true +} + +func (n *protobufNamer) AssignTypesToPackages(c *generator.Context) error { + global := make(typeNameSet) + for _, p := range n.packages { + local := make(typeNameSet) + optional := make(map[types.Name]struct{}) + p.Imports = NewImportTracker(p.ProtoTypeName()) + for _, t := range c.Order { + if t.Name.Package != p.PackagePath { + continue + } + if !isTypeApplicableToProtobuf(t) { + // skip types that we don't care about, like functions + continue + } + assignGoTypeToProtoPackage(p, t, local, global, optional) + } + p.FilterTypes = make(map[types.Name]struct{}) + p.LocalNames = make(map[string]struct{}) + p.OptionalTypeNames = make(map[string]struct{}) + for k, v := range local { + if v == p { + p.FilterTypes[k] = struct{}{} + p.LocalNames[k.Name] = struct{}{} + if _, ok := optional[k]; ok { + p.OptionalTypeNames[k.Name] = struct{}{} + } + } + } + } + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go new file mode 100644 index 0000000000..bed4c3e306 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go @@ -0,0 +1,215 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "fmt" + "go/ast" + "log" + "os" + "path/filepath" + "reflect" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" +) + +func newProtobufPackage(packagePath, packageName string, generateAll bool, omitFieldTypes map[types.Name]struct{}) *protobufPackage { + pkg := &protobufPackage{ + DefaultPackage: generator.DefaultPackage{ + // The protobuf package name (foo.bar.baz) + PackageName: packageName, + // A path segment relative to the GOPATH root (foo/bar/baz) + PackagePath: packagePath, + HeaderText: []byte( + ` +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +`), + PackageDocumentation: []byte(fmt.Sprintf( + `// Package %s is an autogenerated protobuf IDL. +`, packageName)), + }, + GenerateAll: generateAll, + OmitFieldTypes: omitFieldTypes, + } + pkg.FilterFunc = pkg.filterFunc + pkg.GeneratorFunc = pkg.generatorFunc + return pkg +} + +// protobufPackage contains the protobuf implementation of Package. +type protobufPackage struct { + generator.DefaultPackage + + // If true, this package has been vendored into our source tree and thus can + // only be generated by changing the vendor tree. + Vendored bool + + // If true, generate protobuf serializations for all public types. + // If false, only generate protobuf serializations for structs that + // request serialization. + GenerateAll bool + + // A list of types to filter to; if not specified all types will be included. + FilterTypes map[types.Name]struct{} + + // If true, omit any gogoprotobuf extensions not defined as types. 
+ OmitGogo bool + + // A list of field types that will be excluded from the output struct + OmitFieldTypes map[types.Name]struct{} + + // A list of names that this package exports + LocalNames map[string]struct{} + + // A list of type names in this package that will need marshaller rewriting + // to remove synthetic protobuf fields. + OptionalTypeNames map[string]struct{} + + // A list of struct tags to generate onto named struct fields + StructTags map[string]map[string]string + + // An import tracker for this package + Imports *ImportTracker +} + +func (p *protobufPackage) Clean(outputBase string) error { + for _, s := range []string{p.ImportPath(), p.OutputPath()} { + if err := os.Remove(filepath.Join(outputBase, s)); err != nil && !os.IsNotExist(err) { + return err + } + } + return nil +} + +func (p *protobufPackage) ProtoTypeName() types.Name { + return types.Name{ + Name: p.Path(), // the go path "foo/bar/baz" + Package: p.Name(), // the protobuf package "foo.bar.baz" + Path: p.ImportPath(), // the path of the import to get the proto + } +} + +func (p *protobufPackage) filterFunc(c *generator.Context, t *types.Type) bool { + switch t.Kind { + case types.Func, types.Chan: + return false + case types.Struct: + if t.Name.Name == "struct{}" { + return false + } + case types.Builtin: + return false + case types.Alias: + if !isOptionalAlias(t) { + return false + } + case types.Slice, types.Array, types.Map: + return false + case types.Pointer: + return false + } + if _, ok := isFundamentalProtoType(t); ok { + return false + } + _, ok := p.FilterTypes[t.Name] + return ok +} + +func (p *protobufPackage) HasGoType(name string) bool { + _, ok := p.LocalNames[name] + return ok +} + +func (p *protobufPackage) OptionalTypeName(name string) bool { + _, ok := p.OptionalTypeNames[name] + return ok +} + +func (p *protobufPackage) ExtractGeneratedType(t *ast.TypeSpec) bool { + if !p.HasGoType(t.Name.Name) { + return false + } + + switch s := t.Type.(type) { + case 
*ast.StructType: + for i, f := range s.Fields.List { + if len(f.Tag.Value) == 0 { + continue + } + tag := strings.Trim(f.Tag.Value, "`") + protobufTag := reflect.StructTag(tag).Get("protobuf") + if len(protobufTag) == 0 { + continue + } + if len(f.Names) > 1 { + log.Printf("WARNING: struct %s field %d %s: defined multiple names but single protobuf tag", t.Name.Name, i, f.Names[0].Name) + // TODO hard error? + } + if p.StructTags == nil { + p.StructTags = make(map[string]map[string]string) + } + m := p.StructTags[t.Name.Name] + if m == nil { + m = make(map[string]string) + p.StructTags[t.Name.Name] = m + } + m[f.Names[0].Name] = tag + } + default: + log.Printf("WARNING: unexpected Go AST type definition: %#v", t) + } + + return true +} + +func (p *protobufPackage) generatorFunc(c *generator.Context) []generator.Generator { + generators := []generator.Generator{} + + p.Imports.AddNullable() + + generators = append(generators, &genProtoIDL{ + DefaultGen: generator.DefaultGen{ + OptionalName: "generated", + }, + localPackage: types.Name{Package: p.PackageName, Path: p.PackagePath}, + localGoPackage: types.Name{Package: p.PackagePath, Name: p.GoPackageName()}, + imports: p.Imports, + generateAll: p.GenerateAll, + omitGogo: p.OmitGogo, + omitFieldTypes: p.OmitFieldTypes, + }) + return generators +} + +func (p *protobufPackage) GoPackageName() string { + return filepath.Base(p.PackagePath) +} + +func (p *protobufPackage) ImportPath() string { + return filepath.Join(p.PackagePath, "generated.proto") +} + +func (p *protobufPackage) OutputPath() string { + return filepath.Join(p.PackagePath, "generated.pb.go") +} + +var ( + _ = generator.Package(&protobufPackage{}) +) diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go new file mode 100644 index 0000000000..3115bc688d --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go @@ -0,0 +1,452 @@ +/* 
+Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "bytes" + "errors" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io/ioutil" + "os" + "reflect" + "strings" + + customreflect "k8s.io/code-generator/third_party/forked/golang/reflect" +) + +func rewriteFile(name string, header []byte, rewriteFn func(*token.FileSet, *ast.File) error) error { + fset := token.NewFileSet() + src, err := ioutil.ReadFile(name) + if err != nil { + return err + } + file, err := parser.ParseFile(fset, name, src, parser.DeclarationErrors|parser.ParseComments) + if err != nil { + return err + } + + if err := rewriteFn(fset, file); err != nil { + return err + } + + b := &bytes.Buffer{} + b.Write(header) + if err := printer.Fprint(b, fset, file); err != nil { + return err + } + + body, err := format.Source(b.Bytes()) + if err != nil { + return err + } + + f, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer f.Close() + if _, err := f.Write(body); err != nil { + return err + } + return f.Close() +} + +// ExtractFunc extracts information from the provided TypeSpec and returns true if the type should be +// removed from the destination file. +type ExtractFunc func(*ast.TypeSpec) bool + +// OptionalFunc returns true if the provided local name is a type that has protobuf.nullable=true +// and should have its marshal functions adjusted to remove the 'Items' accessor. 
+type OptionalFunc func(name string) bool + +func RewriteGeneratedGogoProtobufFile(name string, extractFn ExtractFunc, optionalFn OptionalFunc, header []byte) error { + return rewriteFile(name, header, func(fset *token.FileSet, file *ast.File) error { + cmap := ast.NewCommentMap(fset, file, file.Comments) + + // transform methods that point to optional maps or slices + for _, d := range file.Decls { + rewriteOptionalMethods(d, optionalFn) + } + + // remove types that are already declared + decls := []ast.Decl{} + for _, d := range file.Decls { + if dropExistingTypeDeclarations(d, extractFn) { + continue + } + if dropEmptyImportDeclarations(d) { + continue + } + decls = append(decls, d) + } + file.Decls = decls + + // remove unmapped comments + file.Comments = cmap.Filter(file).Comments() + return nil + }) +} + +// rewriteOptionalMethods makes specific mutations to marshaller methods that belong to types identified +// as being "optional" (they may be nil on the wire). This allows protobuf to serialize a map or slice and +// properly discriminate between empty and nil (which is not possible in protobuf). 
+// TODO: move into upstream gogo-protobuf once https://github.com/gogo/protobuf/issues/181 +// has agreement +func rewriteOptionalMethods(decl ast.Decl, isOptional OptionalFunc) { + switch t := decl.(type) { + case *ast.FuncDecl: + ident, ptr, ok := receiver(t) + if !ok { + return + } + + // correct initialization of the form `m.Field = &OptionalType{}` to + // `m.Field = OptionalType{}` + if t.Name.Name == "Unmarshal" { + ast.Walk(optionalAssignmentVisitor{fn: isOptional}, t.Body) + } + + if !isOptional(ident.Name) { + return + } + + switch t.Name.Name { + case "Unmarshal": + ast.Walk(&optionalItemsVisitor{}, t.Body) + case "MarshalTo", "Size", "String", "MarshalToSizedBuffer": + ast.Walk(&optionalItemsVisitor{}, t.Body) + fallthrough + case "Marshal": + // if the method has a pointer receiver, set it back to a normal receiver + if ptr { + t.Recv.List[0].Type = ident + } + } + } +} + +type optionalAssignmentVisitor struct { + fn OptionalFunc +} + +// Visit walks the provided node, transforming field initializations of the form +// m.Field = &OptionalType{} -> m.Field = OptionalType{} +func (v optionalAssignmentVisitor) Visit(n ast.Node) ast.Visitor { + switch t := n.(type) { + case *ast.AssignStmt: + if len(t.Lhs) == 1 && len(t.Rhs) == 1 { + if !isFieldSelector(t.Lhs[0], "m", "") { + return nil + } + unary, ok := t.Rhs[0].(*ast.UnaryExpr) + if !ok || unary.Op != token.AND { + return nil + } + composite, ok := unary.X.(*ast.CompositeLit) + if !ok || composite.Type == nil || len(composite.Elts) != 0 { + return nil + } + if ident, ok := composite.Type.(*ast.Ident); ok && v.fn(ident.Name) { + t.Rhs[0] = composite + } + } + return nil + } + return v +} + +type optionalItemsVisitor struct{} + +// Visit walks the provided node, looking for specific patterns to transform that match +// the effective outcome of turning struct{ map[x]y || []x } into map[x]y or []x. 
+func (v *optionalItemsVisitor) Visit(n ast.Node) ast.Visitor { + switch t := n.(type) { + case *ast.RangeStmt: + if isFieldSelector(t.X, "m", "Items") { + t.X = &ast.Ident{Name: "m"} + } + case *ast.AssignStmt: + if len(t.Lhs) == 1 && len(t.Rhs) == 1 { + switch lhs := t.Lhs[0].(type) { + case *ast.IndexExpr: + if isFieldSelector(lhs.X, "m", "Items") { + lhs.X = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + default: + if isFieldSelector(t.Lhs[0], "m", "Items") { + t.Lhs[0] = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + switch rhs := t.Rhs[0].(type) { + case *ast.CallExpr: + if ident, ok := rhs.Fun.(*ast.Ident); ok && ident.Name == "append" { + ast.Walk(v, rhs) + if len(rhs.Args) > 0 { + switch arg := rhs.Args[0].(type) { + case *ast.Ident: + if arg.Name == "m" { + rhs.Args[0] = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + } + return nil + } + } + } + case *ast.IfStmt: + switch cond := t.Cond.(type) { + case *ast.BinaryExpr: + if cond.Op == token.EQL { + if isFieldSelector(cond.X, "m", "Items") && isIdent(cond.Y, "nil") { + cond.X = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + } + if t.Init != nil { + // Find form: + // if err := m[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + // return err + // } + switch s := t.Init.(type) { + case *ast.AssignStmt: + if call, ok := s.Rhs[0].(*ast.CallExpr); ok { + if sel, ok := call.Fun.(*ast.SelectorExpr); ok { + if x, ok := sel.X.(*ast.IndexExpr); ok { + // m[] -> (*m)[] + if sel2, ok := x.X.(*ast.SelectorExpr); ok { + if ident, ok := sel2.X.(*ast.Ident); ok && ident.Name == "m" { + x.X = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + // len(m.Items) -> len(*m) + if bin, ok := x.Index.(*ast.BinaryExpr); ok { + if call2, ok := bin.X.(*ast.CallExpr); ok && len(call2.Args) == 1 { + if isFieldSelector(call2.Args[0], "m", "Items") { + call2.Args[0] = &ast.StarExpr{X: &ast.Ident{Name: "m"}} + } + } + } + } + } + } + } + } + case *ast.IndexExpr: + if isFieldSelector(t.X, "m", "Items") { + t.X = 
&ast.Ident{Name: "m"} + return nil + } + case *ast.CallExpr: + changed := false + for i := range t.Args { + if isFieldSelector(t.Args[i], "m", "Items") { + t.Args[i] = &ast.Ident{Name: "m"} + changed = true + } + } + if changed { + return nil + } + } + return v +} + +func isFieldSelector(n ast.Expr, name, field string) bool { + s, ok := n.(*ast.SelectorExpr) + if !ok || s.Sel == nil || (field != "" && s.Sel.Name != field) { + return false + } + return isIdent(s.X, name) +} + +func isIdent(n ast.Expr, value string) bool { + ident, ok := n.(*ast.Ident) + return ok && ident.Name == value +} + +func receiver(f *ast.FuncDecl) (ident *ast.Ident, pointer bool, ok bool) { + if f.Recv == nil || len(f.Recv.List) != 1 { + return nil, false, false + } + switch t := f.Recv.List[0].Type.(type) { + case *ast.StarExpr: + identity, ok := t.X.(*ast.Ident) + if !ok { + return nil, false, false + } + return identity, true, true + case *ast.Ident: + return t, false, true + } + return nil, false, false +} + +// dropExistingTypeDeclarations removes any type declaration for which extractFn returns true. The function +// returns true if the entire declaration should be dropped. +func dropExistingTypeDeclarations(decl ast.Decl, extractFn ExtractFunc) bool { + switch t := decl.(type) { + case *ast.GenDecl: + if t.Tok != token.TYPE { + return false + } + specs := []ast.Spec{} + for _, s := range t.Specs { + switch spec := s.(type) { + case *ast.TypeSpec: + if extractFn(spec) { + continue + } + specs = append(specs, spec) + } + } + if len(specs) == 0 { + return true + } + t.Specs = specs + } + return false +} + +// dropEmptyImportDeclarations strips any generated but no-op imports from the generated code +// to prevent generation from being able to define side-effects. The function returns true +// if the entire declaration should be dropped. 
+func dropEmptyImportDeclarations(decl ast.Decl) bool { + switch t := decl.(type) { + case *ast.GenDecl: + if t.Tok != token.IMPORT { + return false + } + specs := []ast.Spec{} + for _, s := range t.Specs { + switch spec := s.(type) { + case *ast.ImportSpec: + if spec.Name != nil && spec.Name.Name == "_" { + continue + } + specs = append(specs, spec) + } + } + if len(specs) == 0 { + return true + } + t.Specs = specs + } + return false +} + +func RewriteTypesWithProtobufStructTags(name string, structTags map[string]map[string]string) error { + return rewriteFile(name, []byte{}, func(fset *token.FileSet, file *ast.File) error { + allErrs := []error{} + + // set any new struct tags + for _, d := range file.Decls { + if errs := updateStructTags(d, structTags, []string{"protobuf"}); len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + } + + if len(allErrs) > 0 { + var s string + for _, err := range allErrs { + s += err.Error() + "\n" + } + return errors.New(s) + } + return nil + }) +} + +func updateStructTags(decl ast.Decl, structTags map[string]map[string]string, toCopy []string) []error { + var errs []error + t, ok := decl.(*ast.GenDecl) + if !ok { + return nil + } + if t.Tok != token.TYPE { + return nil + } + + for _, s := range t.Specs { + spec, ok := s.(*ast.TypeSpec) + if !ok { + continue + } + typeName := spec.Name.Name + fieldTags, ok := structTags[typeName] + if !ok { + continue + } + st, ok := spec.Type.(*ast.StructType) + if !ok { + continue + } + + for i := range st.Fields.List { + f := st.Fields.List[i] + var name string + if len(f.Names) == 0 { + switch t := f.Type.(type) { + case *ast.Ident: + name = t.Name + case *ast.SelectorExpr: + name = t.Sel.Name + default: + errs = append(errs, fmt.Errorf("unable to get name for tag from struct %q, field %#v", spec.Name.Name, t)) + continue + } + } else { + name = f.Names[0].Name + } + value, ok := fieldTags[name] + if !ok { + continue + } + var tags customreflect.StructTags + if f.Tag != nil { + oldTags, err 
:= customreflect.ParseStructTags(strings.Trim(f.Tag.Value, "`")) + if err != nil { + errs = append(errs, fmt.Errorf("unable to read struct tag from struct %q, field %q: %v", spec.Name.Name, name, err)) + continue + } + tags = oldTags + } + for _, name := range toCopy { + // don't overwrite existing tags + if tags.Has(name) { + continue + } + // append new tags + if v := reflect.StructTag(value).Get(name); len(v) > 0 { + tags = append(tags, customreflect.StructTag{Name: name, Value: v}) + } + } + if len(tags) == 0 { + continue + } + if f.Tag == nil { + f.Tag = &ast.BasicLit{} + } + f.Tag.Value = tags.String() + } + } + return errs +} diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go new file mode 100644 index 0000000000..8e2a1917d0 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "k8s.io/gengo/types" + "k8s.io/klog" +) + +// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if +// it exists, the value is boolean. If the tag did not exist, it returns +// false. 
+func extractBoolTagOrDie(key string, lines []string) bool { + val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) + if err != nil { + klog.Fatal(err) + } + return val +} diff --git a/vendor/k8s.io/code-generator/cmd/import-boss/.gitignore b/vendor/k8s.io/code-generator/cmd/import-boss/.gitignore new file mode 100644 index 0000000000..a5c47b66f8 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/import-boss/.gitignore @@ -0,0 +1 @@ +import-boss diff --git a/vendor/k8s.io/code-generator/cmd/import-boss/main.go b/vendor/k8s.io/code-generator/cmd/import-boss/main.go new file mode 100644 index 0000000000..0080f01eb0 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/import-boss/main.go @@ -0,0 +1,96 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// import-boss enforces import restrictions in a given repository. +// +// When a directory is verified, import-boss looks for a file called +// ".import-restrictions". If this file is not found, parent directories will be +// recursively searched. +// +// If an ".import-restrictions" file is found, then all imports of the package +// are checked against each "rule" in the file. A rule consists of three parts: +// +// - A SelectorRegexp, to select the import paths that the rule applies to. +// +// - A list of AllowedPrefixes +// +// - A list of ForbiddenPrefixes +// +// An import is allowed if it matches at least one allowed prefix and does not +// match any forbidden prefix. 
An example file looks like this: +// +// { +// "Rules": [ +// { +// "SelectorRegexp": "k8s[.]io", +// "AllowedPrefixes": [ +// "k8s.io/gengo/examples", +// "k8s.io/kubernetes/third_party" +// ], +// "ForbiddenPrefixes": [ +// "k8s.io/kubernetes/pkg/third_party/deprecated" +// ] +// }, +// { +// "SelectorRegexp": "^unsafe$", +// "AllowedPrefixes": [ +// ], +// "ForbiddenPrefixes": [ +// "" +// ] +// } +// ] +// } +// +// Note the second block explicitly matches the unsafe package, and forbids it +// ("" is a prefix of everything). +package main + +import ( + "os" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/import-boss/generators" + + "k8s.io/klog" +) + +func main() { + klog.InitFlags(nil) + arguments := args.Default() + + // Override defaults. + arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + arguments.InputDirs = []string{ + "k8s.io/kubernetes/pkg/...", + "k8s.io/kubernetes/cmd/...", + "k8s.io/kubernetes/plugin/...", + } + pflag.CommandLine.BoolVar(&arguments.IncludeTestFiles, "include-test-files", false, "If true, include *_test.go files.") + + if err := arguments.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Errorf("Error: %v", err) + os.Exit(1) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go new file mode 100644 index 0000000000..ba7f720917 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path" + + "github.com/spf13/pflag" + codegenutil "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct { + VersionedClientSetPackage string + InternalClientSetPackage string + ListersPackage string + SingleDirectory bool +} + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + SingleDirectory: false, + } + genericArgs.CustomArgs = customArgs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/informers") + customArgs.VersionedClientSetPackage = path.Join(pkg, "pkg/client/clientset/versioned") + customArgs.InternalClientSetPackage = path.Join(pkg, "pkg/client/clientset/internalversion") + customArgs.ListersPackage = path.Join(pkg, "pkg/client/listers") + } + + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. 
+func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&ca.InternalClientSetPackage, "internal-clientset-package", ca.InternalClientSetPackage, "the full package name for the internal clientset to use") + fs.StringVar(&ca.VersionedClientSetPackage, "versioned-clientset-package", ca.VersionedClientSetPackage, "the full package name for the versioned clientset to use") + fs.StringVar(&ca.ListersPackage, "listers-package", ca.ListersPackage, "the full package name for the listers to use") + fs.BoolVar(&ca.SingleDirectory, "single-directory", ca.SingleDirectory, "if true, omit the intermediate \"internalversion\" and \"externalversions\" subdirectories") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + customArgs := genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + if len(customArgs.VersionedClientSetPackage) == 0 { + return fmt.Errorf("versioned clientset package cannot be empty") + } + if len(customArgs.ListersPackage) == 0 { + return fmt.Errorf("listers package cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go new file mode 100644 index 0000000000..6e5793109b --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go @@ -0,0 +1,258 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// factoryGenerator produces a file of listers for a given GroupVersion and +// type. +type factoryGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + groupVersions map[string]clientgentypes.GroupVersions + gvGoNames map[string]string + clientSetPackage string + internalInterfacesPackage string + filtered bool +} + +var _ generator.Generator = &factoryGenerator{} + +func (g *factoryGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *factoryGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *factoryGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) 
+ return +} + +func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + klog.V(5).Infof("processing type %v", t) + + gvInterfaces := make(map[string]*types.Type) + gvNewFuncs := make(map[string]*types.Type) + for groupPkgName := range g.groupVersions { + gvInterfaces[groupPkgName] = c.Universe.Type(types.Name{Package: path.Join(g.outputPackage, groupPkgName), Name: "Interface"}) + gvNewFuncs[groupPkgName] = c.Universe.Function(types.Name{Package: path.Join(g.outputPackage, groupPkgName), Name: "New"}) + } + m := map[string]interface{}{ + "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "groupVersions": g.groupVersions, + "gvInterfaces": gvInterfaces, + "gvNewFuncs": gvNewFuncs, + "gvGoNames": g.gvGoNames, + "interfacesNewInformerFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "NewInformerFunc"}), + "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), + "informerFactoryInterface": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), + "clientSetInterface": c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}), + "reflectType": c.Universe.Type(reflectType), + "runtimeObject": c.Universe.Type(runtimeObject), + "schemaGroupVersionResource": c.Universe.Type(schemaGroupVersionResource), + "syncMutex": c.Universe.Type(syncMutex), + "timeDuration": c.Universe.Type(timeDuration), + "namespaceAll": c.Universe.Type(metav1NamespaceAll), + "object": c.Universe.Type(metav1Object), + } + + sw.Do(sharedInformerFactoryStruct, m) + sw.Do(sharedInformerFactoryInterface, m) + + return sw.Error() +} + +var sharedInformerFactoryStruct = ` +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client {{.clientSetInterface|raw}} + namespace string + tweakListOptions {{.interfacesTweakListOptionsFunc|raw}} + lock {{.syncMutex|raw}} + defaultResync {{.timeDuration|raw}} + customResync map[{{.reflectType|raw}}]{{.timeDuration|raw}} + + informers map[{{.reflectType|raw}}]{{.cacheSharedIndexInformer|raw}} + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[{{.reflectType|raw}}]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[{{.object|raw}}]{{.timeDuration|raw}}) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. 
+func NewSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}, namespace string, tweakListOptions {{.interfacesTweakListOptionsFunc|raw}}) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[{{.reflectType|raw}}]{{.cacheSharedIndexInformer|raw}}), + startedInformers: make(map[{{.reflectType|raw}}]bool), + customResync: make(map[{{.reflectType|raw}}]{{.timeDuration|raw}}), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func()map[reflect.Type]cache.SharedIndexInformer{ + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj {{.runtimeObject|raw}}, newFunc {{.interfacesNewInformerFunc|raw}}) {{.cacheSharedIndexInformer|raw}} { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +` + +var sharedInformerFactoryInterface = ` +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+type SharedInformerFactory interface { + {{.informerFactoryInterface|raw}} + ForResource(resource {{.schemaGroupVersionResource|raw}}) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + {{$gvInterfaces := .gvInterfaces}} + {{$gvGoNames := .gvGoNames}} + {{range $groupName, $group := .groupVersions}}{{index $gvGoNames $groupName}}() {{index $gvInterfaces $groupName|raw}} + {{end}} +} + +{{$gvNewFuncs := .gvNewFuncs}} +{{$gvGoNames := .gvGoNames}} +{{range $groupPkgName, $group := .groupVersions}} +func (f *sharedInformerFactory) {{index $gvGoNames $groupPkgName}}() {{index $gvInterfaces $groupPkgName|raw}} { + return {{index $gvNewFuncs $groupPkgName|raw}}(f, f.namespace, f.tweakListOptions) +} +{{end}} +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go new file mode 100644 index 0000000000..fc0668c5be --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go @@ -0,0 +1,90 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generators + +import ( + "io" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// factoryInterfaceGenerator produces a file of interfaces used to break a dependency cycle for +// informer registration +type factoryInterfaceGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + clientSetPackage string + filtered bool +} + +var _ generator.Generator = &factoryInterfaceGenerator{} + +func (g *factoryInterfaceGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *factoryInterfaceGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *factoryInterfaceGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + return +} + +func (g *factoryInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + klog.V(5).Infof("processing type %v", t) + + m := map[string]interface{}{ + "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "clientSetPackage": c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}), + "runtimeObject": c.Universe.Type(runtimeObject), + "timeDuration": c.Universe.Type(timeDuration), + "v1ListOptions": c.Universe.Type(v1ListOptions), + } + + sw.Do(externalSharedInformerFactoryInterface, m) + + return sw.Error() +} + +var externalSharedInformerFactoryInterface = ` +// NewInformerFunc takes {{.clientSetPackage|raw}} and {{.timeDuration|raw}} to return a SharedIndexInformer. 
+type NewInformerFunc func({{.clientSetPackage|raw}}, {{.timeDuration|raw}}) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj {{.runtimeObject|raw}}, newFunc NewInformerFunc) {{.cacheSharedIndexInformer|raw}} +} + +// TweakListOptionsFunc is a function that transforms a {{.v1ListOptions|raw}}. +type TweakListOptionsFunc func(*{{.v1ListOptions|raw}}) +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go new file mode 100644 index 0000000000..cad907990f --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go @@ -0,0 +1,184 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "sort" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + codegennamer "k8s.io/code-generator/pkg/namer" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// genericGenerator generates the generic informer. 
+type genericGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + groupVersions map[string]clientgentypes.GroupVersions + groupGoNames map[string]string + typesForGroupVersion map[clientgentypes.GroupVersion][]*types.Type + filtered bool +} + +var _ generator.Generator = &genericGenerator{} + +func (g *genericGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *genericGenerator) Namers(c *generator.Context) namer.NameSystems { + pluralExceptions := map[string]string{ + "Endpoints": "Endpoints", + } + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions), + "publicPlural": namer.NewPublicPluralNamer(pluralExceptions), + "resource": codegennamer.NewTagOverrideNamer("resourceName", namer.NewAllLowercasePluralNamer(pluralExceptions)), + } +} + +func (g *genericGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) 
+ imports = append(imports, "fmt") + return +} + +type group struct { + GroupGoName string + Name string + Versions []*version +} + +type groupSort []group + +func (g groupSort) Len() int { return len(g) } +func (g groupSort) Less(i, j int) bool { return strings.ToLower(g[i].Name) < strings.ToLower(g[j].Name) } +func (g groupSort) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +type version struct { + Name string + GoName string + Resources []*types.Type +} + +type versionSort []*version + +func (v versionSort) Len() int { return len(v) } +func (v versionSort) Less(i, j int) bool { + return strings.ToLower(v[i].Name) < strings.ToLower(v[j].Name) +} +func (v versionSort) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +func (g *genericGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + groups := []group{} + schemeGVs := make(map[*version]*types.Type) + + orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} + for groupPackageName, groupVersions := range g.groupVersions { + group := group{ + GroupGoName: g.groupGoNames[groupPackageName], + Name: groupVersions.Group.NonEmpty(), + Versions: []*version{}, + } + for _, v := range groupVersions.Versions { + gv := clientgentypes.GroupVersion{Group: groupVersions.Group, Version: v.Version} + version := &version{ + Name: v.Version.NonEmpty(), + GoName: namer.IC(v.Version.NonEmpty()), + Resources: orderer.OrderTypes(g.typesForGroupVersion[gv]), + } + func() { + schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"}) + }() + group.Versions = append(group.Versions, version) + } + sort.Sort(versionSort(group.Versions)) + groups = append(groups, group) + } + sort.Sort(groupSort(groups)) + + m := map[string]interface{}{ + "cacheGenericLister": c.Universe.Type(cacheGenericLister), + "cacheNewGenericLister": c.Universe.Function(cacheNewGenericLister), + 
"cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "groups": groups, + "schemeGVs": schemeGVs, + "schemaGroupResource": c.Universe.Type(schemaGroupResource), + "schemaGroupVersionResource": c.Universe.Type(schemaGroupVersionResource), + } + + sw.Do(genericInformer, m) + sw.Do(forResource, m) + + return sw.Error() +} + +var genericInformer = ` +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() {{.cacheSharedIndexInformer|raw}} + Lister() {{.cacheGenericLister|raw}} +} + +type genericInformer struct { + informer {{.cacheSharedIndexInformer|raw}} + resource {{.schemaGroupResource|raw}} +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() {{.cacheSharedIndexInformer|raw}} { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() {{.cacheGenericLister|raw}} { + return {{.cacheNewGenericLister|raw}}(f.Informer().GetIndexer(), f.resource) +} +` + +var forResource = ` +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource {{.schemaGroupVersionResource|raw}}) (GenericInformer, error) { + switch resource { + {{range $group := .groups -}}{{$GroupGoName := .GroupGoName -}} + {{range $version := .Versions -}} + // Group={{$group.Name}}, Version={{.Name}} + {{range .Resources -}} + case {{index $.schemeGVs $version|raw}}.WithResource("{{.|resource}}"): + return &genericInformer{resource: resource.GroupResource(), informer: f.{{$GroupGoName}}().{{$version.GoName}}().{{.|publicPlural}}().Informer()}, nil + {{end}} + {{end}} + {{end -}} + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go 
b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go new file mode 100644 index 0000000000..0bba93c4b2 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path/filepath" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// groupInterfaceGenerator generates the per-group interface file. +type groupInterfaceGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + groupVersions clientgentypes.GroupVersions + filtered bool + internalInterfacesPackage string +} + +var _ generator.Generator = &groupInterfaceGenerator{} + +func (g *groupInterfaceGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *groupInterfaceGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *groupInterfaceGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) 
+ return +} + +type versionData struct { + Name string + Interface *types.Type + New *types.Type +} + +func (g *groupInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + versions := make([]versionData, 0, len(g.groupVersions.Versions)) + for _, version := range g.groupVersions.Versions { + gv := clientgentypes.GroupVersion{Group: g.groupVersions.Group, Version: version.Version} + versionPackage := filepath.Join(g.outputPackage, strings.ToLower(gv.Version.NonEmpty())) + iface := c.Universe.Type(types.Name{Package: versionPackage, Name: "Interface"}) + versions = append(versions, versionData{ + Name: namer.IC(version.Version.NonEmpty()), + Interface: iface, + New: c.Universe.Function(types.Name{Package: versionPackage, Name: "New"}), + }) + } + m := map[string]interface{}{ + "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), + "interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), + "versions": versions, + } + + sw.Do(groupTemplate, m) + + return sw.Error() +} + +var groupTemplate = ` +// Interface provides access to each of this group's versions. +type Interface interface { + $range .versions -$ + // $.Name$ provides access to shared informers for resources in $.Name$. + $.Name$() $.Interface|raw$ + $end$ +} + +type group struct { + factory $.interfacesSharedInformerFactory|raw$ + namespace string + tweakListOptions $.interfacesTweakListOptionsFunc|raw$ +} + +// New returns a new Interface. +func New(f $.interfacesSharedInformerFactory|raw$, namespace string, tweakListOptions $.interfacesTweakListOptionsFunc|raw$) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +$range .versions$ +// $.Name$ returns a new $.Interface|raw$. 
+func (g *group) $.Name$() $.Interface|raw$ { + return $.New|raw$(g.factory, g.namespace, g.tweakListOptions) +} +$end$ +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go new file mode 100644 index 0000000000..9204d6215a --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go @@ -0,0 +1,186 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "io" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + + "k8s.io/klog" +) + +// informerGenerator produces a file of listers for a given GroupVersion and +// type. 
+type informerGenerator struct { + generator.DefaultGen + outputPackage string + groupPkgName string + groupVersion clientgentypes.GroupVersion + groupGoName string + typeToGenerate *types.Type + imports namer.ImportTracker + clientSetPackage string + listersPackage string + internalInterfacesPackage string +} + +var _ generator.Generator = &informerGenerator{} + +func (g *informerGenerator) Filter(c *generator.Context, t *types.Type) bool { + return t == g.typeToGenerate +} + +func (g *informerGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *informerGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + return +} + +func (g *informerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + klog.V(5).Infof("processing type %v", t) + + listerPackage := fmt.Sprintf("%s/%s/%s", g.listersPackage, g.groupPkgName, strings.ToLower(g.groupVersion.Version.NonEmpty())) + clientSetInterface := c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}) + informerFor := "InformerFor" + + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + + m := map[string]interface{}{ + "apiScheme": c.Universe.Type(apiScheme), + "cacheIndexers": c.Universe.Type(cacheIndexers), + "cacheListWatch": c.Universe.Type(cacheListWatch), + "cacheMetaNamespaceIndexFunc": c.Universe.Function(cacheMetaNamespaceIndexFunc), + "cacheNamespaceIndex": c.Universe.Variable(cacheNamespaceIndex), + "cacheNewSharedIndexInformer": c.Universe.Function(cacheNewSharedIndexInformer), + "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "clientSetInterface": clientSetInterface, + "group": namer.IC(g.groupGoName), + "informerFor": informerFor, + 
"interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), + "interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), + "listOptions": c.Universe.Type(listOptions), + "lister": c.Universe.Type(types.Name{Package: listerPackage, Name: t.Name.Name + "Lister"}), + "namespaceAll": c.Universe.Type(metav1NamespaceAll), + "namespaced": !tags.NonNamespaced, + "newLister": c.Universe.Function(types.Name{Package: listerPackage, Name: "New" + t.Name.Name + "Lister"}), + "runtimeObject": c.Universe.Type(runtimeObject), + "timeDuration": c.Universe.Type(timeDuration), + "type": t, + "v1ListOptions": c.Universe.Type(v1ListOptions), + "version": namer.IC(g.groupVersion.Version.String()), + "watchInterface": c.Universe.Type(watchInterface), + } + + sw.Do(typeInformerInterface, m) + sw.Do(typeInformerStruct, m) + sw.Do(typeInformerPublicConstructor, m) + sw.Do(typeFilteredInformerPublicConstructor, m) + sw.Do(typeInformerConstructor, m) + sw.Do(typeInformerInformer, m) + sw.Do(typeInformerLister, m) + + return sw.Error() +} + +var typeInformerInterface = ` +// $.type|public$Informer provides access to a shared informer and lister for +// $.type|publicPlural$. +type $.type|public$Informer interface { + Informer() $.cacheSharedIndexInformer|raw$ + Lister() $.lister|raw$ +} +` + +var typeInformerStruct = ` +type $.type|private$Informer struct { + factory $.interfacesSharedInformerFactory|raw$ + tweakListOptions $.interfacesTweakListOptionsFunc|raw$ + $if .namespaced$namespace string$end$ +} +` + +var typeInformerPublicConstructor = ` +// New$.type|public$Informer constructs a new informer for $.type|public$ type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func New$.type|public$Informer(client $.clientSetInterface|raw$$if .namespaced$, namespace string$end$, resyncPeriod $.timeDuration|raw$, indexers $.cacheIndexers|raw$) $.cacheSharedIndexInformer|raw$ { + return NewFiltered$.type|public$Informer(client$if .namespaced$, namespace$end$, resyncPeriod, indexers, nil) +} +` + +var typeFilteredInformerPublicConstructor = ` +// NewFiltered$.type|public$Informer constructs a new informer for $.type|public$ type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFiltered$.type|public$Informer(client $.clientSetInterface|raw$$if .namespaced$, namespace string$end$, resyncPeriod $.timeDuration|raw$, indexers $.cacheIndexers|raw$, tweakListOptions $.interfacesTweakListOptionsFunc|raw$) $.cacheSharedIndexInformer|raw$ { + return $.cacheNewSharedIndexInformer|raw$( + &$.cacheListWatch|raw${ + ListFunc: func(options $.v1ListOptions|raw$) ($.runtimeObject|raw$, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).List(options) + }, + WatchFunc: func(options $.v1ListOptions|raw$) ($.watchInterface|raw$, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).Watch(options) + }, + }, + &$.type|raw${}, + resyncPeriod, + indexers, + ) +} +` + +var typeInformerConstructor = ` +func (f *$.type|private$Informer) defaultInformer(client $.clientSetInterface|raw$, resyncPeriod $.timeDuration|raw$) $.cacheSharedIndexInformer|raw$ { + return NewFiltered$.type|public$Informer(client$if .namespaced$, f.namespace$end$, resyncPeriod, $.cacheIndexers|raw${$.cacheNamespaceIndex|raw$: $.cacheMetaNamespaceIndexFunc|raw$}, f.tweakListOptions) +} +` + +var typeInformerInformer = ` +func (f 
*$.type|private$Informer) Informer() $.cacheSharedIndexInformer|raw$ { + return f.factory.$.informerFor$(&$.type|raw${}, f.defaultInformer) +} +` + +var typeInformerLister = ` +func (f *$.type|private$Informer) Lister() $.lister|raw$ { + return $.newLister|raw$(f.Informer().GetIndexer()) +} +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go new file mode 100644 index 0000000000..e936e29f02 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go @@ -0,0 +1,352 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "path" + "path/filepath" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + "k8s.io/klog" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + informergenargs "k8s.io/code-generator/cmd/informer-gen/args" +) + +// NameSystems returns the name system used by the generators in this package. 
+func NameSystems() namer.NameSystems { + pluralExceptions := map[string]string{ + "Endpoints": "Endpoints", + } + return namer.NameSystems{ + "public": namer.NewPublicNamer(0), + "private": namer.NewPrivateNamer(0), + "raw": namer.NewRawNamer("", nil), + "publicPlural": namer.NewPublicPluralNamer(pluralExceptions), + "allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions), + "lowercaseSingular": &lowercaseSingularNamer{}, + } +} + +// lowercaseSingularNamer implements Namer +type lowercaseSingularNamer struct{} + +// Name returns t's name in all lowercase. +func (n *lowercaseSingularNamer) Name(t *types.Type) string { + return strings.ToLower(t.Name.Name) +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// objectMetaForPackage returns the type of ObjectMeta used by package p. +func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) { + generatingForPackage := false + for _, t := range p.Types { + if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient { + continue + } + generatingForPackage = true + for _, member := range t.Members { + if member.Name == "ObjectMeta" { + return member.Type, isInternal(member), nil + } + } + } + if generatingForPackage { + return nil, false, fmt.Errorf("unable to find ObjectMeta for any types in package %s", p.Path) + } + return nil, false, nil +} + +// isInternal returns true if the tags for a member do not contain a json tag +func isInternal(m types.Member) bool { + return !strings.Contains(m.Tags, "json") +} + +func packageForInternalInterfaces(base string) string { + return filepath.Join(base, "internalinterfaces") +} + +func vendorless(p string) string { + if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { + return p[pos+len("/vendor/"):] + } + return p +} + +// Packages makes the client package 
definition. +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + customArgs, ok := arguments.CustomArgs.(*informergenargs.CustomArgs) + if !ok { + klog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) + } + + internalVersionPackagePath := filepath.Join(arguments.OutputPackagePath) + externalVersionPackagePath := filepath.Join(arguments.OutputPackagePath) + if !customArgs.SingleDirectory { + internalVersionPackagePath = filepath.Join(arguments.OutputPackagePath, "internalversion") + externalVersionPackagePath = filepath.Join(arguments.OutputPackagePath, "externalversions") + } + + var packageList generator.Packages + typesForGroupVersion := make(map[clientgentypes.GroupVersion][]*types.Type) + + externalGroupVersions := make(map[string]clientgentypes.GroupVersions) + internalGroupVersions := make(map[string]clientgentypes.GroupVersions) + groupGoNames := make(map[string]string) + for _, inputDir := range arguments.InputDirs { + p := context.Universe.Package(vendorless(inputDir)) + + objectMeta, internal, err := objectMetaForPackage(p) + if err != nil { + klog.Fatal(err) + } + if objectMeta == nil { + // no types in this package had genclient + continue + } + + var gv clientgentypes.GroupVersion + var targetGroupVersions map[string]clientgentypes.GroupVersions + + if internal { + lastSlash := strings.LastIndex(p.Path, "/") + if lastSlash == -1 { + klog.Fatalf("error constructing internal group version for package %q", p.Path) + } + gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) + targetGroupVersions = internalGroupVersions + } else { + parts := strings.Split(p.Path, "/") + gv.Group = clientgentypes.Group(parts[len(parts)-2]) + gv.Version = clientgentypes.Version(parts[len(parts)-1]) + targetGroupVersions = externalGroupVersions + } + groupPackageName := gv.Group.NonEmpty() + 
gvPackage := path.Clean(p.Path) + + // If there's a comment of the form "// +groupName=somegroup" or + // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the + // group when generating. + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + gv.Group = clientgentypes.Group(override[0]) + } + + // If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as + // the Go group identifier in CamelCase. It defaults + groupGoNames[groupPackageName] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) + if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { + groupGoNames[groupPackageName] = namer.IC(override[0]) + } + + var typesToGenerate []*types.Type + for _, t := range p.Types { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if !tags.GenerateClient || tags.NoVerbs || !tags.HasVerb("list") || !tags.HasVerb("watch") { + continue + } + + typesToGenerate = append(typesToGenerate, t) + + if _, ok := typesForGroupVersion[gv]; !ok { + typesForGroupVersion[gv] = []*types.Type{} + } + typesForGroupVersion[gv] = append(typesForGroupVersion[gv], t) + } + if len(typesToGenerate) == 0 { + continue + } + + groupVersionsEntry, ok := targetGroupVersions[groupPackageName] + if !ok { + groupVersionsEntry = clientgentypes.GroupVersions{ + PackageName: groupPackageName, + Group: gv.Group, + } + } + groupVersionsEntry.Versions = append(groupVersionsEntry.Versions, clientgentypes.PackageVersion{Version: gv.Version, Package: gvPackage}) + targetGroupVersions[groupPackageName] = groupVersionsEntry + + orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} + typesToGenerate = orderer.OrderTypes(typesToGenerate) + + if internal { + packageList = append(packageList, versionPackage(internalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, 
customArgs.InternalClientSetPackage, customArgs.ListersPackage)) + } else { + packageList = append(packageList, versionPackage(externalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs.VersionedClientSetPackage, customArgs.ListersPackage)) + } + } + + if len(externalGroupVersions) != 0 { + packageList = append(packageList, factoryInterfacePackage(externalVersionPackagePath, boilerplate, customArgs.VersionedClientSetPackage)) + packageList = append(packageList, factoryPackage(externalVersionPackagePath, boilerplate, groupGoNames, externalGroupVersions, customArgs.VersionedClientSetPackage, typesForGroupVersion)) + for _, gvs := range externalGroupVersions { + packageList = append(packageList, groupPackage(externalVersionPackagePath, gvs, boilerplate)) + } + } + + if len(internalGroupVersions) != 0 { + packageList = append(packageList, factoryInterfacePackage(internalVersionPackagePath, boilerplate, customArgs.InternalClientSetPackage)) + packageList = append(packageList, factoryPackage(internalVersionPackagePath, boilerplate, groupGoNames, internalGroupVersions, customArgs.InternalClientSetPackage, typesForGroupVersion)) + for _, gvs := range internalGroupVersions { + packageList = append(packageList, groupPackage(internalVersionPackagePath, gvs, boilerplate)) + } + } + + return packageList +} + +func factoryPackage(basePackage string, boilerplate []byte, groupGoNames map[string]string, groupVersions map[string]clientgentypes.GroupVersions, clientSetPackage string, typesForGroupVersion map[clientgentypes.GroupVersion][]*types.Type) generator.Package { + return &generator.DefaultPackage{ + PackageName: filepath.Base(basePackage), + PackagePath: basePackage, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &factoryGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "factory", + }, + outputPackage: 
basePackage, + imports: generator.NewImportTracker(), + groupVersions: groupVersions, + clientSetPackage: clientSetPackage, + internalInterfacesPackage: packageForInternalInterfaces(basePackage), + gvGoNames: groupGoNames, + }) + + generators = append(generators, &genericGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "generic", + }, + outputPackage: basePackage, + imports: generator.NewImportTracker(), + groupVersions: groupVersions, + typesForGroupVersion: typesForGroupVersion, + groupGoNames: groupGoNames, + }) + + return generators + }, + } +} + +func factoryInterfacePackage(basePackage string, boilerplate []byte, clientSetPackage string) generator.Package { + packagePath := packageForInternalInterfaces(basePackage) + + return &generator.DefaultPackage{ + PackageName: filepath.Base(packagePath), + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &factoryInterfaceGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "factory_interfaces", + }, + outputPackage: packagePath, + imports: generator.NewImportTracker(), + clientSetPackage: clientSetPackage, + }) + + return generators + }, + } +} + +func groupPackage(basePackage string, groupVersions clientgentypes.GroupVersions, boilerplate []byte) generator.Package { + packagePath := filepath.Join(basePackage, groupVersions.PackageName) + groupPkgName := strings.Split(string(groupVersions.PackageName), ".")[0] + + return &generator.DefaultPackage{ + PackageName: groupPkgName, + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &groupInterfaceGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "interface", + }, + outputPackage: packagePath, + groupVersions: groupVersions, + imports: generator.NewImportTracker(), + internalInterfacesPackage: 
packageForInternalInterfaces(basePackage), + }) + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + } +} + +func versionPackage(basePackage string, groupPkgName string, gv clientgentypes.GroupVersion, groupGoName string, boilerplate []byte, typesToGenerate []*types.Type, clientSetPackage, listersPackage string) generator.Package { + packagePath := filepath.Join(basePackage, groupPkgName, strings.ToLower(gv.Version.NonEmpty())) + + return &generator.DefaultPackage{ + PackageName: strings.ToLower(gv.Version.NonEmpty()), + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &versionInterfaceGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "interface", + }, + outputPackage: packagePath, + imports: generator.NewImportTracker(), + types: typesToGenerate, + internalInterfacesPackage: packageForInternalInterfaces(basePackage), + }) + + for _, t := range typesToGenerate { + generators = append(generators, &informerGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: strings.ToLower(t.Name.Name), + }, + outputPackage: packagePath, + groupPkgName: groupPkgName, + groupVersion: gv, + groupGoName: groupGoName, + typeToGenerate: t, + imports: generator.NewImportTracker(), + clientSetPackage: clientSetPackage, + listersPackage: listersPackage, + internalInterfacesPackage: packageForInternalInterfaces(basePackage), + }) + } + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch") + }, + } +} diff --git 
a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go new file mode 100644 index 0000000000..27d4bd51ab --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import "k8s.io/gengo/types" + +var ( + apiScheme = types.Name{Package: "k8s.io/kubernetes/pkg/api/legacyscheme", Name: "Scheme"} + cacheGenericLister = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "GenericLister"} + cacheIndexers = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "Indexers"} + cacheListWatch = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "ListWatch"} + cacheMetaNamespaceIndexFunc = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "MetaNamespaceIndexFunc"} + cacheNamespaceIndex = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NamespaceIndex"} + cacheNewGenericLister = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewGenericLister"} + cacheNewSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewSharedIndexInformer"} + cacheSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "SharedIndexInformer"} + listOptions = types.Name{Package: "k8s.io/kubernetes/pkg/apis/core", Name: "ListOptions"} + reflectType = types.Name{Package: "reflect", Name: "Type"} 
+ runtimeObject = types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "Object"} + schemaGroupResource = types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupResource"} + schemaGroupVersionResource = types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionResource"} + syncMutex = types.Name{Package: "sync", Name: "Mutex"} + timeDuration = types.Name{Package: "time", Name: "Duration"} + v1ListOptions = types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"} + metav1NamespaceAll = types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "NamespaceAll"} + metav1Object = types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "Object"} + watchInterface = types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"} +) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go new file mode 100644 index 0000000000..3b51f8dc82 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go @@ -0,0 +1,109 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// versionInterfaceGenerator generates the per-version interface file. 
+type versionInterfaceGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + types []*types.Type + filtered bool + internalInterfacesPackage string +} + +var _ generator.Generator = &versionInterfaceGenerator{} + +func (g *versionInterfaceGenerator) Filter(c *generator.Context, t *types.Type) bool { + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *versionInterfaceGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *versionInterfaceGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) + return +} + +func (g *versionInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + m := map[string]interface{}{ + "interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}), + "interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}), + "types": g.types, + } + + sw.Do(versionTemplate, m) + for _, typeDef := range g.types { + tags, err := util.ParseClientGenTags(append(typeDef.SecondClosestCommentLines, typeDef.CommentLines...)) + if err != nil { + return err + } + m["namespaced"] = !tags.NonNamespaced + m["type"] = typeDef + sw.Do(versionFuncTemplate, m) + } + + return sw.Error() +} + +var versionTemplate = ` +// Interface provides access to all the informers in this group version. +type Interface interface { + $range .types -$ + // $.|publicPlural$ returns a $.|public$Informer. 
+ $.|publicPlural$() $.|public$Informer + $end$ +} + +type version struct { + factory $.interfacesSharedInformerFactory|raw$ + namespace string + tweakListOptions $.interfacesTweakListOptionsFunc|raw$ +} + +// New returns a new Interface. +func New(f $.interfacesSharedInformerFactory|raw$, namespace string, tweakListOptions $.interfacesTweakListOptionsFunc|raw$) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} +` + +var versionFuncTemplate = ` +// $.type|publicPlural$ returns a $.type|public$Informer. +func (v *version) $.type|publicPlural$() $.type|public$Informer { + return &$.type|private$Informer{factory: v.factory$if .namespaced$, namespace: v.namespace$end$, tweakListOptions: v.tweakListOptions} +} +` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go new file mode 100644 index 0000000000..14f3e923e6 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/informer-gen/generators" + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/informer-gen/args" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. + // TODO: move out of informer-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/informers/informers_generated" + customArgs.VersionedClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + customArgs.InternalClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + customArgs.ListersPackage = "k8s.io/kubernetes/pkg/client/listers" + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. 
+ if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/.import-restrictions b/vendor/k8s.io/code-generator/cmd/lister-gen/.import-restrictions new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/.import-restrictions @@ -0,0 +1 @@ +{} diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go new file mode 100644 index 0000000000..34914ea8c9 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path" + + "github.com/spf13/pflag" + codegenutil "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct{} + +// NewDefaults returns default arguments for the generator. 
+func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{} + genericArgs.CustomArgs = customArgs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/listers") + } + + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + _ = genericArgs.CustomArgs.(*CustomArgs) + + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go new file mode 100644 index 0000000000..dd45d7749c --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go @@ -0,0 +1,67 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "os" + "path/filepath" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// expansionGenerator produces a file for a expansion interfaces. 
+type expansionGenerator struct { + generator.DefaultGen + packagePath string + types []*types.Type +} + +// We only want to call GenerateType() once per group. +func (g *expansionGenerator) Filter(c *generator.Context, t *types.Type) bool { + return t == g.types[0] +} + +func (g *expansionGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + for _, t := range g.types { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if _, err := os.Stat(filepath.Join(g.packagePath, strings.ToLower(t.Name.Name+"_expansion.go"))); os.IsNotExist(err) { + sw.Do(expansionInterfaceTemplate, t) + if !tags.NonNamespaced { + sw.Do(namespacedExpansionInterfaceTemplate, t) + } + } + } + return sw.Error() +} + +var expansionInterfaceTemplate = ` +// $.|public$ListerExpansion allows custom methods to be added to +// $.|public$Lister. +type $.|public$ListerExpansion interface {} +` + +var namespacedExpansionInterfaceTemplate = ` +// $.|public$NamespaceListerExpansion allows custom methods to be added to +// $.|public$NamespaceLister. +type $.|public$NamespaceListerExpansion interface {} +` diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go new file mode 100644 index 0000000000..c8ed5ad4d3 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go @@ -0,0 +1,371 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "io" + "path/filepath" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + + "k8s.io/klog" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + pluralExceptions := map[string]string{ + "Endpoints": "Endpoints", + } + return namer.NameSystems{ + "public": namer.NewPublicNamer(0), + "private": namer.NewPrivateNamer(0), + "raw": namer.NewRawNamer("", nil), + "publicPlural": namer.NewPublicPluralNamer(pluralExceptions), + "allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions), + "lowercaseSingular": &lowercaseSingularNamer{}, + } +} + +// lowercaseSingularNamer implements Namer +type lowercaseSingularNamer struct{} + +// Name returns t's name in all lowercase. +func (n *lowercaseSingularNamer) Name(t *types.Type) string { + return strings.ToLower(t.Name.Name) +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// Packages makes the client package definition. 
+func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + var packageList generator.Packages + for _, inputDir := range arguments.InputDirs { + p := context.Universe.Package(inputDir) + + objectMeta, internal, err := objectMetaForPackage(p) + if err != nil { + klog.Fatal(err) + } + if objectMeta == nil { + // no types in this package had genclient + continue + } + + var gv clientgentypes.GroupVersion + var internalGVPkg string + + if internal { + lastSlash := strings.LastIndex(p.Path, "/") + if lastSlash == -1 { + klog.Fatalf("error constructing internal group version for package %q", p.Path) + } + gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) + internalGVPkg = p.Path + } else { + parts := strings.Split(p.Path, "/") + gv.Group = clientgentypes.Group(parts[len(parts)-2]) + gv.Version = clientgentypes.Version(parts[len(parts)-1]) + + internalGVPkg = strings.Join(parts[0:len(parts)-1], "/") + } + groupPackageName := strings.ToLower(gv.Group.NonEmpty()) + + // If there's a comment of the form "// +groupName=somegroup" or + // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the + // group when generating. 
+ if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + gv.Group = clientgentypes.Group(strings.SplitN(override[0], ".", 2)[0]) + } + + var typesToGenerate []*types.Type + for _, t := range p.Types { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if !tags.GenerateClient || !tags.HasVerb("list") || !tags.HasVerb("get") { + continue + } + typesToGenerate = append(typesToGenerate, t) + } + if len(typesToGenerate) == 0 { + continue + } + orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} + typesToGenerate = orderer.OrderTypes(typesToGenerate) + + packagePath := filepath.Join(arguments.OutputPackagePath, groupPackageName, strings.ToLower(gv.Version.NonEmpty())) + packageList = append(packageList, &generator.DefaultPackage{ + PackageName: strings.ToLower(gv.Version.NonEmpty()), + PackagePath: packagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &expansionGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "expansion_generated", + }, + packagePath: filepath.Join(arguments.OutputBase, packagePath), + types: typesToGenerate, + }) + + for _, t := range typesToGenerate { + generators = append(generators, &listerGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: strings.ToLower(t.Name.Name), + }, + outputPackage: arguments.OutputPackagePath, + groupVersion: gv, + internalGVPkg: internalGVPkg, + typeToGenerate: t, + imports: generator.NewImportTracker(), + objectMeta: objectMeta, + }) + } + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("get") + }, + }) + } + + return packageList +} + +// objectMetaForPackage returns the type of ObjectMeta used by package p. 
+func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) { + generatingForPackage := false + for _, t := range p.Types { + // filter out types which dont have genclient. + if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient { + continue + } + generatingForPackage = true + for _, member := range t.Members { + if member.Name == "ObjectMeta" { + return member.Type, isInternal(member), nil + } + } + } + if generatingForPackage { + return nil, false, fmt.Errorf("unable to find ObjectMeta for any types in package %s", p.Path) + } + return nil, false, nil +} + +// isInternal returns true if the tags for a member do not contain a json tag +func isInternal(m types.Member) bool { + return !strings.Contains(m.Tags, "json") +} + +// listerGenerator produces a file of listers for a given GroupVersion and +// type. +type listerGenerator struct { + generator.DefaultGen + outputPackage string + groupVersion clientgentypes.GroupVersion + internalGVPkg string + typeToGenerate *types.Type + imports namer.ImportTracker + objectMeta *types.Type +} + +var _ generator.Generator = &listerGenerator{} + +func (g *listerGenerator) Filter(c *generator.Context, t *types.Type) bool { + return t == g.typeToGenerate +} + +func (g *listerGenerator) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *listerGenerator) Imports(c *generator.Context) (imports []string) { + imports = append(imports, g.imports.ImportLines()...) 
+ imports = append(imports, "k8s.io/apimachinery/pkg/api/errors") + imports = append(imports, "k8s.io/apimachinery/pkg/labels") + // for Indexer + imports = append(imports, "k8s.io/client-go/tools/cache") + return +} + +func (g *listerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + klog.V(5).Infof("processing type %v", t) + m := map[string]interface{}{ + "Resource": c.Universe.Function(types.Name{Package: t.Name.Package, Name: "Resource"}), + "type": t, + "objectMeta": g.objectMeta, + } + + tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + if err != nil { + return err + } + + if tags.NonNamespaced { + sw.Do(typeListerInterface_NonNamespaced, m) + } else { + sw.Do(typeListerInterface, m) + } + + sw.Do(typeListerStruct, m) + sw.Do(typeListerConstructor, m) + sw.Do(typeLister_List, m) + + if tags.NonNamespaced { + sw.Do(typeLister_NonNamespacedGet, m) + return sw.Error() + } + + sw.Do(typeLister_NamespaceLister, m) + sw.Do(namespaceListerInterface, m) + sw.Do(namespaceListerStruct, m) + sw.Do(namespaceLister_List, m) + sw.Do(namespaceLister_Get, m) + + return sw.Error() +} + +var typeListerInterface = ` +// $.type|public$Lister helps list $.type|publicPlural$. +type $.type|public$Lister interface { + // List lists all $.type|publicPlural$ in the indexer. + List(selector labels.Selector) (ret []*$.type|raw$, err error) + // $.type|publicPlural$ returns an object that can list and get $.type|publicPlural$. + $.type|publicPlural$(namespace string) $.type|public$NamespaceLister + $.type|public$ListerExpansion +} +` + +var typeListerInterface_NonNamespaced = ` +// $.type|public$Lister helps list $.type|publicPlural$. +type $.type|public$Lister interface { + // List lists all $.type|publicPlural$ in the indexer. 
+ List(selector labels.Selector) (ret []*$.type|raw$, err error) + // Get retrieves the $.type|public$ from the index for a given name. + Get(name string) (*$.type|raw$, error) + $.type|public$ListerExpansion +} +` + +var typeListerStruct = ` +// $.type|private$Lister implements the $.type|public$Lister interface. +type $.type|private$Lister struct { + indexer cache.Indexer +} +` + +var typeListerConstructor = ` +// New$.type|public$Lister returns a new $.type|public$Lister. +func New$.type|public$Lister(indexer cache.Indexer) $.type|public$Lister { + return &$.type|private$Lister{indexer: indexer} +} +` + +var typeLister_List = ` +// List lists all $.type|publicPlural$ in the indexer. +func (s *$.type|private$Lister) List(selector labels.Selector) (ret []*$.type|raw$, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*$.type|raw$)) + }) + return ret, err +} +` + +var typeLister_NamespaceLister = ` +// $.type|publicPlural$ returns an object that can list and get $.type|publicPlural$. +func (s *$.type|private$Lister) $.type|publicPlural$(namespace string) $.type|public$NamespaceLister { + return $.type|private$NamespaceLister{indexer: s.indexer, namespace: namespace} +} +` + +var typeLister_NonNamespacedGet = ` +// Get retrieves the $.type|public$ from the index for a given name. +func (s *$.type|private$Lister) Get(name string) (*$.type|raw$, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound($.Resource|raw$("$.type|lowercaseSingular$"), name) + } + return obj.(*$.type|raw$), nil +} +` + +var namespaceListerInterface = ` +// $.type|public$NamespaceLister helps list and get $.type|publicPlural$. +type $.type|public$NamespaceLister interface { + // List lists all $.type|publicPlural$ in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*$.type|raw$, err error) + // Get retrieves the $.type|public$ from the indexer for a given namespace and name. + Get(name string) (*$.type|raw$, error) + $.type|public$NamespaceListerExpansion +} +` + +var namespaceListerStruct = ` +// $.type|private$NamespaceLister implements the $.type|public$NamespaceLister +// interface. +type $.type|private$NamespaceLister struct { + indexer cache.Indexer + namespace string +} +` + +var namespaceLister_List = ` +// List lists all $.type|publicPlural$ in the indexer for a given namespace. +func (s $.type|private$NamespaceLister) List(selector labels.Selector) (ret []*$.type|raw$, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*$.type|raw$)) + }) + return ret, err +} +` + +var namespaceLister_Get = ` +// Get retrieves the $.type|public$ from the indexer for a given namespace and name. +func (s $.type|private$NamespaceLister) Get(name string) (*$.type|raw$, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound($.Resource|raw$("$.type|lowercaseSingular$"), name) + } + return obj.(*$.type|raw$), nil +} +` diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go new file mode 100644 index 0000000000..aca16b2bda --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go @@ -0,0 +1,60 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/lister-gen/generators" + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/lister-gen/args" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + // Override defaults. + // TODO: move this out of lister-gen + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/listers" + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go b/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go new file mode 100644 index 0000000000..b1098c014c --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package generates openAPI definition file to be used in open API spec generation on API servers. To generate +// definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines. To +// exclude a type from a tagged package, add "+k8s:openapi-gen=false" tag to the type comment lines. + +package main + +import ( + "flag" + "log" + + generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" + "k8s.io/kube-openapi/pkg/generators" + + "github.com/spf13/pflag" + + "k8s.io/klog" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + log.Fatalf("Arguments validation error: %v", err) + } + + // Generates the code for the OpenAPIDefinitions. + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + log.Fatalf("OpenAPI code generation error: %v", err) + } +} diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go new file mode 100644 index 0000000000..2e3ab084e2 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "k8s.io/gengo/args" +) + +// NewDefaults returns default arguments for the generator. +func NewDefaults() *args.GeneratorArgs { + genericArgs := args.Default().WithoutDefaultFlagParsing() + genericArgs.OutputFileBaseName = "zz_generated.register" + return genericArgs +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go new file mode 100644 index 0000000000..5186e421f2 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go @@ -0,0 +1,137 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generators + +import ( + "fmt" + "os" + "path" + "strings" + + "k8s.io/klog" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{} +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// Packages makes packages to generate. +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + packages := generator.Packages{} + for _, inputDir := range arguments.InputDirs { + pkg := context.Universe.Package(inputDir) + internal, err := isInternal(pkg) + if err != nil { + klog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err) + continue + } + if internal { + klog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name) + continue + } + registerFileName := "register.go" + searchPath := path.Join(args.DefaultSourceTree(), inputDir, registerFileName) + if _, err := os.Stat(path.Join(searchPath)); err == nil { + klog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath) + continue + } else if err != nil && !os.IsNotExist(err) { + klog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName) + } + + gv := clientgentypes.GroupVersion{} + { + pathParts := strings.Split(pkg.Path, "/") + if len(pathParts) < 2 { + 
klog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path) + continue + } + gv.Group = clientgentypes.Group(pathParts[len(pathParts)-2]) + gv.Version = clientgentypes.Version(pathParts[len(pathParts)-1]) + + // if there is a comment of the form "// +groupName=somegroup" or "// +groupName=somegroup.foo.bar.io", + // extract the fully qualified API group name from it and overwrite the group inferred from the package path + if override := types.ExtractCommentTags("+", pkg.DocComments)["groupName"]; override != nil { + groupName := override[0] + klog.V(5).Infof("overriding the group name with = %s", groupName) + gv.Group = clientgentypes.Group(groupName) + } + } + + typesToRegister := []*types.Type{} + for _, t := range pkg.Types { + klog.V(5).Infof("considering type = %s", t.Name.String()) + for _, typeMember := range t.Members { + if typeMember.Name == "TypeMeta" && typeMember.Embedded == true { + typesToRegister = append(typesToRegister, t) + } + } + } + + packages = append(packages, + &generator.DefaultPackage{ + PackageName: pkg.Name, + PackagePath: pkg.Path, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + ®isterExternalGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: arguments.OutputFileBaseName, + }, + gv: gv, + typesToGenerate: typesToRegister, + outputPackage: pkg.Path, + imports: generator.NewImportTracker(), + }, + } + }, + }) + } + + return packages +} + +// isInternal determines whether the given package +// contains the internal types or not +func isInternal(p *types.Package) (bool, error) { + for _, t := range p.Types { + for _, member := range t.Members { + if member.Name == "TypeMeta" { + return !strings.Contains(member.Tags, "json"), nil + } + } + } + return false, fmt.Errorf("unable to find TypeMeta for any types in package %s", p.Path) +} diff --git 
a/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go new file mode 100644 index 0000000000..c831c575d6 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go @@ -0,0 +1,117 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "sort" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +type registerExternalGenerator struct { + generator.DefaultGen + outputPackage string + gv clientgentypes.GroupVersion + typesToGenerate []*types.Type + imports namer.ImportTracker +} + +var _ generator.Generator = ®isterExternalGenerator{} + +func (g *registerExternalGenerator) Filter(_ *generator.Context, _ *types.Type) bool { + return false +} + +func (g *registerExternalGenerator) Imports(c *generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +func (g *registerExternalGenerator) Namers(_ *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *registerExternalGenerator) Finalize(context *generator.Context, w io.Writer) error { + typesToGenerateOnlyNames := make([]string, len(g.typesToGenerate)) + for index, typeToGenerate := range g.typesToGenerate { + 
typesToGenerateOnlyNames[index] = typeToGenerate.Name.Name + } + + // sort the list of types to register, so that the generator produces stable output + sort.Strings(typesToGenerateOnlyNames) + + sw := generator.NewSnippetWriter(w, context, "$", "$") + m := map[string]interface{}{ + "groupName": g.gv.Group, + "version": g.gv.Version, + "types": typesToGenerateOnlyNames, + "addToGroupVersion": context.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "AddToGroupVersion"}), + "groupVersion": context.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GroupVersion"}), + } + sw.Do(registerExternalTypesTemplate, m) + return sw.Error() +} + +var registerExternalTypesTemplate = ` +// GroupName specifies the group name used to register the objects. +const GroupName = "$.groupName$" + +// GroupVersion specifies the group and the version used to register the objects. +var GroupVersion = $.groupVersion|raw${Group: GroupName, Version: "$.version$"} + +// SchemeGroupVersion is group version used to register these objects +// Deprecated: use GroupVersion instead. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "$.version$"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Depreciated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. 
+ localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + $range .types -$ + &$.${}, + $end$ + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. + $.addToGroupVersion|raw$(scheme, SchemeGroupVersion) + return nil +} +` diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/main.go b/vendor/k8s.io/code-generator/cmd/register-gen/main.go new file mode 100644 index 0000000000..30a175d8d6 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/register-gen/main.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/klog" + + generatorargs "k8s.io/code-generator/cmd/register-gen/args" + "k8s.io/code-generator/cmd/register-gen/generators" + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" +) + +func main() { + klog.InitFlags(nil) + genericArgs := generatorargs.NewDefaults() + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + genericArgs.AddFlags(pflag.CommandLine) + flag.Set("logtostderr", "true") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + + pflag.Parse() + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/set-gen/.gitignore b/vendor/k8s.io/code-generator/cmd/set-gen/.gitignore new file mode 100644 index 0000000000..ffe6458c96 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/set-gen/.gitignore @@ -0,0 +1 @@ +set-gen diff --git a/vendor/k8s.io/code-generator/cmd/set-gen/main.go b/vendor/k8s.io/code-generator/cmd/set-gen/main.go new file mode 100644 index 0000000000..45694d4f33 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/set-gen/main.go @@ -0,0 +1,56 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// set-gen is an example usage of gengo. +// +// Structs in the input directories with the below line in their comments will +// have sets generated for them. +// // +genset +// +// Any builtin type referenced anywhere in the input directories will have a +// set generated for it. +package main + +import ( + "os" + "path/filepath" + + "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/args" + "k8s.io/gengo/examples/set-gen/generators" + + "k8s.io/klog" +) + +func main() { + klog.InitFlags(nil) + arguments := args.Default() + + // Override defaults. + arguments.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) + arguments.InputDirs = []string{"k8s.io/kubernetes/pkg/util/sets/types"} + arguments.OutputPackagePath = "k8s.io/apimachinery/pkg/util/sets" + + if err := arguments.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Errorf("Error: %v", err) + os.Exit(1) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/code-of-conduct.md b/vendor/k8s.io/code-generator/code-of-conduct.md new file mode 100644 index 0000000000..0d15c00cf3 --- /dev/null +++ b/vendor/k8s.io/code-generator/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh new file mode 100644 index 0000000000..d82002ddaf --- /dev/null +++ b/vendor/k8s.io/code-generator/generate-groups.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# generate-groups generates everything for a project with external types only, e.g. a project based +# on CustomResourceDefinitions. + +if [ "$#" -lt 4 ] || [ "${1}" == "--help" ]; then + cat < ... + + the generators comma separated to run (deepcopy,defaulter,client,lister,informer) or "all". + the output package name (e.g. github.com/example/project/pkg/generated). + the external types dir (e.g. github.com/example/api or github.com/example/project/pkg/apis). + the groups and their versions in the format "groupA:v1,v2 groupB:v1 groupC:v2", relative + to . + ... arbitrary flags passed to all generator binaries. + + +Examples: + $(basename "$0") all github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" + $(basename "$0") deepcopy,client github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" +EOF + exit 0 +fi + +GENS="$1" +OUTPUT_PKG="$2" +APIS_PKG="$3" +GROUPS_WITH_VERSIONS="$4" +shift 4 + +( + # To support running this script from anywhere, we have to first cd into this directory + # so we can install the tools. + cd "$(dirname "${0}")" + go install ./cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} +) + +function codegen::join() { local IFS="$1"; shift; echo "$*"; } + +# enumerate group versions +FQ_APIS=() # e.g. 
k8s.io/api/apps/v1 +for GVs in ${GROUPS_WITH_VERSIONS}; do + IFS=: read -r G Vs <<<"${GVs}" + + # enumerate versions + for V in ${Vs//,/ }; do + FQ_APIS+=("${APIS_PKG}/${G}/${V}") + done +done + +if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then + echo "Generating deepcopy funcs" + "${GOPATH}/bin/deepcopy-gen" --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${APIS_PKG}" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then + echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" + "${GOPATH}/bin/client-gen" --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" --input-base "" --input "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then + echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers" + "${GOPATH}/bin/lister-gen" --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/listers" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then + echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" + "${GOPATH}/bin/informer-gen" \ + --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \ + --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \ + --listers-package "${OUTPUT_PKG}/listers" \ + --output-package "${OUTPUT_PKG}/informers" \ + "$@" +fi diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh new file mode 100644 index 0000000000..8c31d93370 --- /dev/null +++ b/vendor/k8s.io/code-generator/generate-internal-groups.sh @@ -0,0 +1,122 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# generate-internal-groups generates everything for a project with internal types, e.g. an +# user-provided API server based on k8s.io/apiserver. + +if [ "$#" -lt 5 ] || [ "${1}" == "--help" ]; then + cat < ... + + the generators comma separated to run (deepcopy,defaulter,conversion,client,lister,informer,openapi) or "all". + the output package name (e.g. github.com/example/project/pkg/generated). + the internal types dir (e.g. github.com/example/project/pkg/apis). + the external types dir (e.g. github.com/example/project/pkg/apis or githubcom/example/apis). + the groups and their versions in the format "groupA:v1,v2 groupB:v1 groupC:v2", relative + to . + ... arbitrary flags passed to all generator binaries. 
+ +Examples: + $(basename "$0") all github.com/example/project/pkg/client github.com/example/project/pkg/apis github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" + $(basename "$0") deepcopy,defaulter,conversion github.com/example/project/pkg/client github.com/example/project/pkg/apis github.com/example/project/apis "foo:v1 bar:v1alpha1,v1beta1" +EOF + exit 0 +fi + +GENS="$1" +OUTPUT_PKG="$2" +INT_APIS_PKG="$3" +EXT_APIS_PKG="$4" +GROUPS_WITH_VERSIONS="$5" +shift 5 + +go install ./"$(dirname "${0}")"/cmd/{defaulter-gen,conversion-gen,client-gen,lister-gen,informer-gen,deepcopy-gen,openapi-gen} + +function codegen::join() { local IFS="$1"; shift; echo "$*"; } + +# enumerate group versions +ALL_FQ_APIS=() # e.g. k8s.io/kubernetes/pkg/apis/apps k8s.io/api/apps/v1 +INT_FQ_APIS=() # e.g. k8s.io/kubernetes/pkg/apis/apps +EXT_FQ_APIS=() # e.g. k8s.io/api/apps/v1 +for GVs in ${GROUPS_WITH_VERSIONS}; do + IFS=: read -r G Vs <<<"${GVs}" + + if [ -n "${INT_APIS_PKG}" ]; then + ALL_FQ_APIS+=("${INT_APIS_PKG}/${G}") + INT_FQ_APIS+=("${INT_APIS_PKG}/${G}") + fi + + # enumerate versions + for V in ${Vs//,/ }; do + ALL_FQ_APIS+=("${EXT_APIS_PKG}/${G}/${V}") + EXT_FQ_APIS+=("${EXT_APIS_PKG}/${G}/${V}") + done +done + +if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then + echo "Generating deepcopy funcs" + "${GOPATH}/bin/deepcopy-gen" --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${INT_APIS_PKG},${EXT_APIS_PKG}" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "defaulter" <<<"${GENS}"; then + echo "Generating defaulters" + "${GOPATH}/bin/defaulter-gen" --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}")" -O zz_generated.defaults "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "conversion" <<<"${GENS}"; then + echo "Generating conversions" + "${GOPATH}/bin/conversion-gen" --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.conversion "$@" +fi + +if [ 
"${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then + echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" + if [ -n "${INT_APIS_PKG}" ]; then + IFS=" " read -r -a APIS <<< "$(printf '%s/ ' "${INT_FQ_APIS[@]}")" + "${GOPATH}/bin/client-gen" --clientset-name "${CLIENTSET_NAME_INTERNAL:-internalversion}" --input-base "" --input "$(codegen::join , "${APIS[@]}")" --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" "$@" + fi + "${GOPATH}/bin/client-gen" --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" --input-base "" --input "$(codegen::join , "${EXT_FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then + echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers" + "${GOPATH}/bin/lister-gen" --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}/listers" "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then + echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" + "${GOPATH}/bin/informer-gen" \ + --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" \ + --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \ + --internal-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_INTERNAL:-internalversion}" \ + --listers-package "${OUTPUT_PKG}/listers" \ + --output-package "${OUTPUT_PKG}/informers" \ + "$@" +fi + +if [ "${GENS}" = "all" ] || grep -qw "openapi" <<<"${GENS}"; then + echo "Generating OpenAPI definitions for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/openapi" + declare -a OPENAPI_EXTRA_PACKAGES + "${GOPATH}/bin/openapi-gen" \ + --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}" "${OPENAPI_EXTRA_PACKAGES[@]}")" \ + --input-dirs 
"k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/version" \ + --output-package "${OUTPUT_PKG}/openapi" \ + -O zz_generated.openapi \ + "$@" +fi diff --git a/vendor/k8s.io/code-generator/go.mod b/vendor/k8s.io/code-generator/go.mod new file mode 100644 index 0000000000..891f72ef5b --- /dev/null +++ b/vendor/k8s.io/code-generator/go.mod @@ -0,0 +1,30 @@ +// This is a generated file. Do not edit directly. + +module k8s.io/code-generator + +go 1.12 + +require ( + github.com/emicklei/go-restful v2.9.5+incompatible // indirect + github.com/go-openapi/jsonreference v0.19.3 // indirect + github.com/go-openapi/spec v0.19.3 // indirect + github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d + github.com/json-iterator/go v1.1.8 // indirect + github.com/mailru/easyjson v0.7.0 // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.4.0 // indirect + golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 // indirect + golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 // indirect + gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 + gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect + k8s.io/gengo v0.0.0-20190822140433-26a664648505 + k8s.io/klog v1.0.0 + k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a +) + +replace ( + golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13 + golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13 +) diff --git a/vendor/k8s.io/code-generator/go.sum b/vendor/k8s.io/code-generator/go.sum new file mode 100644 index 0000000000..afc21aae56 --- /dev/null +++ b/vendor/k8s.io/code-generator/go.sum @@ -0,0 +1,134 @@ +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2 
h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/protobuf 
v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 
h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag 
v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 h1:PVCvyir09Xgta5zksNZDkrL+eSm/Y+gQxRG3IfqNQ3A= +golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/k8s.io/code-generator/pkg/namer/tag-override.go 
b/vendor/k8s.io/code-generator/pkg/namer/tag-override.go new file mode 100644 index 0000000000..fd8c3a8553 --- /dev/null +++ b/vendor/k8s.io/code-generator/pkg/namer/tag-override.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// TagOverrideNamer is a namer which pulls names from a given tag, if specified, +// and otherwise falls back to a different namer. +type TagOverrideNamer struct { + tagName string + fallback namer.Namer +} + +// Name returns the tag value if it exists. It no tag was found the fallback namer will be used +func (n *TagOverrideNamer) Name(t *types.Type) string { + if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { + return nameOverride + } + + return n.fallback.Name(t) +} + +// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as +// the name, or falls back to another Namer if the tag is not present. +func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { + return &TagOverrideNamer{ + tagName: tagName, + fallback: fallback, + } +} + +// extractTag gets the comment-tags for the key. If the tag did not exist, it +// returns the empty string. 
+func extractTag(key string, lines []string) string { + val, present := types.ExtractCommentTags("+", lines)[key] + if !present || len(val) < 1 { + return "" + } + + return val[0] +} diff --git a/vendor/k8s.io/code-generator/pkg/util/build.go b/vendor/k8s.io/code-generator/pkg/util/build.go new file mode 100644 index 0000000000..6ea8f52ee0 --- /dev/null +++ b/vendor/k8s.io/code-generator/pkg/util/build.go @@ -0,0 +1,61 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + gobuild "go/build" + "path" + "path/filepath" + "reflect" + "strings" +) + +type empty struct{} + +// CurrentPackage returns the go package of the current directory, or "" if it cannot +// be derived from the GOPATH. +func CurrentPackage() string { + for _, root := range gobuild.Default.SrcDirs() { + if pkg, ok := hasSubdir(root, "."); ok { + return pkg + } + } + return "" +} + +func hasSubdir(root, dir string) (rel string, ok bool) { + // ensure a tailing separator to properly compare on word-boundaries + const sep = string(filepath.Separator) + root = filepath.Clean(root) + if !strings.HasSuffix(root, sep) { + root += sep + } + + // check whether root dir starts with root + dir = filepath.Clean(dir) + if !strings.HasPrefix(dir, root) { + return "", false + } + + // cut off root + return filepath.ToSlash(dir[len(root):]), true +} + +// BoilerplatePath uses the boilerplate in code-generator by calculating the relative path to it. 
+func BoilerplatePath() string { + return path.Join(reflect.TypeOf(empty{}).PkgPath(), "/../../hack/boilerplate.go.txt") +} diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/reflect/type.go b/vendor/k8s.io/code-generator/third_party/forked/golang/reflect/type.go new file mode 100644 index 0000000000..67957ee33e --- /dev/null +++ b/vendor/k8s.io/code-generator/third_party/forked/golang/reflect/type.go @@ -0,0 +1,91 @@ +//This package is copied from Go library reflect/type.go. +//The struct tag library provides no way to extract the list of struct tags, only +//a specific tag +package reflect + +import ( + "fmt" + + "strconv" + "strings" +) + +type StructTag struct { + Name string + Value string +} + +func (t StructTag) String() string { + return fmt.Sprintf("%s:%q", t.Name, t.Value) +} + +type StructTags []StructTag + +func (tags StructTags) String() string { + s := make([]string, 0, len(tags)) + for _, tag := range tags { + s = append(s, tag.String()) + } + return "`" + strings.Join(s, " ") + "`" +} + +func (tags StructTags) Has(name string) bool { + for i := range tags { + if tags[i].Name == name { + return true + } + } + return false +} + +// ParseStructTags returns the full set of fields in a struct tag in the order they appear in +// the struct tag. +func ParseStructTags(tag string) (StructTags, error) { + tags := StructTags{} + for tag != "" { + // Skip leading space. + i := 0 + for i < len(tag) && tag[i] == ' ' { + i++ + } + tag = tag[i:] + if tag == "" { + break + } + + // Scan to colon. A space, a quote or a control character is a syntax error. + // Strictly speaking, control chars include the range [0x7f, 0x9f], not just + // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters + // as it is simpler to inspect the tag's bytes than the tag's runes. 
+ i = 0 + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { + i++ + } + if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { + break + } + name := string(tag[:i]) + tag = tag[i+1:] + + // Scan quoted string to find value. + i = 1 + for i < len(tag) && tag[i] != '"' { + if tag[i] == '\\' { + i++ + } + i++ + } + if i >= len(tag) { + break + } + qvalue := string(tag[:i+1]) + tag = tag[i+1:] + + value, err := strconv.Unquote(qvalue) + if err != nil { + return nil, err + } + tags = append(tags, StructTag{Name: name, Value: value}) + } + return tags, nil +} diff --git a/vendor/k8s.io/code-generator/tools.go b/vendor/k8s.io/code-generator/tools.go new file mode 100644 index 0000000000..7d13de5a19 --- /dev/null +++ b/vendor/k8s.io/code-generator/tools.go @@ -0,0 +1,35 @@ +// +build tools + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This package contains code generation utilities +// This package imports things required by build scripts, to force `go mod` to see them as dependencies +package tools + +import ( + _ "k8s.io/code-generator/cmd/client-gen" + _ "k8s.io/code-generator/cmd/conversion-gen" + _ "k8s.io/code-generator/cmd/deepcopy-gen" + _ "k8s.io/code-generator/cmd/defaulter-gen" + _ "k8s.io/code-generator/cmd/go-to-protobuf" + _ "k8s.io/code-generator/cmd/import-boss" + _ "k8s.io/code-generator/cmd/informer-gen" + _ "k8s.io/code-generator/cmd/lister-gen" + _ "k8s.io/code-generator/cmd/openapi-gen" + _ "k8s.io/code-generator/cmd/register-gen" + _ "k8s.io/code-generator/cmd/set-gen" +) diff --git a/vendor/k8s.io/gengo/LICENSE b/vendor/k8s.io/gengo/LICENSE new file mode 100644 index 0000000000..00b2401109 --- /dev/null +++ b/vendor/k8s.io/gengo/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2014 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/gengo/args/args.go b/vendor/k8s.io/gengo/args/args.go new file mode 100644 index 0000000000..49cc76dac9 --- /dev/null +++ b/vendor/k8s.io/gengo/args/args.go @@ -0,0 +1,212 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package args has common command-line flags for generation programs. +package args + +import ( + "bytes" + goflag "flag" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/parser" + "k8s.io/gengo/types" + + "github.com/spf13/pflag" +) + +// Default returns a defaulted GeneratorArgs. You may change the defaults +// before calling AddFlags. 
+func Default() *GeneratorArgs { + return &GeneratorArgs{ + OutputBase: DefaultSourceTree(), + GoHeaderFilePath: filepath.Join(DefaultSourceTree(), "k8s.io/gengo/boilerplate/boilerplate.go.txt"), + GeneratedBuildTag: "ignore_autogenerated", + GeneratedByCommentTemplate: "// Code generated by GENERATOR_NAME. DO NOT EDIT.", + defaultCommandLineFlags: true, + } +} + +// GeneratorArgs has arguments that are passed to generators. +type GeneratorArgs struct { + // Which directories to parse. + InputDirs []string + + // Source tree to write results to. + OutputBase string + + // Package path within the source tree. + OutputPackagePath string + + // Output file name. + OutputFileBaseName string + + // Where to get copyright header text. + GoHeaderFilePath string + + // If GeneratedByCommentTemplate is set, generate a "Code generated by" comment + // below the bloilerplate, of the format defined by this string. + // Any instances of "GENERATOR_NAME" will be replaced with the name of the code generator. + GeneratedByCommentTemplate string + + // If true, only verify, don't write anything. + VerifyOnly bool + + // If true, include *_test.go files + IncludeTestFiles bool + + // GeneratedBuildTag is the tag used to identify code generated by execution + // of this type. Each generator should use a different tag, and different + // groups of generators (external API that depends on Kube generations) should + // keep tags distinct as well. + GeneratedBuildTag string + + // Any custom arguments go here + CustomArgs interface{} + + // Whether to use default command line flags + defaultCommandLineFlags bool +} + +// WithoutDefaultFlagParsing disables implicit addition of command line flags and parsing. 
+func (g *GeneratorArgs) WithoutDefaultFlagParsing() *GeneratorArgs { + g.defaultCommandLineFlags = false + return g +} + +func (g *GeneratorArgs) AddFlags(fs *pflag.FlagSet) { + fs.StringSliceVarP(&g.InputDirs, "input-dirs", "i", g.InputDirs, "Comma-separated list of import paths to get input types from.") + fs.StringVarP(&g.OutputBase, "output-base", "o", g.OutputBase, "Output base; defaults to $GOPATH/src/ or ./ if $GOPATH is not set.") + fs.StringVarP(&g.OutputPackagePath, "output-package", "p", g.OutputPackagePath, "Base package path.") + fs.StringVarP(&g.OutputFileBaseName, "output-file-base", "O", g.OutputFileBaseName, "Base name (without .go suffix) for output files.") + fs.StringVarP(&g.GoHeaderFilePath, "go-header-file", "h", g.GoHeaderFilePath, "File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.") + fs.BoolVar(&g.VerifyOnly, "verify-only", g.VerifyOnly, "If true, only verify existing output, do not write anything.") + fs.StringVar(&g.GeneratedBuildTag, "build-tag", g.GeneratedBuildTag, "A Go build tag to use to identify files generated by this command. Should be unique.") +} + +// LoadGoBoilerplate loads the boilerplate file passed to --go-header-file. +func (g *GeneratorArgs) LoadGoBoilerplate() ([]byte, error) { + b, err := ioutil.ReadFile(g.GoHeaderFilePath) + if err != nil { + return nil, err + } + b = bytes.Replace(b, []byte("YEAR"), []byte(strconv.Itoa(time.Now().UTC().Year())), -1) + + if g.GeneratedByCommentTemplate != "" { + if len(b) != 0 { + b = append(b, byte('\n')) + } + generatorName := path.Base(os.Args[0]) + generatedByComment := strings.Replace(g.GeneratedByCommentTemplate, "GENERATOR_NAME", generatorName, -1) + s := fmt.Sprintf("%s\n\n", generatedByComment) + b = append(b, []byte(s)...) + } + return b, nil +} + +// NewBuilder makes a new parser.Builder and populates it with the input +// directories. 
+func (g *GeneratorArgs) NewBuilder() (*parser.Builder, error) { + b := parser.New() + + // flag for including *_test.go + b.IncludeTestFiles = g.IncludeTestFiles + + // Ignore all auto-generated files. + b.AddBuildTags(g.GeneratedBuildTag) + + for _, d := range g.InputDirs { + var err error + if strings.HasSuffix(d, "/...") { + err = b.AddDirRecursive(strings.TrimSuffix(d, "/...")) + } else { + err = b.AddDir(d) + } + if err != nil { + return nil, fmt.Errorf("unable to add directory %q: %v", d, err) + } + } + return b, nil +} + +// InputIncludes returns true if the given package is a (sub) package of one of +// the InputDirs. +func (g *GeneratorArgs) InputIncludes(p *types.Package) bool { + for _, dir := range g.InputDirs { + d := dir + if strings.HasSuffix(d, "...") { + d = strings.TrimSuffix(d, "...") + } + if strings.HasPrefix(d, "./vendor/") { + d = strings.TrimPrefix(d, "./vendor/") + } + if strings.HasPrefix(p.Path, d) { + return true + } + } + return false +} + +// DefaultSourceTree returns the /src directory of the first entry in $GOPATH. +// If $GOPATH is empty, it returns "./". Useful as a default output location. +func DefaultSourceTree() string { + paths := strings.Split(os.Getenv("GOPATH"), string(filepath.ListSeparator)) + if len(paths) > 0 && len(paths[0]) > 0 { + return filepath.Join(paths[0], "src") + } + return "./" +} + +// Execute implements main(). +// If you don't need any non-default behavior, use as: +// args.Default().Execute(...) 
+func (g *GeneratorArgs) Execute(nameSystems namer.NameSystems, defaultSystem string, pkgs func(*generator.Context, *GeneratorArgs) generator.Packages) error { + if g.defaultCommandLineFlags { + g.AddFlags(pflag.CommandLine) + pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) + pflag.Parse() + } + + b, err := g.NewBuilder() + if err != nil { + return fmt.Errorf("Failed making a parser: %v", err) + } + + // pass through the flag on whether to include *_test.go files + b.IncludeTestFiles = g.IncludeTestFiles + + c, err := generator.NewContext(b, nameSystems, defaultSystem) + if err != nil { + return fmt.Errorf("Failed making a context: %v", err) + } + + c.Verify = g.VerifyOnly + packages := pkgs(c, g) + if err := c.ExecutePackages(g.OutputBase, packages); err != nil { + return fmt.Errorf("Failed executing generator: %v", err) + } + + return nil +} diff --git a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go new file mode 100644 index 0000000000..40f1306d5d --- /dev/null +++ b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go @@ -0,0 +1,924 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generators + +import ( + "fmt" + "io" + "path/filepath" + "sort" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/examples/set-gen/sets" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// CustomArgs is used tby the go2idl framework to pass args specific to this +// generator. +type CustomArgs struct { + BoundingDirs []string // Only deal with types rooted under these dirs. +} + +// This is the comment tag that carries parameters for deep-copy generation. +const ( + tagEnabledName = "k8s:deepcopy-gen" + interfacesTagName = tagEnabledName + ":interfaces" + interfacesNonPointerTagName = tagEnabledName + ":nonpointer-interfaces" // attach the DeepCopy methods to the +) + +// Known values for the comment tag. +const tagValuePackage = "package" + +// enabledTagValue holds parameters from a tagName tag. +type enabledTagValue struct { + value string + register bool +} + +func extractEnabledTypeTag(t *types.Type) *enabledTagValue { + comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...) + return extractEnabledTag(comments) +} + +func extractEnabledTag(comments []string) *enabledTagValue { + tagVals := types.ExtractCommentTags("+", comments)[tagEnabledName] + if tagVals == nil { + // No match for the tag. + return nil + } + // If there are multiple values, abort. + if len(tagVals) > 1 { + klog.Fatalf("Found %d %s tags: %q", len(tagVals), tagEnabledName, tagVals) + } + + // If we got here we are returning something. + tag := &enabledTagValue{} + + // Get the primary value. + parts := strings.Split(tagVals[0], ",") + if len(parts) >= 1 { + tag.value = parts[0] + } + + // Parse extra arguments. 
+ parts = parts[1:] + for i := range parts { + kv := strings.SplitN(parts[i], "=", 2) + k := kv[0] + v := "" + if len(kv) == 2 { + v = kv[1] + } + switch k { + case "register": + if v != "false" { + tag.register = true + } + default: + klog.Fatalf("Unsupported %s param: %q", tagEnabledName, parts[i]) + } + } + return tag +} + +// TODO: This is created only to reduce number of changes in a single PR. +// Remove it and use PublicNamer instead. +func deepCopyNamer() *namer.NameStrategy { + return &namer.NameStrategy{ + Join: func(pre string, in []string, post string) string { + return strings.Join(in, "_") + }, + PrependPackageNames: 1, + } +} + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": deepCopyNamer(), + "raw": namer.NewRawNamer("", nil), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + inputs := sets.NewString(context.Inputs...) + packages := generator.Packages{} + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + + boundingDirs := []string{} + if customArgs, ok := arguments.CustomArgs.(*CustomArgs); ok { + if customArgs.BoundingDirs == nil { + customArgs.BoundingDirs = context.Inputs + } + for i := range customArgs.BoundingDirs { + // Strip any trailing slashes - they are not exactly "correct" but + // this is friendlier. 
+ boundingDirs = append(boundingDirs, strings.TrimRight(customArgs.BoundingDirs[i], "/")) + } + } + + for i := range inputs { + klog.V(5).Infof("Considering pkg %q", i) + pkg := context.Universe[i] + if pkg == nil { + // If the input had no Go files, for example. + continue + } + + ptag := extractEnabledTag(pkg.Comments) + ptagValue := "" + ptagRegister := false + if ptag != nil { + ptagValue = ptag.value + if ptagValue != tagValuePackage { + klog.Fatalf("Package %v: unsupported %s value: %q", i, tagEnabledName, ptagValue) + } + ptagRegister = ptag.register + klog.V(5).Infof(" tag.value: %q, tag.register: %t", ptagValue, ptagRegister) + } else { + klog.V(5).Infof(" no tag") + } + + // If the pkg-scoped tag says to generate, we can skip scanning types. + pkgNeedsGeneration := (ptagValue == tagValuePackage) + if !pkgNeedsGeneration { + // If the pkg-scoped tag did not exist, scan all types for one that + // explicitly wants generation. + for _, t := range pkg.Types { + klog.V(5).Infof(" considering type %q", t.Name.String()) + ttag := extractEnabledTypeTag(t) + if ttag != nil && ttag.value == "true" { + klog.V(5).Infof(" tag=true") + if !copyableType(t) { + klog.Fatalf("Type %v requests deepcopy generation but is not copyable", t) + } + pkgNeedsGeneration = true + break + } + } + } + + if pkgNeedsGeneration { + klog.V(3).Infof("Package %q needs generation", i) + path := pkg.Path + // if the source path is within a /vendor/ directory (for example, + // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow + // generation to output to the proper relative path (under vendor). + // Otherwise, the generator will create the file in the wrong location + // in the output directory. + // TODO: build a more fundamental concept in gengo for dealing with modifications + // to vendored packages. 
+ if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) { + expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase) + if strings.Contains(expandedPath, "/vendor/") { + path = expandedPath + } + } + packages = append(packages, + &generator.DefaultPackage{ + PackageName: strings.Split(filepath.Base(pkg.Path), ".")[0], + PackagePath: path, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + NewGenDeepCopy(arguments.OutputFileBaseName, pkg.Path, boundingDirs, (ptagValue == tagValuePackage), ptagRegister), + } + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return t.Name.Package == pkg.Path + }, + }) + } + } + return packages +} + +// genDeepCopy produces a file with autogenerated deep-copy functions. +type genDeepCopy struct { + generator.DefaultGen + targetPackage string + boundingDirs []string + allTypes bool + registerTypes bool + imports namer.ImportTracker + typesForInit []*types.Type +} + +func NewGenDeepCopy(sanitizedName, targetPackage string, boundingDirs []string, allTypes, registerTypes bool) generator.Generator { + return &genDeepCopy{ + DefaultGen: generator.DefaultGen{ + OptionalName: sanitizedName, + }, + targetPackage: targetPackage, + boundingDirs: boundingDirs, + allTypes: allTypes, + registerTypes: registerTypes, + imports: generator.NewImportTracker(), + typesForInit: make([]*types.Type, 0), + } +} + +func (g *genDeepCopy) Namers(c *generator.Context) namer.NameSystems { + // Have the raw namer for this file track what it imports. + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.targetPackage, g.imports), + } +} + +func (g *genDeepCopy) Filter(c *generator.Context, t *types.Type) bool { + // Filter out types not being processed or not copyable within the package. 
+ enabled := g.allTypes + if !enabled { + ttag := extractEnabledTypeTag(t) + if ttag != nil && ttag.value == "true" { + enabled = true + } + } + if !enabled { + return false + } + if !copyableType(t) { + klog.V(2).Infof("Type %v is not copyable", t) + return false + } + klog.V(4).Infof("Type %v is copyable", t) + g.typesForInit = append(g.typesForInit, t) + return true +} + +func (g *genDeepCopy) copyableAndInBounds(t *types.Type) bool { + if !copyableType(t) { + return false + } + // Only packages within the restricted range can be processed. + if !isRootedUnder(t.Name.Package, g.boundingDirs) { + return false + } + return true +} + +// deepCopyMethod returns the signature of a DeepCopy() method, nil or an error +// if the type does not match. This allows more efficient deep copy +// implementations to be defined by the type's author. The correct signature +// for a type T is: +// func (t T) DeepCopy() T +// or: +// func (t *T) DeepCopy() *T +func deepCopyMethod(t *types.Type) (*types.Signature, error) { + f, found := t.Methods["DeepCopy"] + if !found { + return nil, nil + } + if len(f.Signature.Parameters) != 0 { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected no parameters", t) + } + if len(f.Signature.Results) != 1 { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected exactly one result", t) + } + + ptrResult := f.Signature.Results[0].Kind == types.Pointer && f.Signature.Results[0].Elem.Name == t.Name + nonPtrResult := f.Signature.Results[0].Name == t.Name + + if !ptrResult && !nonPtrResult { + return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected to return %s or *%s", t, t.Name.Name, t.Name.Name) + } + + ptrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Kind == types.Pointer && f.Signature.Receiver.Elem.Name == t.Name + nonPtrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Name == t.Name + + if ptrRcvr && !ptrResult { + return nil, fmt.Errorf("type %v: invalid DeepCopy 
signature, expected a *%s result for a *%s receiver", t, t.Name.Name, t.Name.Name)
+	}
+	if nonPtrRcvr && !nonPtrResult {
+		return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a %s result for a %s receiver", t, t.Name.Name, t.Name.Name)
+	}
+
+	return f.Signature, nil
+}
+
+// deepCopyMethodOrDie returns the signature of a DeepCopy method, nil or calls klog.Fatalf
+// if the type does not match.
+func deepCopyMethodOrDie(t *types.Type) *types.Signature {
+	ret, err := deepCopyMethod(t)
+	if err != nil {
+		klog.Fatal(err)
+	}
+	return ret
+}
+
+// deepCopyIntoMethod returns the signature of a DeepCopyInto() method, nil or an error
+// if the type is wrong. DeepCopyInto allows more efficient deep copy
+// implementations to be defined by the type's author. The correct signature
+// for a type T is:
+//    func (t T) DeepCopyInto(t *T)
+// or:
+//    func (t *T) DeepCopyInto(t *T)
+func deepCopyIntoMethod(t *types.Type) (*types.Signature, error) {
+	f, found := t.Methods["DeepCopyInto"]
+	if !found {
+		return nil, nil
+	}
+	if len(f.Signature.Parameters) != 1 {
+		return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected exactly one parameter", t)
+	}
+	if len(f.Signature.Results) != 0 {
+		return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected no result type", t)
+	}
+
+	ptrParam := f.Signature.Parameters[0].Kind == types.Pointer && f.Signature.Parameters[0].Elem.Name == t.Name
+
+	if !ptrParam {
+		return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected parameter of type *%s", t, t.Name.Name)
+	}
+
+	ptrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Kind == types.Pointer && f.Signature.Receiver.Elem.Name == t.Name
+	nonPtrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Name == t.Name
+
+	if !ptrRcvr && !nonPtrRcvr {
+		// this should never happen
+		return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a receiver of type %s or *%s", t, t.Name.Name, t.Name.Name)
+	}
+
+	return 
f.Signature, nil +} + +// deepCopyIntoMethodOrDie returns the signature of a DeepCopyInto() method, nil or calls klog.Fatalf +// if the type is wrong. +func deepCopyIntoMethodOrDie(t *types.Type) *types.Signature { + ret, err := deepCopyIntoMethod(t) + if err != nil { + klog.Fatal(err) + } + return ret +} + +func isRootedUnder(pkg string, roots []string) bool { + // Add trailing / to avoid false matches, e.g. foo/bar vs foo/barn. This + // assumes that bounding dirs do not have trailing slashes. + pkg = pkg + "/" + for _, root := range roots { + if strings.HasPrefix(pkg, root+"/") { + return true + } + } + return false +} + +func copyableType(t *types.Type) bool { + // If the type opts out of copy-generation, stop. + ttag := extractEnabledTypeTag(t) + if ttag != nil && ttag.value == "false" { + return false + } + + // Filter out private types. + if namer.IsPrivateGoName(t.Name.Name) { + return false + } + + if t.Kind == types.Alias { + // if the underlying built-in is not deepcopy-able, deepcopy is opt-in through definition of custom methods. + // Note that aliases of builtins, maps, slices can have deepcopy methods. 
+ if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + return true + } else { + return t.Underlying.Kind != types.Builtin || copyableType(t.Underlying) + } + } + + if t.Kind != types.Struct { + return false + } + + return true +} + +func underlyingType(t *types.Type) *types.Type { + for t.Kind == types.Alias { + t = t.Underlying + } + return t +} + +func (g *genDeepCopy) isOtherPackage(pkg string) bool { + if pkg == g.targetPackage { + return false + } + if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") { + return false + } + return true +} + +func (g *genDeepCopy) Imports(c *generator.Context) (imports []string) { + importLines := []string{} + for _, singleImport := range g.imports.ImportLines() { + if g.isOtherPackage(singleImport) { + importLines = append(importLines, singleImport) + } + } + return importLines +} + +func argsFromType(ts ...*types.Type) generator.Args { + a := generator.Args{ + "type": ts[0], + } + for i, t := range ts { + a[fmt.Sprintf("type%d", i+1)] = t + } + return a +} + +func (g *genDeepCopy) Init(c *generator.Context, w io.Writer) error { + return nil +} + +func (g *genDeepCopy) needsGeneration(t *types.Type) bool { + tag := extractEnabledTypeTag(t) + tv := "" + if tag != nil { + tv = tag.value + if tv != "true" && tv != "false" { + klog.Fatalf("Type %v: unsupported %s value: %q", t, tagEnabledName, tag.value) + } + } + if g.allTypes && tv == "false" { + // The whole package is being generated, but this type has opted out. + klog.V(5).Infof("Not generating for type %v because type opted out", t) + return false + } + if !g.allTypes && tv != "true" { + // The whole package is NOT being generated, and this type has NOT opted in. + klog.V(5).Infof("Not generating for type %v because type did not opt in", t) + return false + } + return true +} + +func extractInterfacesTag(t *types.Type) []string { + var result []string + comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...) 
+ values := types.ExtractCommentTags("+", comments)[interfacesTagName] + for _, v := range values { + if len(v) == 0 { + continue + } + intfs := strings.Split(v, ",") + for _, intf := range intfs { + if intf == "" { + continue + } + result = append(result, intf) + } + } + return result +} + +func extractNonPointerInterfaces(t *types.Type) (bool, error) { + comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...) + values := types.ExtractCommentTags("+", comments)[interfacesNonPointerTagName] + if len(values) == 0 { + return false, nil + } + result := values[0] == "true" + for _, v := range values { + if v == "true" != result { + return false, fmt.Errorf("contradicting %v value %q found to previous value %v", interfacesNonPointerTagName, v, result) + } + } + return result, nil +} + +func (g *genDeepCopy) deepCopyableInterfacesInner(c *generator.Context, t *types.Type) ([]*types.Type, error) { + if t.Kind != types.Struct { + return nil, nil + } + + intfs := extractInterfacesTag(t) + + var ts []*types.Type + for _, intf := range intfs { + t := types.ParseFullyQualifiedName(intf) + c.AddDir(t.Package) + intfT := c.Universe.Type(t) + if intfT == nil { + return nil, fmt.Errorf("unknown type %q in %s tag of type %s", intf, interfacesTagName, intfT) + } + if intfT.Kind != types.Interface { + return nil, fmt.Errorf("type %q in %s tag of type %s is not an interface, but: %q", intf, interfacesTagName, t, intfT.Kind) + } + g.imports.AddType(intfT) + ts = append(ts, intfT) + } + + return ts, nil +} + +// deepCopyableInterfaces returns the interface types to implement and whether they apply to a non-pointer receiver. 
+func (g *genDeepCopy) deepCopyableInterfaces(c *generator.Context, t *types.Type) ([]*types.Type, bool, error) { + ts, err := g.deepCopyableInterfacesInner(c, t) + if err != nil { + return nil, false, err + } + + set := map[string]*types.Type{} + for _, t := range ts { + set[t.String()] = t + } + + result := []*types.Type{} + for _, t := range set { + result = append(result, t) + } + + TypeSlice(result).Sort() // we need a stable sorting because it determines the order in generation + + nonPointerReceiver, err := extractNonPointerInterfaces(t) + if err != nil { + return nil, false, err + } + + return result, nonPointerReceiver, nil +} + +type TypeSlice []*types.Type + +func (s TypeSlice) Len() int { return len(s) } +func (s TypeSlice) Less(i, j int) bool { return s[i].String() < s[j].String() } +func (s TypeSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s TypeSlice) Sort() { sort.Sort(s) } + +func (g *genDeepCopy) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + if !g.needsGeneration(t) { + return nil + } + klog.V(5).Infof("Generating deepcopy function for type %v", t) + + sw := generator.NewSnippetWriter(w, c, "$", "$") + args := argsFromType(t) + + if deepCopyIntoMethodOrDie(t) == nil { + sw.Do("// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil.\n", args) + if isReference(t) { + sw.Do("func (in $.type|raw$) DeepCopyInto(out *$.type|raw$) {\n", args) + sw.Do("{in:=&in\n", nil) + } else { + sw.Do("func (in *$.type|raw$) DeepCopyInto(out *$.type|raw$) {\n", args) + } + if deepCopyMethodOrDie(t) != nil { + if t.Methods["DeepCopy"].Signature.Receiver.Kind == types.Pointer { + sw.Do("clone := in.DeepCopy()\n", nil) + sw.Do("*out = *clone\n", nil) + } else { + sw.Do("*out = in.DeepCopy()\n", nil) + } + sw.Do("return\n", nil) + } else { + g.generateFor(t, sw) + sw.Do("return\n", nil) + } + if isReference(t) { + sw.Do("}\n", nil) + } + sw.Do("}\n\n", nil) + } + + if deepCopyMethodOrDie(t) == nil { + sw.Do("// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new $.type|raw$.\n", args) + if isReference(t) { + sw.Do("func (in $.type|raw$) DeepCopy() $.type|raw$ {\n", args) + } else { + sw.Do("func (in *$.type|raw$) DeepCopy() *$.type|raw$ {\n", args) + } + sw.Do("if in == nil { return nil }\n", nil) + sw.Do("out := new($.type|raw$)\n", args) + sw.Do("in.DeepCopyInto(out)\n", nil) + if isReference(t) { + sw.Do("return *out\n", nil) + } else { + sw.Do("return out\n", nil) + } + sw.Do("}\n\n", nil) + } + + intfs, nonPointerReceiver, err := g.deepCopyableInterfaces(c, t) + if err != nil { + return err + } + for _, intf := range intfs { + sw.Do(fmt.Sprintf("// DeepCopy%s is an autogenerated deepcopy function, copying the receiver, creating a new $.type2|raw$.\n", intf.Name.Name), argsFromType(t, intf)) + if nonPointerReceiver { + sw.Do(fmt.Sprintf("func (in $.type|raw$) DeepCopy%s() $.type2|raw$ {\n", intf.Name.Name), argsFromType(t, intf)) + sw.Do("return *in.DeepCopy()", nil) + sw.Do("}\n\n", nil) + } else { + sw.Do(fmt.Sprintf("func (in *$.type|raw$) DeepCopy%s() $.type2|raw$ {\n", intf.Name.Name), argsFromType(t, intf)) + sw.Do("if c := in.DeepCopy(); c != nil {\n", nil) + sw.Do("return c\n", nil) + sw.Do("}\n", nil) + sw.Do("return nil\n", nil) + sw.Do("}\n\n", nil) 
+ } + } + + return sw.Error() +} + +// isReference return true for pointer, maps, slices and aliases of those. +func isReference(t *types.Type) bool { + if t.Kind == types.Pointer || t.Kind == types.Map || t.Kind == types.Slice { + return true + } + return t.Kind == types.Alias && isReference(underlyingType(t)) +} + +// we use the system of shadowing 'in' and 'out' so that the same code is valid +// at any nesting level. This makes the autogenerator easy to understand, and +// the compiler shouldn't care. +func (g *genDeepCopy) generateFor(t *types.Type, sw *generator.SnippetWriter) { + // derive inner types if t is an alias. We call the do* methods below with the alias type. + // basic rule: generate according to inner type, but construct objects with the alias type. + ut := underlyingType(t) + + var f func(*types.Type, *generator.SnippetWriter) + switch ut.Kind { + case types.Builtin: + f = g.doBuiltin + case types.Map: + f = g.doMap + case types.Slice: + f = g.doSlice + case types.Struct: + f = g.doStruct + case types.Pointer: + f = g.doPointer + case types.Interface: + // interfaces are handled in-line in the other cases + klog.Fatalf("Hit an interface type %v. This should never happen.", t) + case types.Alias: + // can never happen because we branch on the underlying type which is never an alias + klog.Fatalf("Hit an alias type %v. This should never happen.", t) + default: + klog.Fatalf("Hit an unsupported type %v.", t) + } + f(t, sw) +} + +// doBuiltin generates code for a builtin or an alias to a builtin. The generated code is +// is the same for both cases, i.e. it's the code for the underlying type. +func (g *genDeepCopy) doBuiltin(t *types.Type, sw *generator.SnippetWriter) { + if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + sw.Do("*out = in.DeepCopy()\n", nil) + return + } + + sw.Do("*out = *in\n", nil) +} + +// doMap generates code for a map or an alias to a map. The generated code is +// is the same for both cases, i.e. 
it's the code for the underlying type. +func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) { + ut := underlyingType(t) + uet := underlyingType(ut.Elem) + + if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + sw.Do("*out = in.DeepCopy()\n", nil) + return + } + + if !ut.Key.IsAssignable() { + klog.Fatalf("Hit an unsupported type %v for: %v", uet, t) + } + + sw.Do("*out = make($.|raw$, len(*in))\n", t) + sw.Do("for key, val := range *in {\n", nil) + dc, dci := deepCopyMethodOrDie(ut.Elem), deepCopyIntoMethodOrDie(ut.Elem) + switch { + case dc != nil || dci != nil: + // Note: a DeepCopy exists because it is added if DeepCopyInto is manually defined + leftPointer := ut.Elem.Kind == types.Pointer + rightPointer := !isReference(ut.Elem) + if dc != nil { + rightPointer = dc.Results[0].Kind == types.Pointer + } + if leftPointer == rightPointer { + sw.Do("(*out)[key] = val.DeepCopy()\n", nil) + } else if leftPointer { + sw.Do("x := val.DeepCopy()\n", nil) + sw.Do("(*out)[key] = &x\n", nil) + } else { + sw.Do("(*out)[key] = *val.DeepCopy()\n", nil) + } + case ut.Elem.IsAnonymousStruct(): // not uet here because it needs type cast + sw.Do("(*out)[key] = val\n", nil) + case uet.IsAssignable(): + sw.Do("(*out)[key] = val\n", nil) + case uet.Kind == types.Interface: + // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function + if uet.Name.Name == "interface{}" { + klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uet.Name.Name) + } + sw.Do("if val == nil {(*out)[key]=nil} else {\n", nil) + // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it + // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang + // parser does not give us the underlying interface name. So we cannot do any better. 
+ sw.Do(fmt.Sprintf("(*out)[key] = val.DeepCopy%s()\n", uet.Name.Name), nil) + sw.Do("}\n", nil) + case uet.Kind == types.Slice || uet.Kind == types.Map || uet.Kind == types.Pointer: + sw.Do("var outVal $.|raw$\n", uet) + sw.Do("if val == nil { (*out)[key] = nil } else {\n", nil) + sw.Do("in, out := &val, &outVal\n", uet) + g.generateFor(ut.Elem, sw) + sw.Do("}\n", nil) + sw.Do("(*out)[key] = outVal\n", nil) + case uet.Kind == types.Struct: + sw.Do("(*out)[key] = *val.DeepCopy()\n", uet) + default: + klog.Fatalf("Hit an unsupported type %v for %v", uet, t) + } + sw.Do("}\n", nil) +} + +// doSlice generates code for a slice or an alias to a slice. The generated code is +// is the same for both cases, i.e. it's the code for the underlying type. +func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) { + ut := underlyingType(t) + uet := underlyingType(ut.Elem) + + if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + sw.Do("*out = in.DeepCopy()\n", nil) + return + } + + sw.Do("*out = make($.|raw$, len(*in))\n", t) + if deepCopyMethodOrDie(ut.Elem) != nil || deepCopyIntoMethodOrDie(ut.Elem) != nil { + sw.Do("for i := range *in {\n", nil) + // Note: a DeepCopyInto exists because it is added if DeepCopy is manually defined + sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil) + sw.Do("}\n", nil) + } else if uet.Kind == types.Builtin || uet.IsAssignable() { + sw.Do("copy(*out, *in)\n", nil) + } else { + sw.Do("for i := range *in {\n", nil) + if uet.Kind == types.Slice || uet.Kind == types.Map || uet.Kind == types.Pointer || deepCopyMethodOrDie(ut.Elem) != nil || deepCopyIntoMethodOrDie(ut.Elem) != nil { + sw.Do("if (*in)[i] != nil {\n", nil) + sw.Do("in, out := &(*in)[i], &(*out)[i]\n", nil) + g.generateFor(ut.Elem, sw) + sw.Do("}\n", nil) + } else if uet.Kind == types.Interface { + // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function + if uet.Name.Name == "interface{}" { + 
klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uet.Name.Name) + } + sw.Do("if (*in)[i] != nil {\n", nil) + // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it + // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang + // parser does not give us the underlying interface name. So we cannot do any better. + sw.Do(fmt.Sprintf("(*out)[i] = (*in)[i].DeepCopy%s()\n", uet.Name.Name), nil) + sw.Do("}\n", nil) + } else if uet.Kind == types.Struct { + sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil) + } else { + klog.Fatalf("Hit an unsupported type %v for %v", uet, t) + } + sw.Do("}\n", nil) + } +} + +// doStruct generates code for a struct or an alias to a struct. The generated code is +// is the same for both cases, i.e. it's the code for the underlying type. +func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) { + ut := underlyingType(t) + + if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil { + sw.Do("*out = in.DeepCopy()\n", nil) + return + } + + // Simple copy covers a lot of cases. + sw.Do("*out = *in\n", nil) + + // Now fix-up fields as needed. 
+	// Walk every struct member and emit a fix-up after the initial shallow
+	// *out = *in copy: members with reference semantics (pointers, maps,
+	// slices, interfaces) or custom DeepCopy methods need dedicated code.
+	for _, m := range ut.Members {
+		ft := m.Type
+		uft := underlyingType(ft)
+
+		args := generator.Args{
+			"type": ft,
+			"kind": ft.Kind,
+			"name": m.Name,
+		}
+		dc, dci := deepCopyMethodOrDie(ft), deepCopyIntoMethodOrDie(ft)
+		switch {
+		case dc != nil || dci != nil:
+			// Note: a DeepCopyInto exists because it is added if DeepCopy is manually defined
+			leftPointer := ft.Kind == types.Pointer
+			rightPointer := !isReference(ft)
+			if dc != nil {
+				rightPointer = dc.Results[0].Kind == types.Pointer
+			}
+			if leftPointer == rightPointer {
+				sw.Do("out.$.name$ = in.$.name$.DeepCopy()\n", args)
+			} else if leftPointer {
+				// DeepCopy returns a value but the field is a pointer:
+				// copy into a temporary and take its address.
+				// (fixed: previously emitted "out.$.name$ = = &x", a doubled
+				// "=" that produced non-compiling generated code)
+				sw.Do("x := in.$.name$.DeepCopy()\n", args)
+				sw.Do("out.$.name$ = &x\n", args)
+			} else {
+				sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args)
+			}
+		case uft.Kind == types.Builtin:
+			// the initial *out = *in was enough
+		case uft.Kind == types.Map, uft.Kind == types.Slice, uft.Kind == types.Pointer:
+			// Fixup non-nil reference-semantic types.
+			sw.Do("if in.$.name$ != nil {\n", args)
+			sw.Do("in, out := &in.$.name$, &out.$.name$\n", args)
+			g.generateFor(ft, sw)
+			sw.Do("}\n", nil)
+		case uft.Kind == types.Struct:
+			if ft.IsAssignable() {
+				sw.Do("out.$.name$ = in.$.name$\n", args)
+			} else {
+				sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args)
+			}
+		case uft.Kind == types.Interface:
+			// Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function
+			if uft.Name.Name == "interface{}" {
+				klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uft.Name.Name)
+			}
+			sw.Do("if in.$.name$ != nil {\n", args)
+			// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
+			// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
+			// parser does not give us the underlying interface name. So we cannot do any better. 
+ sw.Do(fmt.Sprintf("out.$.name$ = in.$.name$.DeepCopy%s()\n", uft.Name.Name), args) + sw.Do("}\n", nil) + default: + klog.Fatalf("Hit an unsupported type %v for %v, from %v", uft, ft, t) + } + } +} + +// doPointer generates code for a pointer or an alias to a pointer. The generated code is +// is the same for both cases, i.e. it's the code for the underlying type. +func (g *genDeepCopy) doPointer(t *types.Type, sw *generator.SnippetWriter) { + ut := underlyingType(t) + uet := underlyingType(ut.Elem) + + dc, dci := deepCopyMethodOrDie(ut.Elem), deepCopyIntoMethodOrDie(ut.Elem) + switch { + case dc != nil || dci != nil: + rightPointer := !isReference(ut.Elem) + if dc != nil { + rightPointer = dc.Results[0].Kind == types.Pointer + } + if rightPointer { + sw.Do("*out = (*in).DeepCopy()\n", nil) + } else { + sw.Do("x := (*in).DeepCopy()\n", nil) + sw.Do("*out = &x\n", nil) + } + case uet.IsAssignable(): + sw.Do("*out = new($.Elem|raw$)\n", ut) + sw.Do("**out = **in", nil) + case uet.Kind == types.Map, uet.Kind == types.Slice, uet.Kind == types.Pointer: + sw.Do("*out = new($.Elem|raw$)\n", ut) + sw.Do("if **in != nil {\n", nil) + sw.Do("in, out := *in, *out\n", nil) + g.generateFor(uet, sw) + sw.Do("}\n", nil) + case uet.Kind == types.Struct: + sw.Do("*out = new($.Elem|raw$)\n", ut) + sw.Do("(*in).DeepCopyInto(*out)\n", nil) + default: + klog.Fatalf("Hit an unsupported type %v for %v", uet, t) + } +} diff --git a/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go b/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go new file mode 100644 index 0000000000..9ee7b79f50 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go @@ -0,0 +1,832 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generators
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"path/filepath"
+	"reflect"
+	"strings"
+
+	"k8s.io/gengo/args"
+	"k8s.io/gengo/generator"
+	"k8s.io/gengo/namer"
+	"k8s.io/gengo/types"
+
+	"k8s.io/klog"
+)
+
+// CustomArgs is used by the go2idl framework to pass args specific to this
+// generator.
+type CustomArgs struct {
+	ExtraPeerDirs []string // Always consider these as last-ditch possibilities for conversions.
+}
+
+// These are the comment tags that carry parameters for defaulter generation.
+const tagName = "k8s:defaulter-gen"
+const intputTagName = "k8s:defaulter-gen-input"
+
+// extractTag returns all +k8s:defaulter-gen tag values found in comments.
+func extractTag(comments []string) []string {
+	return types.ExtractCommentTags("+", comments)[tagName]
+}
+
+// extractInputTag returns all +k8s:defaulter-gen-input tag values found in comments.
+func extractInputTag(comments []string) []string {
+	return types.ExtractCommentTags("+", comments)[intputTagName]
+}
+
+// checkTag reports whether the +k8s:defaulter-gen tag values exactly match
+// require; with no require args it checks for a single empty-valued tag.
+func checkTag(comments []string, require ...string) bool {
+	values := types.ExtractCommentTags("+", comments)[tagName]
+	if len(require) == 0 {
+		return len(values) == 1 && values[0] == ""
+	}
+	return reflect.DeepEqual(values, require)
+}
+
+// defaultFnNamer names base defaulting functions: SetDefaults_<Type>.
+func defaultFnNamer() *namer.NameStrategy {
+	return &namer.NameStrategy{
+		Prefix: "SetDefaults_",
+		Join: func(pre string, in []string, post string) string {
+			return pre + strings.Join(in, "_") + post
+		},
+	}
+}
+
+// objectDefaultFnNamer names object defaulting functions: SetObjectDefaults_<Type>.
+func objectDefaultFnNamer() *namer.NameStrategy {
+	return &namer.NameStrategy{
+		Prefix: "SetObjectDefaults_",
+		Join: func(pre string, in []string, post string) string {
+			return pre + strings.Join(in, "_") + post
+		},
+	}
+}
+
+// NameSystems returns the name system used by the generators 
in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": namer.NewPublicNamer(1), + "raw": namer.NewRawNamer("", nil), + "defaultfn": defaultFnNamer(), + "objectdefaultfn": objectDefaultFnNamer(), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// defaults holds the declared defaulting functions for a given type (all defaulting functions +// are expected to be func(1)) +type defaults struct { + // object is the defaulter function for a top level type (typically one with TypeMeta) that + // invokes all child defaulters. May be nil if the object defaulter has not yet been generated. + object *types.Type + // base is a defaulter function defined for a type SetDefaults_Pod which does not invoke all + // child defaults - the base defaulter alone is insufficient to default a type + base *types.Type + // additional is zero or more defaulter functions of the form SetDefaults_Pod_XXXX that can be + // included in the Object defaulter. + additional []*types.Type +} + +// All of the types in conversions map are of type "DeclarationOf" with +// the underlying type being "Func". +type defaulterFuncMap map[*types.Type]defaults + +// Returns all manually-defined defaulting functions in the package. +func getManualDefaultingFunctions(context *generator.Context, pkg *types.Package, manualMap defaulterFuncMap) { + buffer := &bytes.Buffer{} + sw := generator.NewSnippetWriter(buffer, context, "$", "$") + + for _, f := range pkg.Functions { + if f.Underlying == nil || f.Underlying.Kind != types.Func { + klog.Errorf("Malformed function: %#v", f) + continue + } + if f.Underlying.Signature == nil { + klog.Errorf("Function without signature: %#v", f) + continue + } + signature := f.Underlying.Signature + // Check whether the function is defaulting function. 
+ // Note that all of them have signature: + // object: func SetObjectDefaults_inType(*inType) + // base: func SetDefaults_inType(*inType) + // additional: func SetDefaults_inType_Qualifier(*inType) + if signature.Receiver != nil { + continue + } + if len(signature.Parameters) != 1 { + continue + } + if len(signature.Results) != 0 { + continue + } + inType := signature.Parameters[0] + if inType.Kind != types.Pointer { + continue + } + // Check if this is the primary defaulter. + args := defaultingArgsFromType(inType.Elem) + sw.Do("$.inType|defaultfn$", args) + switch { + case f.Name.Name == buffer.String(): + key := inType.Elem + // We might scan the same package twice, and that's OK. + v, ok := manualMap[key] + if ok && v.base != nil && v.base.Name.Package != pkg.Path { + panic(fmt.Sprintf("duplicate static defaulter defined: %#v", key)) + } + v.base = f + manualMap[key] = v + klog.V(6).Infof("found base defaulter function for %s from %s", key.Name, f.Name) + // Is one of the additional defaulters - a top level defaulter on a type that is + // also invoked. + case strings.HasPrefix(f.Name.Name, buffer.String()+"_"): + key := inType.Elem + v, ok := manualMap[key] + if ok { + exists := false + for _, existing := range v.additional { + if existing.Name == f.Name { + exists = true + break + } + } + if exists { + continue + } + } + v.additional = append(v.additional, f) + manualMap[key] = v + klog.V(6).Infof("found additional defaulter function for %s from %s", key.Name, f.Name) + } + buffer.Reset() + sw.Do("$.inType|objectdefaultfn$", args) + if f.Name.Name == buffer.String() { + key := inType.Elem + // We might scan the same package twice, and that's OK. 
+ v, ok := manualMap[key] + if ok && v.base != nil && v.base.Name.Package != pkg.Path { + panic(fmt.Sprintf("duplicate static defaulter defined: %#v", key)) + } + v.object = f + manualMap[key] = v + klog.V(6).Infof("found object defaulter function for %s from %s", key.Name, f.Name) + } + buffer.Reset() + } +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + packages := generator.Packages{} + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + + // Accumulate pre-existing default functions. + // TODO: This is too ad-hoc. We need a better way. + existingDefaulters := defaulterFuncMap{} + + buffer := &bytes.Buffer{} + sw := generator.NewSnippetWriter(buffer, context, "$", "$") + + // We are generating defaults only for packages that are explicitly + // passed as InputDir. + for _, i := range context.Inputs { + klog.V(5).Infof("considering pkg %q", i) + pkg := context.Universe[i] + if pkg == nil { + // If the input had no Go files, for example. + continue + } + // typesPkg is where the types that needs defaulter are defined. + // Sometimes it is different from pkg. For example, kubernetes core/v1 + // types are defined in vendor/k8s.io/api/core/v1, while pkg is at + // pkg/api/v1. + typesPkg := pkg + + // Add defaulting functions. + getManualDefaultingFunctions(context, pkg, existingDefaulters) + + var peerPkgs []string + if customArgs, ok := arguments.CustomArgs.(*CustomArgs); ok { + for _, pkg := range customArgs.ExtraPeerDirs { + if i := strings.Index(pkg, "/vendor/"); i != -1 { + pkg = pkg[i+len("/vendor/"):] + } + peerPkgs = append(peerPkgs, pkg) + } + } + // Make sure our peer-packages are added and fully parsed. 
+ for _, pp := range peerPkgs { + context.AddDir(pp) + getManualDefaultingFunctions(context, context.Universe[pp], existingDefaulters) + } + + typesWith := extractTag(pkg.Comments) + shouldCreateObjectDefaulterFn := func(t *types.Type) bool { + if defaults, ok := existingDefaulters[t]; ok && defaults.object != nil { + // A default generator is defined + baseTypeName := "" + if defaults.base != nil { + baseTypeName = defaults.base.Name.String() + } + klog.V(5).Infof(" an object defaulter already exists as %s", baseTypeName) + return false + } + // opt-out + if checkTag(t.SecondClosestCommentLines, "false") { + return false + } + // opt-in + if checkTag(t.SecondClosestCommentLines, "true") { + return true + } + // For every k8s:defaulter-gen tag at the package level, interpret the value as a + // field name (like TypeMeta, ListMeta, ObjectMeta) and trigger defaulter generation + // for any type with any of the matching field names. Provides a more useful package + // level defaulting than global (because we only need defaulters on a subset of objects - + // usually those with TypeMeta). 
+ if t.Kind == types.Struct && len(typesWith) > 0 { + for _, field := range t.Members { + for _, s := range typesWith { + if field.Name == s { + return true + } + } + } + } + return false + } + + // if the types are not in the same package where the defaulter functions to be generated + inputTags := extractInputTag(pkg.Comments) + if len(inputTags) > 1 { + panic(fmt.Sprintf("there could only be one input tag, got %#v", inputTags)) + } + if len(inputTags) == 1 { + var err error + typesPkg, err = context.AddDirectory(filepath.Join(pkg.Path, inputTags[0])) + if err != nil { + klog.Fatalf("cannot import package %s", inputTags[0]) + } + // update context.Order to the latest context.Universe + orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} + context.Order = orderer.OrderUniverse(context.Universe) + } + + newDefaulters := defaulterFuncMap{} + for _, t := range typesPkg.Types { + if !shouldCreateObjectDefaulterFn(t) { + continue + } + if namer.IsPrivateGoName(t.Name.Name) { + // We won't be able to convert to a private type. 
+ klog.V(5).Infof(" found a type %v, but it is a private name", t) + continue + } + + // create a synthetic type we can use during generation + newDefaulters[t] = defaults{} + } + + // only generate defaulters for objects that actually have defined defaulters + // prevents empty defaulters from being registered + for { + promoted := 0 + for t, d := range newDefaulters { + if d.object != nil { + continue + } + if newCallTreeForType(existingDefaulters, newDefaulters).build(t, true) != nil { + args := defaultingArgsFromType(t) + sw.Do("$.inType|objectdefaultfn$", args) + newDefaulters[t] = defaults{ + object: &types.Type{ + Name: types.Name{ + Package: pkg.Path, + Name: buffer.String(), + }, + Kind: types.Func, + }, + } + buffer.Reset() + promoted++ + } + } + if promoted != 0 { + continue + } + + // prune any types that were not used + for t, d := range newDefaulters { + if d.object == nil { + klog.V(6).Infof("did not generate defaulter for %s because no child defaulters were registered", t.Name) + delete(newDefaulters, t) + } + } + break + } + + if len(newDefaulters) == 0 { + klog.V(5).Infof("no defaulters in package %s", pkg.Name) + } + + path := pkg.Path + // if the source path is within a /vendor/ directory (for example, + // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow + // generation to output to the proper relative path (under vendor). + // Otherwise, the generator will create the file in the wrong location + // in the output directory. + // TODO: build a more fundamental concept in gengo for dealing with modifications + // to vendored packages. 
+ if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) { + expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase) + if strings.Contains(expandedPath, "/vendor/") { + path = expandedPath + } + } + + packages = append(packages, + &generator.DefaultPackage{ + PackageName: filepath.Base(pkg.Path), + PackagePath: path, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + NewGenDefaulter(arguments.OutputFileBaseName, typesPkg.Path, pkg.Path, existingDefaulters, newDefaulters, peerPkgs), + } + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return t.Name.Package == typesPkg.Path + }, + }) + } + return packages +} + +// callTreeForType contains fields necessary to build a tree for types. +type callTreeForType struct { + existingDefaulters defaulterFuncMap + newDefaulters defaulterFuncMap + currentlyBuildingTypes map[*types.Type]bool +} + +func newCallTreeForType(existingDefaulters, newDefaulters defaulterFuncMap) *callTreeForType { + return &callTreeForType{ + existingDefaulters: existingDefaulters, + newDefaulters: newDefaulters, + currentlyBuildingTypes: make(map[*types.Type]bool), + } +} + +// build creates a tree of paths to fields (based on how they would be accessed in Go - pointer, elem, +// slice, or key) and the functions that should be invoked on each field. An in-order traversal of the resulting tree +// can be used to generate a Go function that invokes each nested function on the appropriate type. The return +// value may be nil if there are no functions to call on type or the type is a primitive (Defaulters can only be +// invoked on structs today). When root is true this function will not use a newDefaulter. existingDefaulters should +// contain all defaulting functions by type defined in code - newDefaulters should contain all object defaulters +// that could be or will be generated. 
If newDefaulters has an entry for a type, but the 'object' field is nil, +// this function skips adding that defaulter - this allows us to avoid generating object defaulter functions for +// list types that call empty defaulters. +func (c *callTreeForType) build(t *types.Type, root bool) *callNode { + parent := &callNode{} + + if root { + // the root node is always a pointer + parent.elem = true + } + + defaults, _ := c.existingDefaulters[t] + newDefaults, generated := c.newDefaulters[t] + switch { + case !root && generated && newDefaults.object != nil: + parent.call = append(parent.call, newDefaults.object) + // if we will be generating the defaulter, it by definition is a covering + // defaulter, so we halt recursion + klog.V(6).Infof("the defaulter %s will be generated as an object defaulter", t.Name) + return parent + + case defaults.object != nil: + // object defaulters are always covering + parent.call = append(parent.call, defaults.object) + return parent + + case defaults.base != nil: + parent.call = append(parent.call, defaults.base) + // if the base function indicates it "covers" (it already includes defaulters) + // we can halt recursion + if checkTag(defaults.base.CommentLines, "covers") { + klog.V(6).Infof("the defaulter %s indicates it covers all sub generators", t.Name) + return parent + } + } + + // base has been added already, now add any additional defaulters defined for this object + parent.call = append(parent.call, defaults.additional...) + + // if the type already exists, don't build the tree for it and don't generate anything. + // This is used to avoid recursion for nested recursive types. + if c.currentlyBuildingTypes[t] { + return nil + } + // if type doesn't exist, mark it as existing + c.currentlyBuildingTypes[t] = true + + defer func() { + // The type will now acts as a parent, not a nested recursive type. + // We can now build the tree for it safely. 
+ c.currentlyBuildingTypes[t] = false + }() + + switch t.Kind { + case types.Pointer: + if child := c.build(t.Elem, false); child != nil { + child.elem = true + parent.children = append(parent.children, *child) + } + case types.Slice, types.Array: + if child := c.build(t.Elem, false); child != nil { + child.index = true + if t.Elem.Kind == types.Pointer { + child.elem = true + } + parent.children = append(parent.children, *child) + } + case types.Map: + if child := c.build(t.Elem, false); child != nil { + child.key = true + parent.children = append(parent.children, *child) + } + case types.Struct: + for _, field := range t.Members { + name := field.Name + if len(name) == 0 { + if field.Type.Kind == types.Pointer { + name = field.Type.Elem.Name.Name + } else { + name = field.Type.Name.Name + } + } + if child := c.build(field.Type, false); child != nil { + child.field = name + parent.children = append(parent.children, *child) + } + } + case types.Alias: + if child := c.build(t.Underlying, false); child != nil { + parent.children = append(parent.children, *child) + } + } + if len(parent.children) == 0 && len(parent.call) == 0 { + //klog.V(6).Infof("decided type %s needs no generation", t.Name) + return nil + } + return parent +} + +const ( + runtimePackagePath = "k8s.io/apimachinery/pkg/runtime" + conversionPackagePath = "k8s.io/apimachinery/pkg/conversion" +) + +// genDefaulter produces a file with a autogenerated conversions. 
+type genDefaulter struct { + generator.DefaultGen + typesPackage string + outputPackage string + peerPackages []string + newDefaulters defaulterFuncMap + existingDefaulters defaulterFuncMap + imports namer.ImportTracker + typesForInit []*types.Type +} + +func NewGenDefaulter(sanitizedName, typesPackage, outputPackage string, existingDefaulters, newDefaulters defaulterFuncMap, peerPkgs []string) generator.Generator { + return &genDefaulter{ + DefaultGen: generator.DefaultGen{ + OptionalName: sanitizedName, + }, + typesPackage: typesPackage, + outputPackage: outputPackage, + peerPackages: peerPkgs, + newDefaulters: newDefaulters, + existingDefaulters: existingDefaulters, + imports: generator.NewImportTracker(), + typesForInit: make([]*types.Type, 0), + } +} + +func (g *genDefaulter) Namers(c *generator.Context) namer.NameSystems { + // Have the raw namer for this file track what it imports. + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genDefaulter) isOtherPackage(pkg string) bool { + if pkg == g.outputPackage { + return false + } + if strings.HasSuffix(pkg, `"`+g.outputPackage+`"`) { + return false + } + return true +} + +func (g *genDefaulter) Filter(c *generator.Context, t *types.Type) bool { + defaults, ok := g.newDefaulters[t] + if !ok || defaults.object == nil { + return false + } + g.typesForInit = append(g.typesForInit, t) + return true +} + +func (g *genDefaulter) Imports(c *generator.Context) (imports []string) { + var importLines []string + for _, singleImport := range g.imports.ImportLines() { + if g.isOtherPackage(singleImport) { + importLines = append(importLines, singleImport) + } + } + return importLines +} + +func (g *genDefaulter) Init(c *generator.Context, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + scheme := c.Universe.Type(types.Name{Package: runtimePackagePath, Name: "Scheme"}) + schemePtr := &types.Type{ + Kind: types.Pointer, + Elem: scheme, + } + 
sw.Do("// RegisterDefaults adds defaulters functions to the given scheme.\n", nil) + sw.Do("// Public to allow building arbitrary schemes.\n", nil) + sw.Do("// All generated defaulters are covering - they call all nested defaulters.\n", nil) + sw.Do("func RegisterDefaults(scheme $.|raw$) error {\n", schemePtr) + for _, t := range g.typesForInit { + args := defaultingArgsFromType(t) + sw.Do("scheme.AddTypeDefaultingFunc(&$.inType|raw${}, func(obj interface{}) { $.inType|objectdefaultfn$(obj.(*$.inType|raw$)) })\n", args) + } + sw.Do("return nil\n", nil) + sw.Do("}\n\n", nil) + return sw.Error() +} + +func (g *genDefaulter) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + if _, ok := g.newDefaulters[t]; !ok { + return nil + } + + klog.V(5).Infof("generating for type %v", t) + + callTree := newCallTreeForType(g.existingDefaulters, g.newDefaulters).build(t, true) + if callTree == nil { + klog.V(5).Infof(" no defaulters defined") + return nil + } + i := 0 + callTree.VisitInOrder(func(ancestors []*callNode, current *callNode) { + if len(current.call) == 0 { + return + } + path := callPath(append(ancestors, current)) + klog.V(5).Infof(" %d: %s", i, path) + i++ + }) + + sw := generator.NewSnippetWriter(w, c, "$", "$") + g.generateDefaulter(t, callTree, sw) + return sw.Error() +} + +func defaultingArgsFromType(inType *types.Type) generator.Args { + return generator.Args{ + "inType": inType, + } +} + +func (g *genDefaulter) generateDefaulter(inType *types.Type, callTree *callNode, sw *generator.SnippetWriter) { + sw.Do("func $.inType|objectdefaultfn$(in *$.inType|raw$) {\n", defaultingArgsFromType(inType)) + callTree.WriteMethod("in", 0, nil, sw) + sw.Do("}\n\n", nil) +} + +// callNode represents an entry in a tree of Go type accessors - the path from the root to a leaf represents +// how in Go code an access would be performed. 
For example, if a defaulting function exists on a container +// lifecycle hook, to invoke that defaulter correctly would require this Go code: +// +// for i := range pod.Spec.Containers { +// o := &pod.Spec.Containers[i] +// if o.LifecycleHook != nil { +// SetDefaults_LifecycleHook(o.LifecycleHook) +// } +// } +// +// That would be represented by a call tree like: +// +// callNode +// field: "Spec" +// children: +// - field: "Containers" +// children: +// - index: true +// children: +// - field: "LifecycleHook" +// elem: true +// call: +// - SetDefaults_LifecycleHook +// +// which we can traverse to build that Go struct (you must call the field Spec, then Containers, then range over +// that field, then check whether the LifecycleHook field is nil, before calling SetDefaults_LifecycleHook on +// the pointer to that field). +type callNode struct { + // field is the name of the Go member to access + field string + // key is true if this is a map and we must range over the key and values + key bool + // index is true if this is a slice and we must range over the slice values + index bool + // elem is true if the previous elements refer to a pointer (typically just field) + elem bool + + // call is all of the functions that must be invoked on this particular node, in order + call []*types.Type + // children is the child call nodes that must also be traversed + children []callNode +} + +// CallNodeVisitorFunc is a function for visiting a call tree. ancestors is the list of all parents +// of this node to the root of the tree - will be empty at the root. 
+type CallNodeVisitorFunc func(ancestors []*callNode, node *callNode) + +func (n *callNode) VisitInOrder(fn CallNodeVisitorFunc) { + n.visitInOrder(nil, fn) +} + +func (n *callNode) visitInOrder(ancestors []*callNode, fn CallNodeVisitorFunc) { + fn(ancestors, n) + ancestors = append(ancestors, n) + for i := range n.children { + n.children[i].visitInOrder(ancestors, fn) + } +} + +var ( + indexVariables = "ijklmnop" + localVariables = "abcdefgh" +) + +// varsForDepth creates temporary variables guaranteed to be unique within lexical Go scopes +// of this depth in a function. It uses canonical Go loop variables for the first 7 levels +// and then resorts to uglier prefixes. +func varsForDepth(depth int) (index, local string) { + if depth > len(indexVariables) { + index = fmt.Sprintf("i%d", depth) + } else { + index = indexVariables[depth : depth+1] + } + if depth > len(localVariables) { + local = fmt.Sprintf("local%d", depth) + } else { + local = localVariables[depth : depth+1] + } + return +} + +// writeCalls generates a list of function calls based on the calls field for the provided variable +// name and pointer. +func (n *callNode) writeCalls(varName string, isVarPointer bool, sw *generator.SnippetWriter) { + accessor := varName + if !isVarPointer { + accessor = "&" + accessor + } + for _, fn := range n.call { + sw.Do("$.fn|raw$($.var$)\n", generator.Args{ + "fn": fn, + "var": accessor, + }) + } +} + +// WriteMethod performs an in-order traversal of the calltree, generating loops and if blocks as necessary +// to correctly turn the call tree into a method body that invokes all calls on all child nodes of the call tree. +// Depth is used to generate local variables at the proper depth. +func (n *callNode) WriteMethod(varName string, depth int, ancestors []*callNode, sw *generator.SnippetWriter) { + // if len(n.call) > 0 { + // sw.Do(fmt.Sprintf("// %s\n", callPath(append(ancestors, n)).String()), nil) + // } + + if len(n.field) > 0 { + varName = varName + "." 
+ n.field + } + + index, local := varsForDepth(depth) + vars := generator.Args{ + "index": index, + "local": local, + "var": varName, + } + + isPointer := n.elem && !n.index + if isPointer && len(ancestors) > 0 { + sw.Do("if $.var$ != nil {\n", vars) + } + + switch { + case n.index: + sw.Do("for $.index$ := range $.var$ {\n", vars) + if n.elem { + sw.Do("$.local$ := $.var$[$.index$]\n", vars) + } else { + sw.Do("$.local$ := &$.var$[$.index$]\n", vars) + } + + n.writeCalls(local, true, sw) + for i := range n.children { + n.children[i].WriteMethod(local, depth+1, append(ancestors, n), sw) + } + sw.Do("}\n", nil) + case n.key: + default: + n.writeCalls(varName, isPointer, sw) + for i := range n.children { + n.children[i].WriteMethod(varName, depth, append(ancestors, n), sw) + } + } + + if isPointer && len(ancestors) > 0 { + sw.Do("}\n", nil) + } +} + +type callPath []*callNode + +// String prints a representation of a callPath that roughly approximates what a Go accessor +// would look like. Used for debugging only. 
+func (path callPath) String() string { + if len(path) == 0 { + return "" + } + var parts []string + for _, p := range path { + last := len(parts) - 1 + switch { + case p.elem: + if len(parts) > 0 { + parts[last] = "*" + parts[last] + } else { + parts = append(parts, "*") + } + case p.index: + if len(parts) > 0 { + parts[last] = parts[last] + "[i]" + } else { + parts = append(parts, "[i]") + } + case p.key: + if len(parts) > 0 { + parts[last] = parts[last] + "[key]" + } else { + parts = append(parts, "[key]") + } + default: + if len(p.field) > 0 { + parts = append(parts, p.field) + } else { + parts = append(parts, "") + } + } + } + var calls []string + for _, fn := range path[len(path)-1].call { + calls = append(calls, fn.Name.String()) + } + if len(calls) == 0 { + calls = append(calls, "") + } + + return strings.Join(parts, ".") + " calls " + strings.Join(calls, ", ") +} diff --git a/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go b/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go new file mode 100644 index 0000000000..308bb6b8d2 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go @@ -0,0 +1,419 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generators has the generators for the import-boss utility. 
+package generators + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + "sigs.k8s.io/yaml" + + "k8s.io/klog" +) + +const ( + goModFile = "go.mod" + importBossFileType = "import-boss" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer("", nil), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "raw" +} + +// Packages makes the import-boss package definition. +func Packages(c *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + pkgs := generator.Packages{} + c.FileTypes = map[string]generator.FileType{ + importBossFileType: importRuleFile{c}, + } + + for _, p := range c.Universe { + if !arguments.InputIncludes(p) { + // Don't run on e.g. third party dependencies. + continue + } + savedPackage := p + pkgs = append(pkgs, &generator.DefaultPackage{ + PackageName: p.Name, + PackagePath: p.Path, + Source: p.SourcePath, + // GeneratorFunc returns a list of generators. Each generator makes a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{&importRules{ + myPackage: savedPackage, + }} + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + return false + }, + }) + } + + return pkgs +} + +// A single import restriction rule. +type Rule struct { + // All import paths that match this regexp... + SelectorRegexp string + // ... must have one of these prefixes ... + AllowedPrefixes []string + // ... and must not have one of these prefixes. 
+ ForbiddenPrefixes []string +} + +type InverseRule struct { + Rule + // True if the rule is to be applied to transitive imports. + Transitive bool +} + +type fileFormat struct { + CurrentImports []string + + Rules []Rule + InverseRules []InverseRule + + path string +} + +func readFile(path string) (*fileFormat, error) { + currentBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("couldn't read %v: %v", path, err) + } + + var current fileFormat + err = yaml.Unmarshal(currentBytes, ¤t) + if err != nil { + return nil, fmt.Errorf("couldn't unmarshal %v: %v", path, err) + } + current.path = path + return ¤t, nil +} + +func writeFile(path string, ff *fileFormat) error { + raw, err := json.MarshalIndent(ff, "", "\t") + if err != nil { + return fmt.Errorf("couldn't format data for file %v.\n%#v", path, ff) + } + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("couldn't open %v for writing: %v", path, err) + } + defer f.Close() + _, err = f.Write(raw) + return err +} + +// This does the actual checking, since it knows the literal destination file. +type importRuleFile struct { + context *generator.Context +} + +func (irf importRuleFile) AssembleFile(f *generator.File, path string) error { + return irf.VerifyFile(f, path) +} + +// TODO: make a flag to enable this, or expose this information in some other way. +func (importRuleFile) listEntireImportTree(f *generator.File, path string) error { + // If the file exists, populate its current imports. This is mostly to help + // humans figure out what they need to fix. + if _, err := os.Stat(path); err != nil { + // Ignore packages which haven't opted in by adding an .import-restrictions file. 
+ return nil + } + + current, err := readFile(path) + if err != nil { + return err + } + + current.CurrentImports = []string{} + for v := range f.Imports { + current.CurrentImports = append(current.CurrentImports, v) + } + sort.Strings(current.CurrentImports) + + return writeFile(path, current) +} + +// removeLastDir removes the last directory, but leaves the file name +// unchanged. It returns the new path and the removed directory. So: +// "a/b/c/file" -> ("a/b/file", "c") +func removeLastDir(path string) (newPath, removedDir string) { + dir, file := filepath.Split(path) + dir = strings.TrimSuffix(dir, string(filepath.Separator)) + return filepath.Join(filepath.Dir(dir), file), filepath.Base(dir) +} + +// isGoModRoot checks if a directory is the root directory for a package +// by checking for the existence of a 'go.mod' file in that directory. +func isGoModRoot(path string) bool { + _, err := os.Stat(filepath.Join(filepath.Dir(path), goModFile)) + return err == nil +} + +// recursiveRead collects all '.import-restriction' files, between the current directory, +// and the package root when Go modules are enabled, or $GOPATH/src when they are not. 
+func recursiveRead(path string) ([]*fileFormat, error) { + restrictionFiles := make([]*fileFormat, 0) + + for { + if _, err := os.Stat(path); err == nil { + rules, err := readFile(path) + if err != nil { + return nil, err + } + + restrictionFiles = append(restrictionFiles, rules) + } + + nextPath, removedDir := removeLastDir(path) + if nextPath == path || isGoModRoot(path) || removedDir == "src" { + break + } + + path = nextPath + } + + return restrictionFiles, nil +} + +func (irf importRuleFile) VerifyFile(f *generator.File, path string) error { + restrictionFiles, err := recursiveRead(filepath.Join(f.PackageSourcePath, f.Name)) + if err != nil { + return fmt.Errorf("error finding rules file: %v", err) + } + + if err := irf.verifyRules(restrictionFiles, f); err != nil { + return err + } + + return irf.verifyInverseRules(restrictionFiles, f) +} + +func (irf importRuleFile) verifyRules(restrictionFiles []*fileFormat, f *generator.File) error { + selectors := make([][]*regexp.Regexp, len(restrictionFiles)) + for i, restrictionFile := range restrictionFiles { + for _, r := range restrictionFile.Rules { + re, err := regexp.Compile(r.SelectorRegexp) + if err != nil { + return fmt.Errorf("regexp `%s` in file %q doesn't compile: %v", r.SelectorRegexp, restrictionFile.path, err) + } + + selectors[i] = append(selectors[i], re) + } + } + + forbiddenImports := map[string]string{} + allowedMismatchedImports := []string{} + + for v := range f.Imports { + explicitlyAllowed := false + + NextRestrictionFiles: + for i, rules := range restrictionFiles { + for j, r := range rules.Rules { + matching := selectors[i][j].MatchString(v) + klog.V(5).Infof("Checking %v matches %v: %v\n", r.SelectorRegexp, v, matching) + if !matching { + continue + } + for _, forbidden := range r.ForbiddenPrefixes { + klog.V(4).Infof("Checking %v against %v\n", v, forbidden) + if strings.HasPrefix(v, forbidden) { + forbiddenImports[v] = forbidden + } + } + for _, allowed := range r.AllowedPrefixes { + 
klog.V(4).Infof("Checking %v against %v\n", v, allowed) + if strings.HasPrefix(v, allowed) { + explicitlyAllowed = true + break + } + } + + if !explicitlyAllowed { + allowedMismatchedImports = append(allowedMismatchedImports, v) + } else { + klog.V(2).Infof("%v importing %v allowed by %v\n", f.PackagePath, v, restrictionFiles[i].path) + break NextRestrictionFiles + } + } + } + } + + if len(forbiddenImports) > 0 || len(allowedMismatchedImports) > 0 { + var errorBuilder strings.Builder + for i, f := range forbiddenImports { + fmt.Fprintf(&errorBuilder, "import %v has forbidden prefix %v\n", i, f) + } + if len(allowedMismatchedImports) > 0 { + sort.Sort(sort.StringSlice(allowedMismatchedImports)) + fmt.Fprintf(&errorBuilder, "the following imports did not match any allowed prefix:\n") + for _, i := range allowedMismatchedImports { + fmt.Fprintf(&errorBuilder, " %v\n", i) + } + } + return errors.New(errorBuilder.String()) + } + + return nil +} + +// verifyInverseRules checks that all packages that import a package are allowed to import it. 
+func (irf importRuleFile) verifyInverseRules(restrictionFiles []*fileFormat, f *generator.File) error { + // compile all Selector regex in all restriction files + selectors := make([][]*regexp.Regexp, len(restrictionFiles)) + for i, restrictionFile := range restrictionFiles { + for _, r := range restrictionFile.InverseRules { + re, err := regexp.Compile(r.SelectorRegexp) + if err != nil { + return fmt.Errorf("regexp `%s` in file %q doesn't compile: %v", r.SelectorRegexp, restrictionFile.path, err) + } + + selectors[i] = append(selectors[i], re) + } + } + + directImport := map[string]bool{} + for _, imp := range irf.context.IncomingImports()[f.PackagePath] { + directImport[imp] = true + } + + forbiddenImports := map[string]string{} + allowedMismatchedImports := []string{} + + for _, v := range irf.context.TransitiveIncomingImports()[f.PackagePath] { + explicitlyAllowed := false + + NextRestrictionFiles: + for i, rules := range restrictionFiles { + for j, r := range rules.InverseRules { + if !r.Transitive && !directImport[v] { + continue + } + + re := selectors[i][j] + matching := re.MatchString(v) + klog.V(4).Infof("Checking %v matches %v (importing %v: %v\n", r.SelectorRegexp, v, f.PackagePath, matching) + if !matching { + continue + } + for _, forbidden := range r.ForbiddenPrefixes { + klog.V(4).Infof("Checking %v against %v\n", v, forbidden) + if strings.HasPrefix(v, forbidden) { + forbiddenImports[v] = forbidden + } + } + for _, allowed := range r.AllowedPrefixes { + klog.V(4).Infof("Checking %v against %v\n", v, allowed) + if strings.HasPrefix(v, allowed) { + explicitlyAllowed = true + break + } + } + if !explicitlyAllowed { + allowedMismatchedImports = append(allowedMismatchedImports, v) + } else { + klog.V(2).Infof("%v importing %v allowed by %v\n", v, f.PackagePath, restrictionFiles[i].path) + break NextRestrictionFiles + } + } + } + } + + if len(forbiddenImports) > 0 || len(allowedMismatchedImports) > 0 { + var errorBuilder strings.Builder + for i, f := 
range forbiddenImports { + fmt.Fprintf(&errorBuilder, "(inverse): import %v has forbidden prefix %v\n", i, f) + } + if len(allowedMismatchedImports) > 0 { + sort.Sort(sort.StringSlice(allowedMismatchedImports)) + fmt.Fprintf(&errorBuilder, "(inverse): the following imports did not match any allowed prefix:\n") + for _, i := range allowedMismatchedImports { + fmt.Fprintf(&errorBuilder, " %v\n", i) + } + } + return errors.New(errorBuilder.String()) + } + + return nil +} + +// importRules produces a file with a set for a single type. +type importRules struct { + myPackage *types.Package + imports namer.ImportTracker +} + +var ( + _ = generator.Generator(&importRules{}) + _ = generator.FileType(importRuleFile{}) +) + +func (r *importRules) Name() string { return "import rules" } +func (r *importRules) Filter(*generator.Context, *types.Type) bool { return false } +func (r *importRules) Namers(*generator.Context) namer.NameSystems { return nil } +func (r *importRules) PackageVars(*generator.Context) []string { return []string{} } +func (r *importRules) PackageConsts(*generator.Context) []string { return []string{} } +func (r *importRules) GenerateType(*generator.Context, *types.Type, io.Writer) error { return nil } +func (r *importRules) Filename() string { return ".import-restrictions" } +func (r *importRules) FileType() string { return importBossFileType } +func (r *importRules) Init(c *generator.Context, w io.Writer) error { return nil } +func (r *importRules) Finalize(*generator.Context, io.Writer) error { return nil } + +func dfsImports(dest *[]string, seen map[string]bool, p *types.Package) { + for _, p2 := range p.Imports { + if seen[p2.Path] { + continue + } + seen[p2.Path] = true + dfsImports(dest, seen, p2) + *dest = append(*dest, p2.Path) + } +} + +func (r *importRules) Imports(*generator.Context) []string { + all := []string{} + dfsImports(&all, map[string]bool{}, r.myPackage) + return all +} diff --git 
a/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go b/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go new file mode 100644 index 0000000000..8ddce7e3aa --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go @@ -0,0 +1,362 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generators has the generators for the set-gen utility. +package generators + +import ( + "io" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": namer.NewPublicNamer(0), + "private": namer.NewPrivateNamer(0), + "raw": namer.NewRawNamer("", nil), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// Packages makes the sets package definition. 
+func Packages(_ *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + return generator.Packages{&generator.DefaultPackage{ + PackageName: "sets", + PackagePath: arguments.OutputPackagePath, + HeaderText: boilerplate, + PackageDocumentation: []byte( + `// Package sets has auto-generated set types. +`), + // GeneratorFunc returns a list of generators. Each generator makes a + // single file. + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = []generator.Generator{ + // Always generate a "doc.go" file. + generator.DefaultGen{OptionalName: "doc"}, + // Make a separate file for the Empty type, since it's shared by every type. + generator.DefaultGen{ + OptionalName: "empty", + OptionalBody: []byte(emptyTypeDecl), + }, + } + // Since we want a file per type that we generate a set for, we + // have to provide a function for this. + for _, t := range c.Order { + generators = append(generators, &genSet{ + DefaultGen: generator.DefaultGen{ + // Use the privatized version of the + // type name as the file name. + // + // TODO: make a namer that converts + // camelCase to '-' separation for file + // names? + OptionalName: c.Namers["private"].Name(t), + }, + outputPackage: arguments.OutputPackagePath, + typeToMatch: t, + imports: generator.NewImportTracker(), + }) + } + return generators + }, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + // It would be reasonable to filter by the type's package here. + // It might be necessary if your input directory has a big + // import graph. + switch t.Kind { + case types.Map, types.Slice, types.Pointer: + // These types can't be keys in a map. + return false + case types.Builtin: + return true + case types.Struct: + // Only some structs can be keys in a map. 
This is triggered by the line + // // +genset + // or + // // +genset=true + return extractBoolTagOrDie("genset", t.CommentLines) == true + } + return false + }, + }} +} + +// genSet produces a file with a set for a single type. +type genSet struct { + generator.DefaultGen + outputPackage string + typeToMatch *types.Type + imports namer.ImportTracker +} + +// Filter ignores all but one type because we're making a single file per type. +func (g *genSet) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } + +func (g *genSet) Namers(c *generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + } +} + +func (g *genSet) Imports(c *generator.Context) (imports []string) { + return append(g.imports.ImportLines(), "reflect", "sort") +} + +// args constructs arguments for templates. Usage: +// g.args(t, "key1", value1, "key2", value2, ...) +// +// 't' is loaded with the key 'type'. +// +// We could use t directly as the argument, but doing it this way makes it easy +// to mix in additional parameters. This feature is not used in this set +// generator, but is present as an example. +func (g *genSet) args(t *types.Type, kv ...interface{}) interface{} { + m := map[interface{}]interface{}{"type": t} + for i := 0; i < len(kv)/2; i++ { + m[kv[i*2]] = kv[i*2+1] + } + return m +} + +// GenerateType makes the body of a file implementing a set for type t. +func (g *genSet) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + sw.Do(setCode, g.args(t)) + sw.Do("func less$.type|public$(lhs, rhs $.type|raw$) bool {\n", g.args(t)) + g.lessBody(sw, t) + sw.Do("}\n", g.args(t)) + return sw.Error() +} + +func (g *genSet) lessBody(sw *generator.SnippetWriter, t *types.Type) { + // TODO: make this recursive, handle pointers and multiple nested structs... 
+ switch t.Kind { + case types.Struct: + for _, m := range types.FlattenMembers(t.Members) { + sw.Do("if lhs.$.Name$ < rhs.$.Name$ { return true }\n", m) + sw.Do("if lhs.$.Name$ > rhs.$.Name$ { return false }\n", m) + } + sw.Do("return false\n", nil) + default: + sw.Do("return lhs < rhs\n", nil) + } +} + +// written to the "empty.go" file. +var emptyTypeDecl = ` +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} +` + +// Written for every type. If you've never used text/template before: +// $.type$ refers to the source type; |public means to +// call the function giving the public name, |raw the raw type name. +var setCode = `// sets.$.type|public$ is a set of $.type|raw$s, implemented via map[$.type|raw$]struct{} for minimal memory consumption. +type $.type|public$ map[$.type|raw$]Empty + +// New$.type|public$ creates a $.type|public$ from a list of values. +func New$.type|public$(items ...$.type|raw$) $.type|public$ { + ss := $.type|public${} + ss.Insert(items...) + return ss +} + +// $.type|public$KeySet creates a $.type|public$ from a keys of a map[$.type|raw$](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func $.type|public$KeySet(theMap interface{}) $.type|public$ { + v := reflect.ValueOf(theMap) + ret := $.type|public${} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().($.type|raw$)) + } + return ret +} + +// Insert adds items to the set. +func (s $.type|public$) Insert(items ...$.type|raw$) $.type|public$ { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s $.type|public$) Delete(items ...$.type|raw$) $.type|public$ { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. 
+func (s $.type|public$) Has(item $.type|raw$) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s $.type|public$) HasAll(items ...$.type|raw$) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s $.type|public$) HasAny(items ...$.type|raw$) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s $.type|public$) Difference(s2 $.type|public$) $.type|public$ { + result := New$.type|public$() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 $.type|public$) Union(s2 $.type|public$) $.type|public$ { + result := New$.type|public$() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 $.type|public$) Intersection(s2 $.type|public$) $.type|public$ { + var walk, other $.type|public$ + result := New$.type|public$() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. 
+func (s1 $.type|public$) IsSuperset(s2 $.type|public$) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 $.type|public$) Equal(s2 $.type|public$) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOf$.type|public$ []$.type|raw$ + +func (s sortableSliceOf$.type|public$) Len() int { return len(s) } +func (s sortableSliceOf$.type|public$) Less(i, j int) bool { return less$.type|public$(s[i], s[j]) } +func (s sortableSliceOf$.type|public$) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted $.type|raw$ slice. +func (s $.type|public$) List() []$.type|raw$ { + res := make(sortableSliceOf$.type|public$, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []$.type|raw$(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s $.type|public$) UnsortedList() []$.type|raw$ { + res :=make([]$.type|raw$, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s $.type|public$) PopAny() ($.type|raw$, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue $.type|raw$ + return zeroValue, false +} + +// Len returns the size of the set. +func (s $.type|public$) Len() int { + return len(s) +} + +` diff --git a/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go b/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go new file mode 100644 index 0000000000..bb3b4d2573 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "k8s.io/gengo/types" + "k8s.io/klog" +) + +// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if +// it exists, the value is boolean. If the tag did not exist, it returns +// false. +func extractBoolTagOrDie(key string, lines []string) bool { + val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) + if err != nil { + klog.Fatalf(err.Error()) + } + return val +} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go b/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go new file mode 100644 index 0000000000..9bfa85d43d --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. 
+ +package sets + +import ( + "reflect" + "sort" +) + +// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +type Byte map[byte]Empty + +// NewByte creates a Byte from a list of values. +func NewByte(items ...byte) Byte { + ss := Byte{} + ss.Insert(items...) + return ss +} + +// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func ByteKeySet(theMap interface{}) Byte { + v := reflect.ValueOf(theMap) + ret := Byte{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(byte)) + } + return ret +} + +// Insert adds items to the set. +func (s Byte) Insert(items ...byte) Byte { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Byte) Delete(items ...byte) Byte { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Byte) Has(item byte) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Byte) HasAll(items ...byte) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Byte) HasAny(items ...byte) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Byte) Difference(s2 Byte) Byte { + result := NewByte() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Byte) Union(s2 Byte) Byte { + result := NewByte() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Byte) Intersection(s2 Byte) Byte { + var walk, other Byte + result := NewByte() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Byte) IsSuperset(s2 Byte) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Byte) Equal(s2 Byte) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfByte []byte + +func (s sortableSliceOfByte) Len() int { return len(s) } +func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } +func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted byte slice. +func (s Byte) List() []byte { + res := make(sortableSliceOfByte, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []byte(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Byte) UnsortedList() []byte { + res := make([]byte, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. 
+func (s Byte) PopAny() (byte, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue byte + return zeroValue, false +} + +// Len returns the size of the set. +func (s Byte) Len() int { + return len(s) +} + +func lessByte(lhs, rhs byte) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/doc.go b/vendor/k8s.io/gengo/examples/set-gen/sets/doc.go new file mode 100644 index 0000000000..b152a0bf00 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +// Package sets has auto-generated set types. +package sets diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/empty.go b/vendor/k8s.io/gengo/examples/set-gen/sets/empty.go new file mode 100644 index 0000000000..e11e622c5b --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/empty.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/int.go b/vendor/k8s.io/gengo/examples/set-gen/sets/int.go new file mode 100644 index 0000000000..88bd709679 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/int.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +type Int map[int]Empty + +// NewInt creates a Int from a list of values. +func NewInt(items ...int) Int { + ss := Int{} + ss.Insert(items...) + return ss +} + +// IntKeySet creates a Int from a keys of a map[int](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func IntKeySet(theMap interface{}) Int { + v := reflect.ValueOf(theMap) + ret := Int{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int)) + } + return ret +} + +// Insert adds items to the set. 
+func (s Int) Insert(items ...int) Int { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int) Delete(items ...int) Int { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int) Has(item int) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int) HasAll(items ...int) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int) HasAny(items ...int) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int) Difference(s2 Int) Int { + result := NewInt() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int) Union(s2 Int) Int { + result := NewInt() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int) Intersection(s2 Int) Int { + var walk, other Int + result := NewInt() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int) IsSuperset(s2 Int) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int) Equal(s2 Int) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt []int + +func (s sortableSliceOfInt) Len() int { return len(s) } +func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } +func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int slice. +func (s Int) List() []int { + res := make(sortableSliceOfInt, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int) UnsortedList() []int { + res := make([]int, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. 
+func (s Int) PopAny() (int, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int) Len() int { + return len(s) +} + +func lessInt(lhs, rhs int) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go b/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go new file mode 100644 index 0000000000..b375a1b065 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +type Int64 map[int64]Empty + +// NewInt64 creates a Int64 from a list of values. +func NewInt64(items ...int64) Int64 { + ss := Int64{} + ss.Insert(items...) + return ss +} + +// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int64KeySet(theMap interface{}) Int64 { + v := reflect.ValueOf(theMap) + ret := Int64{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int64)) + } + return ret +} + +// Insert adds items to the set. 
+func (s Int64) Insert(items ...int64) Int64 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int64) Delete(items ...int64) Int64 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int64) Has(item int64) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int64) HasAll(items ...int64) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int64) HasAny(items ...int64) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int64) Difference(s2 Int64) Int64 { + result := NewInt64() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int64) Union(s2 Int64) Int64 { + result := NewInt64() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int64) Intersection(s2 Int64) Int64 { + var walk, other Int64 + result := NewInt64() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int64) IsSuperset(s2 Int64) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int64) Equal(s2 Int64) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt64 []int64 + +func (s sortableSliceOfInt64) Len() int { return len(s) } +func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } +func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int64 slice. +func (s Int64) List() []int64 { + res := make(sortableSliceOfInt64, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int64(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int64) UnsortedList() []int64 { + res := make([]int64, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. 
+func (s Int64) PopAny() (int64, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int64 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int64) Len() int { + return len(s) +} + +func lessInt64(lhs, rhs int64) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/string.go b/vendor/k8s.io/gengo/examples/set-gen/sets/string.go new file mode 100644 index 0000000000..e6f37db887 --- /dev/null +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/string.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. 
+func (s String) Insert(items ...string) String { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) String { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. 
+func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/gengo/generator/default_generator.go b/vendor/k8s.io/gengo/generator/default_generator.go new file mode 100644 index 0000000000..f947668214 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/default_generator.go @@ -0,0 +1,62 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "io" + + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +const ( + GolangFileType = "golang" +) + +// DefaultGen implements a do-nothing Generator. +// +// It can be used to implement static content files. +type DefaultGen struct { + // OptionalName, if present, will be used for the generator's name, and + // the filename (with ".go" appended). + OptionalName string + + // OptionalBody, if present, will be used as the return from the "Init" + // method. This causes it to be static content for the entire file if + // no other generator touches the file. 
+ OptionalBody []byte +} + +func (d DefaultGen) Name() string { return d.OptionalName } +func (d DefaultGen) Filter(*Context, *types.Type) bool { return true } +func (d DefaultGen) Namers(*Context) namer.NameSystems { return nil } +func (d DefaultGen) Imports(*Context) []string { return []string{} } +func (d DefaultGen) PackageVars(*Context) []string { return []string{} } +func (d DefaultGen) PackageConsts(*Context) []string { return []string{} } +func (d DefaultGen) GenerateType(*Context, *types.Type, io.Writer) error { return nil } +func (d DefaultGen) Filename() string { return d.OptionalName + ".go" } +func (d DefaultGen) FileType() string { return GolangFileType } +func (d DefaultGen) Finalize(*Context, io.Writer) error { return nil } + +func (d DefaultGen) Init(c *Context, w io.Writer) error { + _, err := w.Write(d.OptionalBody) + return err +} + +var ( + _ = Generator(DefaultGen{}) +) diff --git a/vendor/k8s.io/gengo/generator/default_package.go b/vendor/k8s.io/gengo/generator/default_package.go new file mode 100644 index 0000000000..dcf0883235 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/default_package.go @@ -0,0 +1,75 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "k8s.io/gengo/types" +) + +// DefaultPackage contains a default implementation of Package. +type DefaultPackage struct { + // Short name of package, used in the "package xxxx" line. 
+ PackageName string + // Import path of the package, and the location on disk of the package. + PackagePath string + // The location of the package on disk. + Source string + + // Emitted at the top of every file. + HeaderText []byte + + // Emitted only for a "doc.go" file; appended to the HeaderText for + // that file. + PackageDocumentation []byte + + // If non-nil, will be called on "Generators"; otherwise, the static + // list will be used. So you should set only one of these two fields. + GeneratorFunc func(*Context) []Generator + GeneratorList []Generator + + // Optional; filters the types exposed to the generators. + FilterFunc func(*Context, *types.Type) bool +} + +func (d *DefaultPackage) Name() string { return d.PackageName } +func (d *DefaultPackage) Path() string { return d.PackagePath } +func (d *DefaultPackage) SourcePath() string { return d.Source } + +func (d *DefaultPackage) Filter(c *Context, t *types.Type) bool { + if d.FilterFunc != nil { + return d.FilterFunc(c, t) + } + return true +} + +func (d *DefaultPackage) Generators(c *Context) []Generator { + if d.GeneratorFunc != nil { + return d.GeneratorFunc(c) + } + return d.GeneratorList +} + +func (d *DefaultPackage) Header(filename string) []byte { + if filename == "doc.go" { + return append(d.HeaderText, d.PackageDocumentation...) + } + return d.HeaderText +} + +var ( + _ = Package(&DefaultPackage{}) +) diff --git a/vendor/k8s.io/gengo/generator/doc.go b/vendor/k8s.io/gengo/generator/doc.go new file mode 100644 index 0000000000..d8e12534a4 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/doc.go @@ -0,0 +1,31 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generator defines an interface for code generators to implement. +// +// To use this package, you'll implement the "Package" and "Generator" +// interfaces; you'll call NewContext to load up the types you want to work +// with, and then you'll call one or more of the Execute methods. See the +// interface definitions for explanations. All output will have gofmt called on +// it automatically, so you do not need to worry about generating correct +// indentation. +// +// This package also exposes SnippetWriter. SnippetWriter reduces to a minimum +// the boilerplate involved in setting up a template from go's text/template +// package. Additionally, all naming systems in the Context will be added as +// functions to the parsed template, so that they can be called directly from +// your templates! +package generator // import "k8s.io/gengo/generator" diff --git a/vendor/k8s.io/gengo/generator/error_tracker.go b/vendor/k8s.io/gengo/generator/error_tracker.go new file mode 100644 index 0000000000..964dae37ba --- /dev/null +++ b/vendor/k8s.io/gengo/generator/error_tracker.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "io" +) + +// ErrorTracker tracks errors to the underlying writer, so that you can ignore +// them until you're ready to return. +type ErrorTracker struct { + io.Writer + err error +} + +// NewErrorTracker makes a new error tracker; note that it implements io.Writer. +func NewErrorTracker(w io.Writer) *ErrorTracker { + return &ErrorTracker{Writer: w} +} + +// Write intercepts calls to Write. +func (et *ErrorTracker) Write(p []byte) (n int, err error) { + if et.err != nil { + return 0, et.err + } + n, err = et.Writer.Write(p) + if err != nil { + et.err = err + } + return n, err +} + +// Error returns nil if no error has occurred, otherwise it returns the error. +func (et *ErrorTracker) Error() error { + return et.err +} diff --git a/vendor/k8s.io/gengo/generator/execute.go b/vendor/k8s.io/gengo/generator/execute.go new file mode 100644 index 0000000000..d1b12258c7 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/execute.go @@ -0,0 +1,314 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generator + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/imports" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + + "k8s.io/klog" +) + +func errs2strings(errors []error) []string { + strs := make([]string, len(errors)) + for i := range errors { + strs[i] = errors[i].Error() + } + return strs +} + +// ExecutePackages runs the generators for every package in 'packages'. 'outDir' +// is the base directory in which to place all the generated packages; it +// should be a physical path on disk, not an import path. e.g.: +// /path/to/home/path/to/gopath/src/ +// Each package has its import path already, this will be appended to 'outDir'. +func (c *Context) ExecutePackages(outDir string, packages Packages) error { + var errors []error + for _, p := range packages { + if err := c.ExecutePackage(outDir, p); err != nil { + errors = append(errors, err) + } + } + if len(errors) > 0 { + return fmt.Errorf("some packages had errors:\n%v\n", strings.Join(errs2strings(errors), "\n")) + } + return nil +} + +type DefaultFileType struct { + Format func([]byte) ([]byte, error) + Assemble func(io.Writer, *File) +} + +func (ft DefaultFileType) AssembleFile(f *File, pathname string) error { + klog.V(2).Infof("Assembling file %q", pathname) + destFile, err := os.Create(pathname) + if err != nil { + return err + } + defer destFile.Close() + + b := &bytes.Buffer{} + et := NewErrorTracker(b) + ft.Assemble(et, f) + if et.Error() != nil { + return et.Error() + } + if formatted, err := ft.Format(b.Bytes()); err != nil { + err = fmt.Errorf("unable to format file %q (%v).", pathname, err) + // Write the file anyway, so they can see what's going wrong and fix the generator. 
+ if _, err2 := destFile.Write(b.Bytes()); err2 != nil { + return err2 + } + return err + } else { + _, err = destFile.Write(formatted) + return err + } +} + +func (ft DefaultFileType) VerifyFile(f *File, pathname string) error { + klog.V(2).Infof("Verifying file %q", pathname) + friendlyName := filepath.Join(f.PackageName, f.Name) + b := &bytes.Buffer{} + et := NewErrorTracker(b) + ft.Assemble(et, f) + if et.Error() != nil { + return et.Error() + } + formatted, err := ft.Format(b.Bytes()) + if err != nil { + return fmt.Errorf("unable to format the output for %q: %v", friendlyName, err) + } + existing, err := ioutil.ReadFile(pathname) + if err != nil { + return fmt.Errorf("unable to read file %q for comparison: %v", friendlyName, err) + } + if bytes.Compare(formatted, existing) == 0 { + return nil + } + // Be nice and find the first place where they differ + i := 0 + for i < len(formatted) && i < len(existing) && formatted[i] == existing[i] { + i++ + } + eDiff, fDiff := existing[i:], formatted[i:] + if len(eDiff) > 100 { + eDiff = eDiff[:100] + } + if len(fDiff) > 100 { + fDiff = fDiff[:100] + } + return fmt.Errorf("output for %q differs; first existing/expected diff: \n %q\n %q", friendlyName, string(eDiff), string(fDiff)) +} + +func assembleGolangFile(w io.Writer, f *File) { + w.Write(f.Header) + fmt.Fprintf(w, "package %v\n\n", f.PackageName) + + if len(f.Imports) > 0 { + fmt.Fprint(w, "import (\n") + for i := range f.Imports { + if strings.Contains(i, "\"") { + // they included quotes, or are using the + // `name "path/to/pkg"` format. 
+ fmt.Fprintf(w, "\t%s\n", i) + } else { + fmt.Fprintf(w, "\t%q\n", i) + } + } + fmt.Fprint(w, ")\n\n") + } + + if f.Vars.Len() > 0 { + fmt.Fprint(w, "var (\n") + w.Write(f.Vars.Bytes()) + fmt.Fprint(w, ")\n\n") + } + + if f.Consts.Len() > 0 { + fmt.Fprint(w, "const (\n") + w.Write(f.Consts.Bytes()) + fmt.Fprint(w, ")\n\n") + } + + w.Write(f.Body.Bytes()) +} + +func importsWrapper(src []byte) ([]byte, error) { + return imports.Process("", src, nil) +} + +func NewGolangFile() *DefaultFileType { + return &DefaultFileType{ + Format: importsWrapper, + Assemble: assembleGolangFile, + } +} + +// format should be one line only, and not end with \n. +func addIndentHeaderComment(b *bytes.Buffer, format string, args ...interface{}) { + if b.Len() > 0 { + fmt.Fprintf(b, "\n// "+format+"\n", args...) + } else { + fmt.Fprintf(b, "// "+format+"\n", args...) + } +} + +func (c *Context) filteredBy(f func(*Context, *types.Type) bool) *Context { + c2 := *c + c2.Order = []*types.Type{} + for _, t := range c.Order { + if f(c, t) { + c2.Order = append(c2.Order, t) + } + } + return &c2 +} + +// make a new context; inheret c.Namers, but add on 'namers'. In case of a name +// collision, the namer in 'namers' wins. +func (c *Context) addNameSystems(namers namer.NameSystems) *Context { + if namers == nil { + return c + } + c2 := *c + // Copy the existing name systems so we don't corrupt a parent context + c2.Namers = namer.NameSystems{} + for k, v := range c.Namers { + c2.Namers[k] = v + } + + for name, namer := range namers { + c2.Namers[name] = namer + } + return &c2 +} + +// ExecutePackage executes a single package. 'outDir' is the base directory in +// which to place the package; it should be a physical path on disk, not an +// import path. e.g.: '/path/to/home/path/to/gopath/src/' The package knows its +// import path already, this will be appended to 'outDir'. 
+func (c *Context) ExecutePackage(outDir string, p Package) error { + path := filepath.Join(outDir, p.Path()) + klog.V(2).Infof("Processing package %q, disk location %q", p.Name(), path) + // Filter out any types the *package* doesn't care about. + packageContext := c.filteredBy(p.Filter) + os.MkdirAll(path, 0755) + files := map[string]*File{} + for _, g := range p.Generators(packageContext) { + // Filter out types the *generator* doesn't care about. + genContext := packageContext.filteredBy(g.Filter) + // Now add any extra name systems defined by this generator + genContext = genContext.addNameSystems(g.Namers(genContext)) + + fileType := g.FileType() + if len(fileType) == 0 { + return fmt.Errorf("generator %q must specify a file type", g.Name()) + } + f := files[g.Filename()] + if f == nil { + // This is the first generator to reference this file, so start it. + f = &File{ + Name: g.Filename(), + FileType: fileType, + PackageName: p.Name(), + PackagePath: p.Path(), + PackageSourcePath: p.SourcePath(), + Header: p.Header(g.Filename()), + Imports: map[string]struct{}{}, + } + files[f.Name] = f + } else { + if f.FileType != g.FileType() { + return fmt.Errorf("file %q already has type %q, but generator %q wants to use type %q", f.Name, f.FileType, g.Name(), g.FileType()) + } + } + + if vars := g.PackageVars(genContext); len(vars) > 0 { + addIndentHeaderComment(&f.Vars, "Package-wide variables from generator %q.", g.Name()) + for _, v := range vars { + if _, err := fmt.Fprintf(&f.Vars, "%s\n", v); err != nil { + return err + } + } + } + if consts := g.PackageConsts(genContext); len(consts) > 0 { + addIndentHeaderComment(&f.Consts, "Package-wide consts from generator %q.", g.Name()) + for _, v := range consts { + if _, err := fmt.Fprintf(&f.Consts, "%s\n", v); err != nil { + return err + } + } + } + if err := genContext.executeBody(&f.Body, g); err != nil { + return err + } + if imports := g.Imports(genContext); len(imports) > 0 { + for _, i := range imports { + 
f.Imports[i] = struct{}{} + } + } + } + + var errors []error + for _, f := range files { + finalPath := filepath.Join(path, f.Name) + assembler, ok := c.FileTypes[f.FileType] + if !ok { + return fmt.Errorf("the file type %q registered for file %q does not exist in the context", f.FileType, f.Name) + } + var err error + if c.Verify { + err = assembler.VerifyFile(f, finalPath) + } else { + err = assembler.AssembleFile(f, finalPath) + } + if err != nil { + errors = append(errors, err) + } + } + if len(errors) > 0 { + return fmt.Errorf("errors in package %q:\n%v\n", p.Path(), strings.Join(errs2strings(errors), "\n")) + } + return nil +} + +func (c *Context) executeBody(w io.Writer, generator Generator) error { + et := NewErrorTracker(w) + if err := generator.Init(c, et); err != nil { + return err + } + for _, t := range c.Order { + if err := generator.GenerateType(c, t, et); err != nil { + return err + } + } + if err := generator.Finalize(c, et); err != nil { + return err + } + return et.Error() +} diff --git a/vendor/k8s.io/gengo/generator/generator.go b/vendor/k8s.io/gengo/generator/generator.go new file mode 100644 index 0000000000..4b48f503cf --- /dev/null +++ b/vendor/k8s.io/gengo/generator/generator.go @@ -0,0 +1,256 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generator + +import ( + "bytes" + "io" + + "k8s.io/gengo/namer" + "k8s.io/gengo/parser" + "k8s.io/gengo/types" +) + +// Package contains the contract for generating a package. +type Package interface { + // Name returns the package short name. + Name() string + // Path returns the package import path. + Path() string + // SourcePath returns the location of the package on disk. + SourcePath() string + + // Filter should return true if this package cares about this type. + // Otherwise, this type will be omitted from the type ordering for + // this package. + Filter(*Context, *types.Type) bool + + // Header should return a header for the file, including comment markers. + // Useful for copyright notices and doc strings. Include an + // autogeneration notice! Do not include the "package x" line. + Header(filename string) []byte + + // Generators returns the list of generators for this package. It is + // allowed for more than one generator to write to the same file. + // A Context is passed in case the list of generators depends on the + // input types. + Generators(*Context) []Generator +} + +type File struct { + Name string + FileType string + PackageName string + Header []byte + PackagePath string + PackageSourcePath string + Imports map[string]struct{} + Vars bytes.Buffer + Consts bytes.Buffer + Body bytes.Buffer +} + +type FileType interface { + AssembleFile(f *File, path string) error + VerifyFile(f *File, path string) error +} + +// Packages is a list of packages to generate. +type Packages []Package + +// Generator is the contract for anything that wants to do auto-generation. +// It's expected that the io.Writers passed to the below functions will be +// ErrorTrackers; this allows implementations to not check for io errors, +// making more readable code. +// +// The call order for the functions that take a Context is: +// 1. Filter() // Subsequent calls see only types that pass this. +// 2. 
Namers() // Subsequent calls see the namers provided by this. +// 3. PackageVars() +// 4. PackageConsts() +// 5. Init() +// 6. GenerateType() // Called N times, once per type in the context's Order. +// 7. Imports() +// +// You may have multiple generators for the same file. +type Generator interface { + // The name of this generator. Will be included in generated comments. + Name() string + + // Filter should return true if this generator cares about this type. + // (otherwise, GenerateType will not be called.) + // + // Filter is called before any of the generator's other functions; + // subsequent calls will get a context with only the types that passed + // this filter. + Filter(*Context, *types.Type) bool + + // If this generator needs special namers, return them here. These will + // override the original namers in the context if there is a collision. + // You may return nil if you don't need special names. These names will + // be available in the context passed to the rest of the generator's + // functions. + // + // A use case for this is to return a namer that tracks imports. + Namers(*Context) namer.NameSystems + + // Init should write an init function, and any other content that's not + // generated per-type. (It's not intended for generator specific + // initialization! Do that when your Package constructs the + // Generators.) + Init(*Context, io.Writer) error + + // Finalize should write finish up functions, and any other content that's not + // generated per-type. + Finalize(*Context, io.Writer) error + + // PackageVars should emit an array of variable lines. They will be + // placed in a var ( ... ) block. There's no need to include a leading + // \t or trailing \n. + PackageVars(*Context) []string + + // PackageConsts should emit an array of constant lines. They will be + // placed in a const ( ... ) block. There's no need to include a leading + // \t or trailing \n. 
+ PackageConsts(*Context) []string + + // GenerateType should emit the code for a particular type. + GenerateType(*Context, *types.Type, io.Writer) error + + // Imports should return a list of necessary imports. They will be + // formatted correctly. You do not need to include quotation marks, + // return only the package name; alternatively, you can also return + // imports in the format `name "path/to/pkg"`. Imports will be called + // after Init, PackageVars, PackageConsts, and GenerateType, to allow + // you to keep track of what imports you actually need. + Imports(*Context) []string + + // Preferred file name of this generator, not including a path. It is + // allowed for multiple generators to use the same filename, but it's + // up to you to make sure they don't have colliding import names. + // TODO: provide per-file import tracking, removing the requirement + // that generators coordinate.. + Filename() string + + // A registered file type in the context to generate this file with. If + // the FileType is not found in the context, execution will stop. + FileType() string +} + +// Context is global context for individual generators to consume. +type Context struct { + // A map from the naming system to the names for that system. E.g., you + // might have public names and several private naming systems. + Namers namer.NameSystems + + // All the types, in case you want to look up something. + Universe types.Universe + + // Incoming imports, i.e. packages importing the given package. + incomingImports map[string][]string + + // Incoming transitive imports, i.e. the transitive closure of IncomingImports + incomingTransitiveImports map[string][]string + + // All the user-specified packages. This is after recursive expansion. + Inputs []string + + // The canonical ordering of the types (will be filtered by both the + // Package's and Generator's Filter methods). + Order []*types.Type + + // A set of types this context can process. 
If this is empty or nil, + // the default "golang" filetype will be provided. + FileTypes map[string]FileType + + // If true, Execute* calls will just verify that the existing output is + // correct. (You may set this after calling NewContext.) + Verify bool + + // Allows generators to add packages at runtime. + builder *parser.Builder +} + +// NewContext generates a context from the given builder, naming systems, and +// the naming system you wish to construct the canonical ordering from. +func NewContext(b *parser.Builder, nameSystems namer.NameSystems, canonicalOrderName string) (*Context, error) { + universe, err := b.FindTypes() + if err != nil { + return nil, err + } + + c := &Context{ + Namers: namer.NameSystems{}, + Universe: universe, + Inputs: b.FindPackages(), + FileTypes: map[string]FileType{ + GolangFileType: NewGolangFile(), + }, + builder: b, + } + + for name, systemNamer := range nameSystems { + c.Namers[name] = systemNamer + if name == canonicalOrderName { + orderer := namer.Orderer{Namer: systemNamer} + c.Order = orderer.OrderUniverse(universe) + } + } + return c, nil +} + +// IncomingImports returns the incoming imports for each package. The map is lazily computed. +func (ctxt *Context) IncomingImports() map[string][]string { + if ctxt.incomingImports == nil { + incoming := map[string][]string{} + for _, pkg := range ctxt.Universe { + for imp := range pkg.Imports { + incoming[imp] = append(incoming[imp], pkg.Path) + } + } + ctxt.incomingImports = incoming + } + return ctxt.incomingImports +} + +// TransitiveIncomingImports returns the transitive closure of the incoming imports for each package. +// The map is lazily computed. +func (ctxt *Context) TransitiveIncomingImports() map[string][]string { + if ctxt.incomingTransitiveImports == nil { + ctxt.incomingTransitiveImports = transitiveClosure(ctxt.IncomingImports()) + } + return ctxt.incomingTransitiveImports +} + +// AddDir adds a Go package to the context. 
The specified path must be a single +// go package import path. GOPATH, GOROOT, and the location of your go binary +// (`which go`) will all be searched, in the normal Go fashion. +// Deprecated. Please use AddDirectory. +func (ctxt *Context) AddDir(path string) error { + ctxt.incomingImports = nil + ctxt.incomingTransitiveImports = nil + return ctxt.builder.AddDirTo(path, &ctxt.Universe) +} + +// AddDirectory adds a Go package to the context. The specified path must be a +// single go package import path. GOPATH, GOROOT, and the location of your go +// binary (`which go`) will all be searched, in the normal Go fashion. +func (ctxt *Context) AddDirectory(path string) (*types.Package, error) { + ctxt.incomingImports = nil + ctxt.incomingTransitiveImports = nil + return ctxt.builder.AddDirectoryTo(path, &ctxt.Universe) +} diff --git a/vendor/k8s.io/gengo/generator/import_tracker.go b/vendor/k8s.io/gengo/generator/import_tracker.go new file mode 100644 index 0000000000..5d87de4ff0 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/import_tracker.go @@ -0,0 +1,70 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generator + +import ( + "go/token" + "strings" + + "k8s.io/klog" + + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +func NewImportTracker(typesToAdd ...*types.Type) namer.ImportTracker { + tracker := namer.NewDefaultImportTracker(types.Name{}) + tracker.IsInvalidType = func(*types.Type) bool { return false } + tracker.LocalName = func(name types.Name) string { return golangTrackerLocalName(&tracker, name) } + tracker.PrintImport = func(path, name string) string { return name + " \"" + path + "\"" } + + tracker.AddTypes(typesToAdd...) + return &tracker + +} + +func golangTrackerLocalName(tracker namer.ImportTracker, t types.Name) string { + path := t.Package + + // Using backslashes in package names causes gengo to produce Go code which + // will not compile with the gc compiler. See the comment on GoSeperator. + if strings.ContainsRune(path, '\\') { + klog.Warningf("Warning: backslash used in import path '%v', this is unsupported.\n", path) + } + + dirs := strings.Split(path, namer.GoSeperator) + for n := len(dirs) - 1; n >= 0; n-- { + // follow kube convention of not having anything between directory names + name := strings.Join(dirs[n:], "") + name = strings.Replace(name, "_", "", -1) + // These characters commonly appear in import paths for go + // packages, but aren't legal go names. So we'll sanitize. + name = strings.Replace(name, ".", "", -1) + name = strings.Replace(name, "-", "", -1) + if _, found := tracker.PathOf(name); found { + // This name collides with some other package + continue + } + + // If the import name is a Go keyword, prefix with an underscore. 
+ if token.Lookup(name).IsKeyword() { + name = "_" + name + } + return name + } + panic("can't find import for " + path) +} diff --git a/vendor/k8s.io/gengo/generator/snippet_writer.go b/vendor/k8s.io/gengo/generator/snippet_writer.go new file mode 100644 index 0000000000..eae917c138 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/snippet_writer.go @@ -0,0 +1,154 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "fmt" + "io" + "runtime" + "text/template" +) + +// SnippetWriter is an attempt to make the template library usable. +// Methods are chainable, and you don't have to check Error() until you're all +// done. +type SnippetWriter struct { + w io.Writer + context *Context + // Left & right delimiters. text/template defaults to "{{" and "}}" + // which is totally unusable for go code based templates. + left, right string + funcMap template.FuncMap + err error +} + +// w is the destination; left and right are the delimiters; @ and $ are both +// reasonable choices. +// +// c is used to make a function for every naming system, to which you can pass +// a type and get the corresponding name. 
+func NewSnippetWriter(w io.Writer, c *Context, left, right string) *SnippetWriter { + sw := &SnippetWriter{ + w: w, + context: c, + left: left, + right: right, + funcMap: template.FuncMap{}, + } + for name, namer := range c.Namers { + sw.funcMap[name] = namer.Name + } + return sw +} + +// Do parses format and runs args through it. You can have arbitrary logic in +// the format (see the text/template documentation), but consider running many +// short templaces, with ordinary go logic in between--this may be more +// readable. Do is chainable. Any error causes every other call to do to be +// ignored, and the error will be returned by Error(). So you can check it just +// once, at the end of your function. +// +// 'args' can be quite literally anything; read the text/template documentation +// for details. Maps and structs work particularly nicely. Conveniently, the +// types package is designed to have structs that are easily referencable from +// the template language. +// +// Example: +// +// sw := generator.NewSnippetWriter(outBuffer, context, "$", "$") +// sw.Do(`The public type name is: $.type|public$`, map[string]interface{}{"type": t}) +// return sw.Error() +// +// Where: +// * "$" starts a template directive +// * "." references the entire thing passed as args +// * "type" therefore sees a map and looks up the key "type" +// * "|" means "pass the thing on the left to the thing on the right" +// * "public" is the name of a naming system, so the SnippetWriter has given +// the template a function called "public" that takes a *types.Type and +// returns the naming system's name. E.g., if the type is "string" this might +// return "String". +// * the second "$" ends the template directive. +// +// The map is actually not necessary. 
The below does the same thing: +// +// sw.Do(`The public type name is: $.|public$`, t) +// +// You may or may not find it more readable to use the map with a descriptive +// key, but if you want to pass more than one arg, the map or a custom struct +// becomes a requirement. You can do arbitrary logic inside these templates, +// but you should consider doing the logic in go and stitching them together +// for the sake of your readers. +// +// TODO: Change Do() to optionally take a list of pairs of parameters (key, value) +// and have it construct a combined map with that and args. +func (s *SnippetWriter) Do(format string, args interface{}) *SnippetWriter { + if s.err != nil { + return s + } + // Name the template by source file:line so it can be found when + // there's an error. + _, file, line, _ := runtime.Caller(1) + tmpl, err := template. + New(fmt.Sprintf("%s:%d", file, line)). + Delims(s.left, s.right). + Funcs(s.funcMap). + Parse(format) + if err != nil { + s.err = err + return s + } + err = tmpl.Execute(s.w, args) + if err != nil { + s.err = err + } + return s +} + +// Args exists to make it convenient to construct arguments for +// SnippetWriter.Do. +type Args map[interface{}]interface{} + +// With makes a copy of a and adds the given key, value pair. +func (a Args) With(key, value interface{}) Args { + a2 := Args{key: value} + for k, v := range a { + a2[k] = v + } + return a2 +} + +// WithArgs makes a copy of a and adds the given arguments. +func (a Args) WithArgs(rhs Args) Args { + a2 := Args{} + for k, v := range rhs { + a2[k] = v + } + for k, v := range a { + a2[k] = v + } + return a2 +} + +func (s *SnippetWriter) Out() io.Writer { + return s.w +} + +// Error returns any encountered error. 
+func (s *SnippetWriter) Error() error { + return s.err +} diff --git a/vendor/k8s.io/gengo/generator/transitive_closure.go b/vendor/k8s.io/gengo/generator/transitive_closure.go new file mode 100644 index 0000000000..385a49fce3 --- /dev/null +++ b/vendor/k8s.io/gengo/generator/transitive_closure.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import "sort" + +type edge struct { + from string + to string +} + +func transitiveClosure(in map[string][]string) map[string][]string { + adj := make(map[edge]bool) + imports := make(map[string]struct{}) + for from, tos := range in { + for _, to := range tos { + adj[edge{from, to}] = true + imports[to] = struct{}{} + } + } + + // Warshall's algorithm + for k := range in { + for i := range in { + if !adj[edge{i, k}] { + continue + } + for j := range imports { + if adj[edge{i, j}] { + continue + } + if adj[edge{k, j}] { + adj[edge{i, j}] = true + } + } + } + } + + out := make(map[string][]string, len(in)) + for i := range in { + for j := range imports { + if adj[edge{i, j}] { + out[i] = append(out[i], j) + } + } + + sort.Strings(out[i]) + } + + return out +} diff --git a/vendor/k8s.io/gengo/namer/doc.go b/vendor/k8s.io/gengo/namer/doc.go new file mode 100644 index 0000000000..8a44ea9959 --- /dev/null +++ b/vendor/k8s.io/gengo/namer/doc.go @@ -0,0 +1,31 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package namer has support for making different type naming systems. +// +// This is because sometimes you want to refer to the literal type, sometimes +// you want to make a name for the thing you're generating, and you want to +// make the name based on the type. For example, if you have `type foo string`, +// you want to be able to generate something like `func FooPrinter(f *foo) { +// Print(string(*f)) }`; that is, you want to refer to a public name, a literal +// name, and the underlying literal name. +// +// This package supports the idea of a "Namer" and a set of "NameSystems" to +// support these use cases. +// +// Additionally, a "RawNamer" can optionally keep track of what needs to be +// imported. +package namer // import "k8s.io/gengo/namer" diff --git a/vendor/k8s.io/gengo/namer/import_tracker.go b/vendor/k8s.io/gengo/namer/import_tracker.go new file mode 100644 index 0000000000..37094b2deb --- /dev/null +++ b/vendor/k8s.io/gengo/namer/import_tracker.go @@ -0,0 +1,112 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "sort" + + "k8s.io/gengo/types" +) + +// DefaultImportTracker may be passed to a namer.RawNamer, to track the imports needed +// for the types it names. +// +// TODO: pay attention to the package name (instead of renaming every package). +type DefaultImportTracker struct { + pathToName map[string]string + // forbidden names are in here. (e.g. "go" is a directory in which + // there is code, but "go" is not a legal name for a package, so we put + // it here to prevent us from naming any package "go") + nameToPath map[string]string + local types.Name + + // Returns true if a given type is invalid and should be ignored. + IsInvalidType func(*types.Type) bool + // Returns the final local name for the given name + LocalName func(types.Name) string + // Returns the "import" line for a given (path, name). 
+ PrintImport func(string, string) string +} + +func NewDefaultImportTracker(local types.Name) DefaultImportTracker { + return DefaultImportTracker{ + pathToName: map[string]string{}, + nameToPath: map[string]string{}, + local: local, + } +} + +func (tracker *DefaultImportTracker) AddTypes(types ...*types.Type) { + for _, t := range types { + tracker.AddType(t) + } +} +func (tracker *DefaultImportTracker) AddType(t *types.Type) { + if tracker.local.Package == t.Name.Package { + return + } + + if tracker.IsInvalidType(t) { + if t.Kind == types.Builtin { + return + } + if _, ok := tracker.nameToPath[t.Name.Package]; !ok { + tracker.nameToPath[t.Name.Package] = "" + } + return + } + + if len(t.Name.Package) == 0 { + return + } + path := t.Name.Path + if len(path) == 0 { + path = t.Name.Package + } + if _, ok := tracker.pathToName[path]; ok { + return + } + name := tracker.LocalName(t.Name) + tracker.nameToPath[name] = path + tracker.pathToName[path] = name +} + +func (tracker *DefaultImportTracker) ImportLines() []string { + importPaths := []string{} + for path := range tracker.pathToName { + importPaths = append(importPaths, path) + } + sort.Sort(sort.StringSlice(importPaths)) + out := []string{} + for _, path := range importPaths { + out = append(out, tracker.PrintImport(path, tracker.pathToName[path])) + } + return out +} + +// LocalNameOf returns the name you would use to refer to the package at the +// specified path within the body of a file. +func (tracker *DefaultImportTracker) LocalNameOf(path string) string { + return tracker.pathToName[path] +} + +// PathOf returns the path that a given localName is referring to within the +// body of a file. 
+func (tracker *DefaultImportTracker) PathOf(localName string) (string, bool) { + name, ok := tracker.nameToPath[localName] + return name, ok +} diff --git a/vendor/k8s.io/gengo/namer/namer.go b/vendor/k8s.io/gengo/namer/namer.go new file mode 100644 index 0000000000..d700a00a53 --- /dev/null +++ b/vendor/k8s.io/gengo/namer/namer.go @@ -0,0 +1,383 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "path/filepath" + "strings" + + "k8s.io/gengo/types" +) + +const ( + // GoSeperator is used to split go import paths. + // Forward slash is used instead of filepath.Separator because it is the + // only universally-accepted path delimiter and the only delimiter not + // potentially forbidden by Go compilers. (In particular gc does not allow + // the use of backslashes in import paths.) + // See https://golang.org/ref/spec#Import_declarations. + // See also https://github.com/kubernetes/gengo/issues/83#issuecomment-367040772. + GoSeperator = "/" +) + +// Returns whether a name is a private Go name. +func IsPrivateGoName(name string) bool { + return len(name) == 0 || strings.ToLower(name[:1]) == name[:1] +} + +// NewPublicNamer is a helper function that returns a namer that makes +// CamelCase names. See the NameStrategy struct for an explanation of the +// arguments to this constructor. 
+func NewPublicNamer(prependPackageNames int, ignoreWords ...string) *NameStrategy { + n := &NameStrategy{ + Join: Joiner(IC, IC), + IgnoreWords: map[string]bool{}, + PrependPackageNames: prependPackageNames, + } + for _, w := range ignoreWords { + n.IgnoreWords[w] = true + } + return n +} + +// NewPrivateNamer is a helper function that returns a namer that makes +// camelCase names. See the NameStrategy struct for an explanation of the +// arguments to this constructor. +func NewPrivateNamer(prependPackageNames int, ignoreWords ...string) *NameStrategy { + n := &NameStrategy{ + Join: Joiner(IL, IC), + IgnoreWords: map[string]bool{}, + PrependPackageNames: prependPackageNames, + } + for _, w := range ignoreWords { + n.IgnoreWords[w] = true + } + return n +} + +// NewRawNamer will return a Namer that makes a name by which you would +// directly refer to a type, optionally keeping track of the import paths +// necessary to reference the names it provides. Tracker may be nil. +// The 'pkg' is the full package name, in which the Namer is used - all +// types from that package will be referenced by just type name without +// referencing the package. +// +// For example, if the type is map[string]int, a raw namer will literally +// return "map[string]int". +// +// Or if the type, in package foo, is "type Bar struct { ... }", then the raw +// namer will return "foo.Bar" as the name of the type, and if 'tracker' was +// not nil, will record that package foo needs to be imported. +func NewRawNamer(pkg string, tracker ImportTracker) *rawNamer { + return &rawNamer{pkg: pkg, tracker: tracker} +} + +// Names is a map from Type to name, as defined by some Namer. +type Names map[*types.Type]string + +// Namer takes a type, and assigns a name. +// +// The purpose of this complexity is so that you can assign coherent +// side-by-side systems of names for the types. 
For example, you might want a +// public interface, a private implementation struct, and also to reference +// literally the type name. +// +// Note that it is safe to call your own Name() function recursively to find +// the names of keys, elements, etc. This is because anonymous types can't have +// cycles in their names, and named types don't require the sort of recursion +// that would be problematic. +type Namer interface { + Name(*types.Type) string +} + +// NameSystems is a map of a system name to a namer for that system. +type NameSystems map[string]Namer + +// NameStrategy is a general Namer. The easiest way to use it is to copy the +// Public/PrivateNamer variables, and modify the members you wish to change. +// +// The Name method produces a name for the given type, of the forms: +// Anonymous types: +// Named types: +// +// In all cases, every part of the name is run through the capitalization +// functions. +// +// The IgnoreWords map can be set if you have directory names that are +// semantically meaningless for naming purposes, e.g. "proto". +// +// Prefix and Suffix can be used to disambiguate parallel systems of type +// names. For example, if you want to generate an interface and an +// implementation, you might want to suffix one with "Interface" and the other +// with "Implementation". Another common use-- if you want to generate private +// types, and one of your source types could be "string", you can't use the +// default lowercase private namer. You'll have to add a suffix or prefix. +type NameStrategy struct { + Prefix, Suffix string + Join func(pre string, parts []string, post string) string + + // Add non-meaningful package directory names here (e.g. "proto") and + // they will be ignored. + IgnoreWords map[string]bool + + // If > 0, prepend exactly that many package directory names (or as + // many as there are). Package names listed in "IgnoreWords" will be + // ignored. 
+ // + // For example, if Ignore words lists "proto" and type Foo is in + // pkg/server/frobbing/proto, then a value of 1 will give a type name + // of FrobbingFoo, 2 gives ServerFrobbingFoo, etc. + PrependPackageNames int + + // A cache of names thus far assigned by this namer. + Names +} + +// IC ensures the first character is uppercase. +func IC(in string) string { + if in == "" { + return in + } + return strings.ToUpper(in[:1]) + in[1:] +} + +// IL ensures the first character is lowercase. +func IL(in string) string { + if in == "" { + return in + } + return strings.ToLower(in[:1]) + in[1:] +} + +// Joiner lets you specify functions that preprocess the various components of +// a name before joining them. You can construct e.g. camelCase or CamelCase or +// any other way of joining words. (See the IC and IL convenience functions.) +func Joiner(first, others func(string) string) func(pre string, in []string, post string) string { + return func(pre string, in []string, post string) string { + tmp := []string{others(pre)} + for i := range in { + tmp = append(tmp, others(in[i])) + } + tmp = append(tmp, others(post)) + return first(strings.Join(tmp, "")) + } +} + +func (ns *NameStrategy) removePrefixAndSuffix(s string) string { + // The join function may have changed capitalization. + lowerIn := strings.ToLower(s) + lowerP := strings.ToLower(ns.Prefix) + lowerS := strings.ToLower(ns.Suffix) + b, e := 0, len(s) + if strings.HasPrefix(lowerIn, lowerP) { + b = len(ns.Prefix) + } + if strings.HasSuffix(lowerIn, lowerS) { + e -= len(ns.Suffix) + } + return s[b:e] +} + +var ( + importPathNameSanitizer = strings.NewReplacer("-", "_", ".", "") +) + +// filters out unwanted directory names and sanitizes remaining names. 
+func (ns *NameStrategy) filterDirs(path string) []string { + allDirs := strings.Split(path, GoSeperator) + dirs := make([]string, 0, len(allDirs)) + for _, p := range allDirs { + if ns.IgnoreWords == nil || !ns.IgnoreWords[p] { + dirs = append(dirs, importPathNameSanitizer.Replace(p)) + } + } + return dirs +} + +// See the comment on NameStrategy. +func (ns *NameStrategy) Name(t *types.Type) string { + if ns.Names == nil { + ns.Names = Names{} + } + if s, ok := ns.Names[t]; ok { + return s + } + + if t.Name.Package != "" { + dirs := append(ns.filterDirs(t.Name.Package), t.Name.Name) + i := ns.PrependPackageNames + 1 + dn := len(dirs) + if i > dn { + i = dn + } + name := ns.Join(ns.Prefix, dirs[dn-i:], ns.Suffix) + ns.Names[t] = name + return name + } + + // Only anonymous types remain. + var name string + switch t.Kind { + case types.Builtin: + name = ns.Join(ns.Prefix, []string{t.Name.Name}, ns.Suffix) + case types.Map: + name = ns.Join(ns.Prefix, []string{ + "Map", + ns.removePrefixAndSuffix(ns.Name(t.Key)), + "To", + ns.removePrefixAndSuffix(ns.Name(t.Elem)), + }, ns.Suffix) + case types.Slice: + name = ns.Join(ns.Prefix, []string{ + "Slice", + ns.removePrefixAndSuffix(ns.Name(t.Elem)), + }, ns.Suffix) + case types.Pointer: + name = ns.Join(ns.Prefix, []string{ + "Pointer", + ns.removePrefixAndSuffix(ns.Name(t.Elem)), + }, ns.Suffix) + case types.Struct: + names := []string{"Struct"} + for _, m := range t.Members { + names = append(names, ns.removePrefixAndSuffix(ns.Name(m.Type))) + } + name = ns.Join(ns.Prefix, names, ns.Suffix) + case types.Chan: + name = ns.Join(ns.Prefix, []string{ + "Chan", + ns.removePrefixAndSuffix(ns.Name(t.Elem)), + }, ns.Suffix) + case types.Interface: + // TODO: add to name test + names := []string{"Interface"} + for _, m := range t.Methods { + // TODO: include function signature + names = append(names, m.Name.Name) + } + name = ns.Join(ns.Prefix, names, ns.Suffix) + case types.Func: + // TODO: add to name test + parts := 
[]string{"Func"} + for _, pt := range t.Signature.Parameters { + parts = append(parts, ns.removePrefixAndSuffix(ns.Name(pt))) + } + parts = append(parts, "Returns") + for _, rt := range t.Signature.Results { + parts = append(parts, ns.removePrefixAndSuffix(ns.Name(rt))) + } + name = ns.Join(ns.Prefix, parts, ns.Suffix) + default: + name = "unnameable_" + string(t.Kind) + } + ns.Names[t] = name + return name +} + +// ImportTracker allows a raw namer to keep track of the packages needed for +// import. You can implement yourself or use the one in the generation package. +type ImportTracker interface { + AddType(*types.Type) + LocalNameOf(packagePath string) string + PathOf(localName string) (string, bool) + ImportLines() []string +} + +type rawNamer struct { + pkg string + tracker ImportTracker + Names +} + +// Name makes a name the way you'd write it to literally refer to type t, +// making ordinary assumptions about how you've imported t's package (or using +// r.tracker to specifically track the package imports). +func (r *rawNamer) Name(t *types.Type) string { + if r.Names == nil { + r.Names = Names{} + } + if name, ok := r.Names[t]; ok { + return name + } + if t.Name.Package != "" { + var name string + if r.tracker != nil { + r.tracker.AddType(t) + if t.Name.Package == r.pkg { + name = t.Name.Name + } else { + name = r.tracker.LocalNameOf(t.Name.Package) + "." + t.Name.Name + } + } else { + if t.Name.Package == r.pkg { + name = t.Name.Name + } else { + name = filepath.Base(t.Name.Package) + "." 
+ t.Name.Name + } + } + r.Names[t] = name + return name + } + var name string + switch t.Kind { + case types.Builtin: + name = t.Name.Name + case types.Map: + name = "map[" + r.Name(t.Key) + "]" + r.Name(t.Elem) + case types.Slice: + name = "[]" + r.Name(t.Elem) + case types.Pointer: + name = "*" + r.Name(t.Elem) + case types.Struct: + elems := []string{} + for _, m := range t.Members { + elems = append(elems, m.Name+" "+r.Name(m.Type)) + } + name = "struct{" + strings.Join(elems, "; ") + "}" + case types.Chan: + // TODO: include directionality + name = "chan " + r.Name(t.Elem) + case types.Interface: + // TODO: add to name test + elems := []string{} + for _, m := range t.Methods { + // TODO: include function signature + elems = append(elems, m.Name.Name) + } + name = "interface{" + strings.Join(elems, "; ") + "}" + case types.Func: + // TODO: add to name test + params := []string{} + for _, pt := range t.Signature.Parameters { + params = append(params, r.Name(pt)) + } + results := []string{} + for _, rt := range t.Signature.Results { + results = append(results, r.Name(rt)) + } + name = "func(" + strings.Join(params, ",") + ")" + if len(results) == 1 { + name += " " + results[0] + } else if len(results) > 1 { + name += " (" + strings.Join(results, ",") + ")" + } + default: + name = "unnameable_" + string(t.Kind) + } + r.Names[t] = name + return name +} diff --git a/vendor/k8s.io/gengo/namer/order.go b/vendor/k8s.io/gengo/namer/order.go new file mode 100644 index 0000000000..fd89be9b08 --- /dev/null +++ b/vendor/k8s.io/gengo/namer/order.go @@ -0,0 +1,72 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "sort" + + "k8s.io/gengo/types" +) + +// Orderer produces an ordering of types given a Namer. +type Orderer struct { + Namer +} + +// OrderUniverse assigns a name to every type in the Universe, including Types, +// Functions and Variables, and returns a list sorted by those names. +func (o *Orderer) OrderUniverse(u types.Universe) []*types.Type { + list := tList{ + namer: o.Namer, + } + for _, p := range u { + for _, t := range p.Types { + list.types = append(list.types, t) + } + for _, f := range p.Functions { + list.types = append(list.types, f) + } + for _, v := range p.Variables { + list.types = append(list.types, v) + } + for _, v := range p.Constants { + list.types = append(list.types, v) + } + } + sort.Sort(list) + return list.types +} + +// OrderTypes assigns a name to every type, and returns a list sorted by those +// names. 
+func (o *Orderer) OrderTypes(typeList []*types.Type) []*types.Type { + list := tList{ + namer: o.Namer, + types: typeList, + } + sort.Sort(list) + return list.types +} + +type tList struct { + namer Namer + types []*types.Type +} + +func (t tList) Len() int { return len(t.types) } +func (t tList) Less(i, j int) bool { return t.namer.Name(t.types[i]) < t.namer.Name(t.types[j]) } +func (t tList) Swap(i, j int) { t.types[i], t.types[j] = t.types[j], t.types[i] } diff --git a/vendor/k8s.io/gengo/namer/plural_namer.go b/vendor/k8s.io/gengo/namer/plural_namer.go new file mode 100644 index 0000000000..0e3ebbf262 --- /dev/null +++ b/vendor/k8s.io/gengo/namer/plural_namer.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "strings" + + "k8s.io/gengo/types" +) + +var consonants = "bcdfghjklmnpqrstvwxyz" + +type pluralNamer struct { + // key is the case-sensitive type name, value is the case-insensitive + // intended output. + exceptions map[string]string + finalize func(string) string +} + +// NewPublicPluralNamer returns a namer that returns the plural form of the input +// type's name, starting with an uppercase letter. +func NewPublicPluralNamer(exceptions map[string]string) *pluralNamer { + return &pluralNamer{exceptions, IC} +} + +// NewPrivatePluralNamer returns a namer that returns the plural form of the input +// type's name, starting with a lowercase letter. 
+func NewPrivatePluralNamer(exceptions map[string]string) *pluralNamer { + return &pluralNamer{exceptions, IL} +} + +// NewAllLowercasePluralNamer returns a namer that returns the plural form of the input +// type's name, with all letters in lowercase. +func NewAllLowercasePluralNamer(exceptions map[string]string) *pluralNamer { + return &pluralNamer{exceptions, strings.ToLower} +} + +// Name returns the plural form of the type's name. If the type's name is found +// in the exceptions map, the map value is returned. +func (r *pluralNamer) Name(t *types.Type) string { + singular := t.Name.Name + var plural string + var ok bool + if plural, ok = r.exceptions[singular]; ok { + return r.finalize(plural) + } + if len(singular) < 2 { + return r.finalize(singular) + } + + switch rune(singular[len(singular)-1]) { + case 's', 'x', 'z': + plural = esPlural(singular) + case 'y': + sl := rune(singular[len(singular)-2]) + if isConsonant(sl) { + plural = iesPlural(singular) + } else { + plural = sPlural(singular) + } + case 'h': + sl := rune(singular[len(singular)-2]) + if sl == 'c' || sl == 's' { + plural = esPlural(singular) + } else { + plural = sPlural(singular) + } + case 'e': + sl := rune(singular[len(singular)-2]) + if sl == 'f' { + plural = vesPlural(singular[:len(singular)-1]) + } else { + plural = sPlural(singular) + } + case 'f': + plural = vesPlural(singular) + default: + plural = sPlural(singular) + } + return r.finalize(plural) +} + +func iesPlural(singular string) string { + return singular[:len(singular)-1] + "ies" +} + +func vesPlural(singular string) string { + return singular[:len(singular)-1] + "ves" +} + +func esPlural(singular string) string { + return singular + "es" +} + +func sPlural(singular string) string { + return singular + "s" +} + +func isConsonant(char rune) bool { + for _, c := range consonants { + if char == c { + return true + } + } + return false +} diff --git a/vendor/k8s.io/gengo/parser/doc.go b/vendor/k8s.io/gengo/parser/doc.go new file 
mode 100644 index 0000000000..8231b6d432 --- /dev/null +++ b/vendor/k8s.io/gengo/parser/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package parser provides code to parse go files, type-check them, extract the +// types. +package parser // import "k8s.io/gengo/parser" diff --git a/vendor/k8s.io/gengo/parser/parse.go b/vendor/k8s.io/gengo/parser/parse.go new file mode 100644 index 0000000000..f3abe57cce --- /dev/null +++ b/vendor/k8s.io/gengo/parser/parse.go @@ -0,0 +1,859 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parser + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + tc "go/types" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "sort" + "strings" + + "k8s.io/gengo/types" + "k8s.io/klog" +) + +// This clarifies when a pkg path has been canonicalized. 
+type importPathString string + +// Builder lets you add all the go files in all the packages that you care +// about, then constructs the type source data. +type Builder struct { + context *build.Context + + // If true, include *_test.go + IncludeTestFiles bool + + // Map of package names to more canonical information about the package. + // This might hold the same value for multiple names, e.g. if someone + // referenced ./pkg/name or in the case of vendoring, which canonicalizes + // differently than what humans would type. + buildPackages map[string]*build.Package + + fset *token.FileSet + // map of package path to list of parsed files + parsed map[importPathString][]parsedFile + // map of package path to absolute path (to prevent overlap) + absPaths map[importPathString]string + + // Set by typeCheckPackage(), used by importPackage() and friends. + typeCheckedPackages map[importPathString]*tc.Package + + // Map of package path to whether the user requested it or it was from + // an import. + userRequested map[importPathString]bool + + // All comments from everywhere in every parsed file. + endLineToCommentGroup map[fileLine]*ast.CommentGroup + + // map of package to list of packages it imports. + importGraph map[importPathString]map[string]struct{} +} + +// parsedFile is for tracking files with name +type parsedFile struct { + name string + file *ast.File +} + +// key type for finding comments. +type fileLine struct { + file string + line int +} + +// New constructs a new builder. +func New() *Builder { + c := build.Default + if c.GOROOT == "" { + if p, err := exec.Command("which", "go").CombinedOutput(); err == nil { + // The returned string will have some/path/bin/go, so remove the last two elements. + c.GOROOT = filepath.Dir(filepath.Dir(strings.Trim(string(p), "\n"))) + } else { + klog.Warningf("Warning: $GOROOT not set, and unable to run `which go` to find it: %v\n", err) + } + } + // Force this to off, since we don't properly parse CGo. 
All symbols must + // have non-CGo equivalents. + c.CgoEnabled = false + return &Builder{ + context: &c, + buildPackages: map[string]*build.Package{}, + typeCheckedPackages: map[importPathString]*tc.Package{}, + fset: token.NewFileSet(), + parsed: map[importPathString][]parsedFile{}, + absPaths: map[importPathString]string{}, + userRequested: map[importPathString]bool{}, + endLineToCommentGroup: map[fileLine]*ast.CommentGroup{}, + importGraph: map[importPathString]map[string]struct{}{}, + } +} + +// AddBuildTags adds the specified build tags to the parse context. +func (b *Builder) AddBuildTags(tags ...string) { + b.context.BuildTags = append(b.context.BuildTags, tags...) +} + +// Get package information from the go/build package. Automatically excludes +// e.g. test files and files for other platforms-- there is quite a bit of +// logic of that nature in the build package. +func (b *Builder) importBuildPackage(dir string) (*build.Package, error) { + if buildPkg, ok := b.buildPackages[dir]; ok { + return buildPkg, nil + } + // This validates the `package foo // github.com/bar/foo` comments. + buildPkg, err := b.importWithMode(dir, build.ImportComment) + if err != nil { + if _, ok := err.(*build.NoGoError); !ok { + return nil, fmt.Errorf("unable to import %q: %v", dir, err) + } + } + if buildPkg == nil { + // Might be an empty directory. Try to just find the dir. + buildPkg, err = b.importWithMode(dir, build.FindOnly) + if err != nil { + return nil, err + } + } + + // Remember it under the user-provided name. + klog.V(5).Infof("saving buildPackage %s", dir) + b.buildPackages[dir] = buildPkg + canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) + if dir != string(canonicalPackage) { + // Since `dir` is not the canonical name, see if we knew it under another name. + if buildPkg, ok := b.buildPackages[string(canonicalPackage)]; ok { + return buildPkg, nil + } + // Must be new, save it under the canonical name, too. 
+ klog.V(5).Infof("saving buildPackage %s", canonicalPackage) + b.buildPackages[string(canonicalPackage)] = buildPkg + } + + return buildPkg, nil +} + +// AddFileForTest adds a file to the set, without verifying that the provided +// pkg actually exists on disk. The pkg must be of the form "canonical/pkg/path" +// and the path must be the absolute path to the file. Because this bypasses +// the normal recursive finding of package dependencies (on disk), test should +// sort their test files topologically first, so all deps are resolved by the +// time we need them. +func (b *Builder) AddFileForTest(pkg string, path string, src []byte) error { + if err := b.addFile(importPathString(pkg), path, src, true); err != nil { + return err + } + if _, err := b.typeCheckPackage(importPathString(pkg)); err != nil { + return err + } + return nil +} + +// addFile adds a file to the set. The pkgPath must be of the form +// "canonical/pkg/path" and the path must be the absolute path to the file. A +// flag indicates whether this file was user-requested or just from following +// the import graph. +func (b *Builder) addFile(pkgPath importPathString, path string, src []byte, userRequested bool) error { + for _, p := range b.parsed[pkgPath] { + if path == p.name { + klog.V(5).Infof("addFile %s %s already parsed, skipping", pkgPath, path) + return nil + } + } + klog.V(6).Infof("addFile %s %s", pkgPath, path) + p, err := parser.ParseFile(b.fset, path, src, parser.DeclarationErrors|parser.ParseComments) + if err != nil { + return err + } + + // This is redundant with addDir, but some tests call AddFileForTest, which + // call into here without calling addDir. 
+ b.userRequested[pkgPath] = userRequested || b.userRequested[pkgPath] + + b.parsed[pkgPath] = append(b.parsed[pkgPath], parsedFile{path, p}) + for _, c := range p.Comments { + position := b.fset.Position(c.End()) + b.endLineToCommentGroup[fileLine{position.Filename, position.Line}] = c + } + + // We have to get the packages from this specific file, in case the + // user added individual files instead of entire directories. + if b.importGraph[pkgPath] == nil { + b.importGraph[pkgPath] = map[string]struct{}{} + } + for _, im := range p.Imports { + importedPath := strings.Trim(im.Path.Value, `"`) + b.importGraph[pkgPath][importedPath] = struct{}{} + } + return nil +} + +// AddDir adds an entire directory, scanning it for go files. 'dir' should have +// a single go package in it. GOPATH, GOROOT, and the location of your go +// binary (`which go`) will all be searched if dir doesn't literally resolve. +func (b *Builder) AddDir(dir string) error { + _, err := b.importPackage(dir, true) + return err +} + +// AddDirRecursive is just like AddDir, but it also recursively adds +// subdirectories; it returns an error only if the path couldn't be resolved; +// any directories recursed into without go source are ignored. +func (b *Builder) AddDirRecursive(dir string) error { + // Add the root. + if _, err := b.importPackage(dir, true); err != nil { + klog.Warningf("Ignoring directory %v: %v", dir, err) + } + + // filepath.Walk does not follow symlinks. We therefore evaluate symlinks and use that with + // filepath.Walk. + realPath, err := filepath.EvalSymlinks(b.buildPackages[dir].Dir) + if err != nil { + return err + } + + fn := func(filePath string, info os.FileInfo, err error) error { + if info != nil && info.IsDir() { + rel := filepath.ToSlash(strings.TrimPrefix(filePath, realPath)) + if rel != "" { + // Make a pkg path. + pkg := path.Join(string(canonicalizeImportPath(b.buildPackages[dir].ImportPath)), rel) + + // Add it. 
+ if _, err := b.importPackage(pkg, true); err != nil { + klog.Warningf("Ignoring child directory %v: %v", pkg, err) + } + } + } + return nil + } + if err := filepath.Walk(realPath, fn); err != nil { + return err + } + return nil +} + +// AddDirTo adds an entire directory to a given Universe. Unlike AddDir, this +// processes the package immediately, which makes it safe to use from within a +// generator (rather than just at init time). 'dir' must be a single go package. +// GOPATH, GOROOT, and the location of your go binary (`which go`) will all be +// searched if dir doesn't literally resolve. +// Deprecated. Please use AddDirectoryTo. +func (b *Builder) AddDirTo(dir string, u *types.Universe) error { + // We want all types from this package, as if they were directly added + // by the user. They WERE added by the user, in effect. + if _, err := b.importPackage(dir, true); err != nil { + return err + } + return b.findTypesIn(canonicalizeImportPath(b.buildPackages[dir].ImportPath), u) +} + +// AddDirectoryTo adds an entire directory to a given Universe. Unlike AddDir, +// this processes the package immediately, which makes it safe to use from +// within a generator (rather than just at init time). 'dir' must be a single go +// package. GOPATH, GOROOT, and the location of your go binary (`which go`) +// will all be searched if dir doesn't literally resolve. +func (b *Builder) AddDirectoryTo(dir string, u *types.Universe) (*types.Package, error) { + // We want all types from this package, as if they were directly added + // by the user. They WERE added by the user, in effect. + if _, err := b.importPackage(dir, true); err != nil { + return nil, err + } + path := canonicalizeImportPath(b.buildPackages[dir].ImportPath) + if err := b.findTypesIn(path, u); err != nil { + return nil, err + } + return u.Package(string(path)), nil +} + +// The implementation of AddDir. A flag indicates whether this directory was +// user-requested or just from following the import graph.
+func (b *Builder) addDir(dir string, userRequested bool) error { + klog.V(5).Infof("addDir %s", dir) + buildPkg, err := b.importBuildPackage(dir) + if err != nil { + return err + } + canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) + pkgPath := canonicalPackage + if dir != string(canonicalPackage) { + klog.V(5).Infof("addDir %s, canonical path is %s", dir, pkgPath) + } + + // Sanity check the pkg dir has not changed. + if prev, found := b.absPaths[pkgPath]; found { + if buildPkg.Dir != prev { + return fmt.Errorf("package %q (%s) previously resolved to %s", pkgPath, buildPkg.Dir, prev) + } + } else { + b.absPaths[pkgPath] = buildPkg.Dir + } + + files := []string{} + files = append(files, buildPkg.GoFiles...) + if b.IncludeTestFiles { + files = append(files, buildPkg.TestGoFiles...) + } + + for _, file := range files { + if !strings.HasSuffix(file, ".go") { + continue + } + absPath := filepath.Join(buildPkg.Dir, file) + data, err := ioutil.ReadFile(absPath) + if err != nil { + return fmt.Errorf("while loading %q: %v", absPath, err) + } + err = b.addFile(pkgPath, absPath, data, userRequested) + if err != nil { + return fmt.Errorf("while parsing %q: %v", absPath, err) + } + } + return nil +} + +var regexErrPackageNotFound = regexp.MustCompile(`^unable to import ".*?": cannot find package ".*?" in any of:`) + +func isErrPackageNotFound(err error) bool { + return regexErrPackageNotFound.MatchString(err.Error()) +} + +// importPackage is a function that will be called by the type check package when it +// needs to import a go package. 'path' is the import path. +func (b *Builder) importPackage(dir string, userRequested bool) (*tc.Package, error) { + klog.V(5).Infof("importPackage %s", dir) + var pkgPath = importPathString(dir) + + // Get the canonical path if we can. 
+ if buildPkg := b.buildPackages[dir]; buildPkg != nil { + canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) + klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage) + pkgPath = canonicalPackage + } + + // If we have not seen this before, process it now. + ignoreError := false + if _, found := b.parsed[pkgPath]; !found { + // Ignore errors in paths that we're importing solely because + // they're referenced by other packages. + ignoreError = true + + // Add it. + if err := b.addDir(dir, userRequested); err != nil { + if isErrPackageNotFound(err) { + klog.V(6).Info(err) + return nil, nil + } + + return nil, err + } + + // Get the canonical path now that it has been added. + if buildPkg := b.buildPackages[dir]; buildPkg != nil { + canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) + klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage) + pkgPath = canonicalPackage + } + } + + // If it was previously known, just check that the user-requestedness hasn't + // changed. + b.userRequested[pkgPath] = userRequested || b.userRequested[pkgPath] + + // Run the type checker. We may end up doing this to pkgs that are already + // done, or are in the queue to be done later, but it will short-circuit, + // and we can't miss pkgs that are only depended on. 
+ pkg, err := b.typeCheckPackage(pkgPath) + if err != nil { + switch { + case ignoreError && pkg != nil: + klog.V(2).Infof("type checking encountered some issues in %q, but ignoring.\n", pkgPath) + case !ignoreError && pkg != nil: + klog.V(2).Infof("type checking encountered some errors in %q\n", pkgPath) + return nil, err + default: + return nil, err + } + } + + return pkg, nil +} + +type importAdapter struct { + b *Builder +} + +func (a importAdapter) Import(path string) (*tc.Package, error) { + return a.b.importPackage(path, false) +} + +// typeCheckPackage will attempt to return the package even if there are some +// errors, so you may check whether the package is nil or not even if you get +// an error. +func (b *Builder) typeCheckPackage(pkgPath importPathString) (*tc.Package, error) { + klog.V(5).Infof("typeCheckPackage %s", pkgPath) + if pkg, ok := b.typeCheckedPackages[pkgPath]; ok { + if pkg != nil { + klog.V(6).Infof("typeCheckPackage %s already done", pkgPath) + return pkg, nil + } + // We store a nil right before starting work on a package. So + // if we get here and it's present and nil, that means there's + // another invocation of this function on the call stack + // already processing this package. + return nil, fmt.Errorf("circular dependency for %q", pkgPath) + } + parsedFiles, ok := b.parsed[pkgPath] + if !ok { + return nil, fmt.Errorf("No files for pkg %q", pkgPath) + } + files := make([]*ast.File, len(parsedFiles)) + for i := range parsedFiles { + files[i] = parsedFiles[i].file + } + b.typeCheckedPackages[pkgPath] = nil + c := tc.Config{ + IgnoreFuncBodies: true, + // Note that importAdapter can call b.importPackage which calls this + // method. So there can't be cycles in the import graph. 
+ Importer: importAdapter{b}, + Error: func(err error) { + klog.V(2).Infof("type checker: %v\n", err) + }, + } + pkg, err := c.Check(string(pkgPath), b.fset, files, nil) + b.typeCheckedPackages[pkgPath] = pkg // record the result whether or not there was an error + return pkg, err +} + +// FindPackages fetches a list of the user-imported packages. +// Note that you need to call b.FindTypes() first. +func (b *Builder) FindPackages() []string { + // Iterate packages in a predictable order. + pkgPaths := []string{} + for k := range b.typeCheckedPackages { + pkgPaths = append(pkgPaths, string(k)) + } + sort.Strings(pkgPaths) + + result := []string{} + for _, pkgPath := range pkgPaths { + if b.userRequested[importPathString(pkgPath)] { + // Since walkType is recursive, all types that are in packages that + // were directly mentioned will be included. We don't need to + // include all types in all transitive packages, though. + result = append(result, pkgPath) + } + } + return result +} + +// FindTypes finalizes the package imports, and searches through all the +// packages for types. +func (b *Builder) FindTypes() (types.Universe, error) { + // Take a snapshot of pkgs to iterate, since this will recursively mutate + // b.parsed. Iterate in a predictable order. + pkgPaths := []string{} + for pkgPath := range b.parsed { + pkgPaths = append(pkgPaths, string(pkgPath)) + } + sort.Strings(pkgPaths) + + u := types.Universe{} + for _, pkgPath := range pkgPaths { + if err := b.findTypesIn(importPathString(pkgPath), &u); err != nil { + return nil, err + } + } + return u, nil +} + +// findTypesIn finalizes the package import and searches through the package +// for types. 
+func (b *Builder) findTypesIn(pkgPath importPathString, u *types.Universe) error { + klog.V(5).Infof("findTypesIn %s", pkgPath) + pkg := b.typeCheckedPackages[pkgPath] + if pkg == nil { + return fmt.Errorf("findTypesIn(%s): package is not known", pkgPath) + } + if !b.userRequested[pkgPath] { + // Since walkType is recursive, all types that the + // packages they asked for depend on will be included. + // But we don't need to include all types in all + // *packages* they depend on. + klog.V(5).Infof("findTypesIn %s: package is not user requested", pkgPath) + return nil + } + + // We're keeping this package. This call will create the record. + u.Package(string(pkgPath)).Name = pkg.Name() + u.Package(string(pkgPath)).Path = pkg.Path() + u.Package(string(pkgPath)).SourcePath = b.absPaths[pkgPath] + + for _, f := range b.parsed[pkgPath] { + if _, fileName := filepath.Split(f.name); fileName == "doc.go" { + tp := u.Package(string(pkgPath)) + // findTypesIn might be called multiple times. Clean up tp.Comments + // to avoid repeatedly fill same comments to it. + tp.Comments = []string{} + for i := range f.file.Comments { + tp.Comments = append(tp.Comments, splitLines(f.file.Comments[i].Text())...) + } + if f.file.Doc != nil { + tp.DocComments = splitLines(f.file.Doc.Text()) + } + } + } + + s := pkg.Scope() + for _, n := range s.Names() { + obj := s.Lookup(n) + tn, ok := obj.(*tc.TypeName) + if ok { + t := b.walkType(*u, nil, tn.Type()) + c1 := b.priorCommentLines(obj.Pos(), 1) + // c1.Text() is safe if c1 is nil + t.CommentLines = splitLines(c1.Text()) + if c1 == nil { + t.SecondClosestCommentLines = splitLines(b.priorCommentLines(obj.Pos(), 2).Text()) + } else { + t.SecondClosestCommentLines = splitLines(b.priorCommentLines(c1.List[0].Slash, 2).Text()) + } + } + tf, ok := obj.(*tc.Func) + // We only care about functions, not concrete/abstract methods. 
+ if ok && tf.Type() != nil && tf.Type().(*tc.Signature).Recv() == nil { + t := b.addFunction(*u, nil, tf) + c1 := b.priorCommentLines(obj.Pos(), 1) + // c1.Text() is safe if c1 is nil + t.CommentLines = splitLines(c1.Text()) + if c1 == nil { + t.SecondClosestCommentLines = splitLines(b.priorCommentLines(obj.Pos(), 2).Text()) + } else { + t.SecondClosestCommentLines = splitLines(b.priorCommentLines(c1.List[0].Slash, 2).Text()) + } + } + tv, ok := obj.(*tc.Var) + if ok && !tv.IsField() { + b.addVariable(*u, nil, tv) + } + tconst, ok := obj.(*tc.Const) + if ok { + b.addConstant(*u, nil, tconst) + } + } + + importedPkgs := []string{} + for k := range b.importGraph[pkgPath] { + importedPkgs = append(importedPkgs, string(k)) + } + sort.Strings(importedPkgs) + for _, p := range importedPkgs { + u.AddImports(string(pkgPath), p) + } + return nil +} + +func (b *Builder) importWithMode(dir string, mode build.ImportMode) (*build.Package, error) { + // This is a bit of a hack. The srcDir argument to Import() should + // properly be the dir of the file which depends on the package to be + // imported, so that vendoring can work properly and local paths can + // resolve. We assume that there is only one level of vendoring, and that + // the CWD is inside the GOPATH, so this should be safe. Nobody should be + // using local (relative) paths except on the CLI, so CWD is also + // sufficient. + cwd, err := os.Getwd() + if err != nil { + return nil, fmt.Errorf("unable to get current directory: %v", err) + } + buildPkg, err := b.context.Import(dir, cwd, mode) + if err != nil { + return nil, err + } + return buildPkg, nil +} + +// if there's a comment on the line `lines` before pos, return its text, otherwise "". 
+func (b *Builder) priorCommentLines(pos token.Pos, lines int) *ast.CommentGroup { + position := b.fset.Position(pos) + key := fileLine{position.Filename, position.Line - lines} + return b.endLineToCommentGroup[key] +} + +func splitLines(str string) []string { + return strings.Split(strings.TrimRight(str, "\n"), "\n") +} + +func tcFuncNameToName(in string) types.Name { + name := strings.TrimPrefix(in, "func ") + nameParts := strings.Split(name, "(") + return tcNameToName(nameParts[0]) +} + +func tcVarNameToName(in string) types.Name { + nameParts := strings.Split(in, " ") + // nameParts[0] is "var". + // nameParts[2:] is the type of the variable, we ignore it for now. + return tcNameToName(nameParts[1]) +} + +func tcNameToName(in string) types.Name { + // Detect anonymous type names. (These may have '.' characters because + // embedded types may have packages, so we detect them specially.) + if strings.HasPrefix(in, "struct{") || + strings.HasPrefix(in, "<-chan") || + strings.HasPrefix(in, "chan<-") || + strings.HasPrefix(in, "chan ") || + strings.HasPrefix(in, "func(") || + strings.HasPrefix(in, "*") || + strings.HasPrefix(in, "map[") || + strings.HasPrefix(in, "[") { + return types.Name{Name: in} + } + + // Otherwise, if there are '.' characters present, the name has a + // package path in front. + nameParts := strings.Split(in, ".") + name := types.Name{Name: in} + if n := len(nameParts); n >= 2 { + // The final "." is the name of the type--previous ones must + // have been in the package path. 
+ name.Package, name.Name = strings.Join(nameParts[:n-1], "."), nameParts[n-1] + } + return name +} + +func (b *Builder) convertSignature(u types.Universe, t *tc.Signature) *types.Signature { + signature := &types.Signature{} + for i := 0; i < t.Params().Len(); i++ { + signature.Parameters = append(signature.Parameters, b.walkType(u, nil, t.Params().At(i).Type())) + } + for i := 0; i < t.Results().Len(); i++ { + signature.Results = append(signature.Results, b.walkType(u, nil, t.Results().At(i).Type())) + } + if r := t.Recv(); r != nil { + signature.Receiver = b.walkType(u, nil, r.Type()) + } + signature.Variadic = t.Variadic() + return signature +} + +// walkType adds the type, and any necessary child types. +func (b *Builder) walkType(u types.Universe, useName *types.Name, in tc.Type) *types.Type { + // Most of the cases are underlying types of the named type. + name := tcNameToName(in.String()) + if useName != nil { + name = *useName + } + + switch t := in.(type) { + case *tc.Struct: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Struct + for i := 0; i < t.NumFields(); i++ { + f := t.Field(i) + m := types.Member{ + Name: f.Name(), + Embedded: f.Anonymous(), + Tags: t.Tag(i), + Type: b.walkType(u, nil, f.Type()), + CommentLines: splitLines(b.priorCommentLines(f.Pos(), 1).Text()), + } + out.Members = append(out.Members, m) + } + return out + case *tc.Map: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Map + out.Elem = b.walkType(u, nil, t.Elem()) + out.Key = b.walkType(u, nil, t.Key()) + return out + case *tc.Pointer: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Pointer + out.Elem = b.walkType(u, nil, t.Elem()) + return out + case *tc.Slice: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Slice + out.Elem = b.walkType(u, nil, t.Elem()) + return out + case *tc.Array: + out := u.Type(name) + 
if out.Kind != types.Unknown { + return out + } + out.Kind = types.Array + out.Elem = b.walkType(u, nil, t.Elem()) + // TODO: need to store array length, otherwise raw type name + // cannot be properly written. + return out + case *tc.Chan: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Chan + out.Elem = b.walkType(u, nil, t.Elem()) + // TODO: need to store direction, otherwise raw type name + // cannot be properly written. + return out + case *tc.Basic: + out := u.Type(types.Name{ + Package: "", + Name: t.Name(), + }) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Unsupported + return out + case *tc.Signature: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Func + out.Signature = b.convertSignature(u, t) + return out + case *tc.Interface: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Interface + t.Complete() + for i := 0; i < t.NumMethods(); i++ { + if out.Methods == nil { + out.Methods = map[string]*types.Type{} + } + method := t.Method(i) + mt := b.walkType(u, nil, method.Type()) + mt.CommentLines = splitLines(b.priorCommentLines(method.Pos(), 1).Text()) + out.Methods[method.Name()] = mt + } + return out + case *tc.Named: + var out *types.Type + switch t.Underlying().(type) { + case *tc.Named, *tc.Basic, *tc.Map, *tc.Slice: + name := tcNameToName(t.String()) + out = u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Alias + out.Underlying = b.walkType(u, nil, t.Underlying()) + default: + // tc package makes everything "named" with an + // underlying anonymous type--we remove that annoying + // "feature" for users. This flattens those types + // together. + name := tcNameToName(t.String()) + if out := u.Type(name); out.Kind != types.Unknown { + return out // short circuit if we've already made this. 
+ } + out = b.walkType(u, &name, t.Underlying()) + } + // If the underlying type didn't already add methods, add them. + // (Interface types will have already added methods.) + if len(out.Methods) == 0 { + for i := 0; i < t.NumMethods(); i++ { + if out.Methods == nil { + out.Methods = map[string]*types.Type{} + } + method := t.Method(i) + mt := b.walkType(u, nil, method.Type()) + mt.CommentLines = splitLines(b.priorCommentLines(method.Pos(), 1).Text()) + out.Methods[method.Name()] = mt + } + } + return out + default: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Unsupported + klog.Warningf("Making unsupported type entry %q for: %#v\n", out, t) + return out + } +} + +func (b *Builder) addFunction(u types.Universe, useName *types.Name, in *tc.Func) *types.Type { + name := tcFuncNameToName(in.String()) + if useName != nil { + name = *useName + } + out := u.Function(name) + out.Kind = types.DeclarationOf + out.Underlying = b.walkType(u, nil, in.Type()) + return out +} + +func (b *Builder) addVariable(u types.Universe, useName *types.Name, in *tc.Var) *types.Type { + name := tcVarNameToName(in.String()) + if useName != nil { + name = *useName + } + out := u.Variable(name) + out.Kind = types.DeclarationOf + out.Underlying = b.walkType(u, nil, in.Type()) + return out +} + +func (b *Builder) addConstant(u types.Universe, useName *types.Name, in *tc.Const) *types.Type { + name := tcVarNameToName(in.String()) + if useName != nil { + name = *useName + } + out := u.Constant(name) + out.Kind = types.DeclarationOf + out.Underlying = b.walkType(u, nil, in.Type()) + return out +} + +// canonicalizeImportPath takes an import path and returns the actual package. +// It doesn't support nested vendoring. 
+func canonicalizeImportPath(importPath string) importPathString { + if !strings.Contains(importPath, "/vendor/") { + return importPathString(importPath) + } + + return importPathString(importPath[strings.Index(importPath, "/vendor/")+len("/vendor/"):]) +} diff --git a/vendor/k8s.io/gengo/types/comments.go b/vendor/k8s.io/gengo/types/comments.go new file mode 100644 index 0000000000..8150c38387 --- /dev/null +++ b/vendor/k8s.io/gengo/types/comments.go @@ -0,0 +1,82 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types contains go type information, packaged in a way that makes +// auto-generation convenient, whether by template or straight go functions. +package types + +import ( + "fmt" + "strings" +) + +// ExtractCommentTags parses comments for lines of the form: +// +// 'marker' + "key=value". +// +// Values are optional; "" is the default. A tag can be specified more than +// one time and all values are returned. If the resulting map has an entry for +// a key, the value (a slice) is guaranteed to have at least 1 element. 
+// +// Example: if you pass "+" for 'marker', and the following lines are in +// the comments: +// +foo=value1 +// +bar +// +foo=value2 +// +baz="qux" +// Then this function will return: +// map[string][]string{"foo":{"value1", "value2"}, "bar": {""}, "baz": {"qux"}} +func ExtractCommentTags(marker string, lines []string) map[string][]string { + out := map[string][]string{} + for _, line := range lines { + line = strings.Trim(line, " ") + if len(line) == 0 { + continue + } + if !strings.HasPrefix(line, marker) { + continue + } + // TODO: we could support multiple values per key if we split on spaces + kv := strings.SplitN(line[len(marker):], "=", 2) + if len(kv) == 2 { + out[kv[0]] = append(out[kv[0]], kv[1]) + } else if len(kv) == 1 { + out[kv[0]] = append(out[kv[0]], "") + } + } + return out +} + +// ExtractSingleBoolCommentTag parses comments for lines of the form: +// +// 'marker' + "key=value1" +// +// If the tag is not found, the default value is returned. Values are asserted +// to be boolean ("true" or "false"), and any other value will cause an error +// to be returned. If the key has multiple values, the first one will be used. +func ExtractSingleBoolCommentTag(marker string, key string, defaultVal bool, lines []string) (bool, error) { + values := ExtractCommentTags(marker, lines)[key] + if values == nil { + return defaultVal, nil + } + if values[0] == "true" { + return true, nil + } + if values[0] == "false" { + return false, nil + } + return false, fmt.Errorf("tag value for %q is not boolean: %q", key, values[0]) +} diff --git a/vendor/k8s.io/gengo/types/doc.go b/vendor/k8s.io/gengo/types/doc.go new file mode 100644 index 0000000000..74a969a763 --- /dev/null +++ b/vendor/k8s.io/gengo/types/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types contains go type information, packaged in a way that makes +// auto-generation convenient, whether by template or straight go functions. +package types // import "k8s.io/gengo/types" diff --git a/vendor/k8s.io/gengo/types/flatten.go b/vendor/k8s.io/gengo/types/flatten.go new file mode 100644 index 0000000000..585014e8ba --- /dev/null +++ b/vendor/k8s.io/gengo/types/flatten.go @@ -0,0 +1,57 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// FlattenMembers recursively takes any embedded members and puts them in the +// top level, correctly hiding them if the top level hides them. There must not +// be a cycle-- that implies infinite members. +// +// This is useful for e.g. computing all the valid keys in a json struct, +// properly considering any configuration of embedded structs. 
+func FlattenMembers(m []Member) []Member { + embedded := []Member{} + normal := []Member{} + type nameInfo struct { + top bool + i int + } + names := map[string]nameInfo{} + for i := range m { + if m[i].Embedded && m[i].Type.Kind == Struct { + embedded = append(embedded, m[i]) + } else { + normal = append(normal, m[i]) + names[m[i].Name] = nameInfo{true, len(normal) - 1} + } + } + for i := range embedded { + for _, e := range FlattenMembers(embedded[i].Type.Members) { + if info, found := names[e.Name]; found { + if info.top { + continue + } + if n := normal[info.i]; n.Name == e.Name && n.Type == e.Type { + continue + } + panic("conflicting members") + } + normal = append(normal, e) + names[e.Name] = nameInfo{false, len(normal) - 1} + } + } + return normal +} diff --git a/vendor/k8s.io/gengo/types/types.go b/vendor/k8s.io/gengo/types/types.go new file mode 100644 index 0000000000..78357bcce1 --- /dev/null +++ b/vendor/k8s.io/gengo/types/types.go @@ -0,0 +1,526 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import "strings" + +// Ref makes a reference to the given type. It can only be used for e.g. +// passing to namers. +func Ref(packageName, typeName string) *Type { + return &Type{Name: Name{ + Name: typeName, + Package: packageName, + }} +} + +// A type name may have a package qualifier. +type Name struct { + // Empty if embedded or builtin. This is the package path unless Path is specified. 
+ Package string + // The type name. + Name string + // An optional location of the type definition for languages that can have disjoint + // packages and paths. + Path string +} + +// String returns the name formatted as a string. +func (n Name) String() string { + if n.Package == "" { + return n.Name + } + return n.Package + "." + n.Name +} + +// ParseFullyQualifiedName parses a name like k8s.io/kubernetes/pkg/api.Pod into a Name. +func ParseFullyQualifiedName(fqn string) Name { + cs := strings.Split(fqn, ".") + pkg := "" + if len(cs) > 1 { + pkg = strings.Join(cs[0:len(cs)-1], ".") + } + return Name{ + Name: cs[len(cs)-1], + Package: pkg, + } +} + +// The possible classes of types. +type Kind string + +const ( + // Builtin is a primitive, like bool, string, int. + Builtin Kind = "Builtin" + Struct Kind = "Struct" + Map Kind = "Map" + Slice Kind = "Slice" + Pointer Kind = "Pointer" + + // Alias is an alias of another type, e.g. in: + // type Foo string + // type Bar Foo + // Bar is an alias of Foo. + // + // In the real go type system, Foo is a "Named" string; but to simplify + // generation, this type system will just say that Foo *is* a builtin. + // We then need "Alias" as a way for us to say that Bar *is* a Foo. + Alias Kind = "Alias" + + // Interface is any type that could have differing types at run time. + Interface Kind = "Interface" + + // The remaining types are included for completeness, but are not well + // supported. + Array Kind = "Array" // Array is just like slice, but has a fixed length. + Chan Kind = "Chan" + Func Kind = "Func" + + // DeclarationOf is different from other Kinds; it indicates that instead of + // representing an actual Type, the type is a declaration of an instance of + // a type. E.g., a top-level function, variable, or constant. See the + // comment for Type.Name for more detail. + DeclarationOf Kind = "DeclarationOf" + Unknown Kind = "" + Unsupported Kind = "Unsupported" + + // Protobuf is protobuf type. 
+ Protobuf Kind = "Protobuf" +) + +// Package holds package-level information. +// Fields are public, as everything in this package, to enable consumption by +// templates (for example). But it is strongly encouraged for code to build by +// using the provided functions. +type Package struct { + // Canonical name of this package-- its path. + Path string + + // The location this package was loaded from + SourcePath string + + // Short name of this package; the name that appears in the + // 'package x' line. + Name string + + // The comment right above the package declaration in doc.go, if any. + DocComments []string + + // All comments from doc.go, if any. + // TODO: remove Comments and use DocComments everywhere. + Comments []string + + // Types within this package, indexed by their name (*not* including + // package name). + Types map[string]*Type + + // Functions within this package, indexed by their name (*not* including + // package name). + Functions map[string]*Type + + // Global variables within this package, indexed by their name (*not* including + // package name). + Variables map[string]*Type + + // Global constants within this package, indexed by their name (*not* including + // package name). + Constants map[string]*Type + + // Packages imported by this package, indexed by (canonicalized) + // package path. + Imports map[string]*Package +} + +// Has returns true if the given name references a type known to this package. +func (p *Package) Has(name string) bool { + _, has := p.Types[name] + return has +} + +// Type gets the given Type in this Package. If the Type is not already +// defined, this will add it and return the new Type value. The caller is +// expected to finish initialization. +func (p *Package) Type(typeName string) *Type { + if t, ok := p.Types[typeName]; ok { + return t + } + if p.Path == "" { + // Import the standard builtin types! 
+ if t, ok := builtins.Types[typeName]; ok { + p.Types[typeName] = t + return t + } + } + t := &Type{Name: Name{Package: p.Path, Name: typeName}} + p.Types[typeName] = t + return t +} + +// Function gets the given function Type in this Package. If the function is +// not already defined, this will add it. If a function is added, it's the +// caller's responsibility to finish construction of the function by setting +// Underlying to the correct type. +func (p *Package) Function(funcName string) *Type { + if t, ok := p.Functions[funcName]; ok { + return t + } + t := &Type{Name: Name{Package: p.Path, Name: funcName}} + t.Kind = DeclarationOf + p.Functions[funcName] = t + return t +} + +// Variable gets the given variable Type in this Package. If the variable is +// not already defined, this will add it. If a variable is added, it's the caller's +// responsibility to finish construction of the variable by setting Underlying +// to the correct type. +func (p *Package) Variable(varName string) *Type { + if t, ok := p.Variables[varName]; ok { + return t + } + t := &Type{Name: Name{Package: p.Path, Name: varName}} + t.Kind = DeclarationOf + p.Variables[varName] = t + return t +} + +// Constant gets the given constant Type in this Package. If the constant is +// not already defined, this will add it. If a constant is added, it's the caller's +// responsibility to finish construction of the constant by setting Underlying +// to the correct type. +func (p *Package) Constant(constName string) *Type { + if t, ok := p.Constants[constName]; ok { + return t + } + t := &Type{Name: Name{Package: p.Path, Name: constName}} + t.Kind = DeclarationOf + p.Constants[constName] = t + return t +} + +// HasImport returns true if p imports packageName. Package names include the +// package directory. +func (p *Package) HasImport(packageName string) bool { + _, has := p.Imports[packageName] + return has +} + +// Universe is a map of all packages. 
The key is the package name, but you +// should use Package(), Type(), Function(), or Variable() instead of direct +// access. +type Universe map[string]*Package + +// Type returns the canonical type for the given fully-qualified name. Builtin +// types will always be found, even if they haven't been explicitly added to +// the map. If a non-existing type is requested, this will create (a marker for) +// it. +func (u Universe) Type(n Name) *Type { + return u.Package(n.Package).Type(n.Name) +} + +// Function returns the canonical function for the given fully-qualified name. +// If a non-existing function is requested, this will create (a marker for) it. +// If a marker is created, it's the caller's responsibility to finish +// construction of the function by setting Underlying to the correct type. +func (u Universe) Function(n Name) *Type { + return u.Package(n.Package).Function(n.Name) +} + +// Variable returns the canonical variable for the given fully-qualified name. +// If a non-existing variable is requested, this will create (a marker for) it. +// If a marker is created, it's the caller's responsibility to finish +// construction of the variable by setting Underlying to the correct type. +func (u Universe) Variable(n Name) *Type { + return u.Package(n.Package).Variable(n.Name) +} + +// Constant returns the canonical constant for the given fully-qualified name. +// If a non-existing constant is requested, this will create (a marker for) it. +// If a marker is created, it's the caller's responsibility to finish +// construction of the constant by setting Underlying to the correct type. +func (u Universe) Constant(n Name) *Type { + return u.Package(n.Package).Constant(n.Name) +} + +// AddImports registers import lines for packageName. May be called multiple times. +// You are responsible for canonicalizing all package paths. 
+func (u Universe) AddImports(packagePath string, importPaths ...string) { + p := u.Package(packagePath) + for _, i := range importPaths { + p.Imports[i] = u.Package(i) + } +} + +// Package returns the Package for the given path. +// If a non-existing package is requested, this will create (a marker for) it. +// If a marker is created, it's the caller's responsibility to finish +// construction of the package. +func (u Universe) Package(packagePath string) *Package { + if p, ok := u[packagePath]; ok { + return p + } + p := &Package{ + Path: packagePath, + Types: map[string]*Type{}, + Functions: map[string]*Type{}, + Variables: map[string]*Type{}, + Constants: map[string]*Type{}, + Imports: map[string]*Package{}, + } + u[packagePath] = p + return p +} + +// Type represents a subset of possible go types. +type Type struct { + // There are two general categories of types, those explicitly named + // and those anonymous. Named ones will have a non-empty package in the + // name field. + // + // An exception: If Kind == DeclarationOf, then this name is the name of a + // top-level function, variable, or const, and the type can be found in Underlying. + // We do this to allow the naming system to work against these objects, even + // though they aren't strictly speaking types. + Name Name + + // The general kind of this type. + Kind Kind + + // If there are comment lines immediately before the type definition, + // they will be recorded here. + CommentLines []string + + // If there are comment lines preceding the `CommentLines`, they will be + // recorded here. 
There are two cases: + // --- + // SecondClosestCommentLines + // a blank line + // CommentLines + // type definition + // --- + // + // or + // --- + // SecondClosestCommentLines + // a blank line + // type definition + // --- + SecondClosestCommentLines []string + + // If Kind == Struct + Members []Member + + // If Kind == Map, Slice, Pointer, or Chan + Elem *Type + + // If Kind == Map, this is the map's key type. + Key *Type + + // If Kind == Alias, this is the underlying type. + // If Kind == DeclarationOf, this is the type of the declaration. + Underlying *Type + + // If Kind == Interface, this is the set of all required functions. + // Otherwise, if this is a named type, this is the list of methods that + // type has. (All elements will have Kind=="Func") + Methods map[string]*Type + + // If Kind == func, this is the signature of the function. + Signature *Signature + + // TODO: Add: + // * channel direction + // * array length +} + +// String returns the name of the type. +func (t *Type) String() string { + return t.Name.String() +} + +// IsPrimitive returns whether the type is a built-in type or is an alias to a +// built-in type. For example: strings and aliases of strings are primitives, +// structs are not. +func (t *Type) IsPrimitive() bool { + if t.Kind == Builtin || (t.Kind == Alias && t.Underlying.Kind == Builtin) { + return true + } + return false +} + +// IsAssignable returns whether the type is deep-assignable. For example, +// slices and maps and pointers are shallow copies, but ints and strings are +// complete. +func (t *Type) IsAssignable() bool { + if t.IsPrimitive() { + return true + } + if t.Kind == Struct { + for _, m := range t.Members { + if !m.Type.IsAssignable() { + return false + } + } + return true + } + return false +} + +// IsAnonymousStruct returns true if the type is an anonymous struct or an alias +// to an anonymous struct. 
+func (t *Type) IsAnonymousStruct() bool { + return (t.Kind == Struct && t.Name.Name == "struct{}") || (t.Kind == Alias && t.Underlying.IsAnonymousStruct()) +} + +// A single struct member +type Member struct { + // The name of the member. + Name string + + // If the member is embedded (anonymous) this will be true, and the + // Name will be the type name. + Embedded bool + + // If there are comment lines immediately before the member in the type + // definition, they will be recorded here. + CommentLines []string + + // If there are tags along with this member, they will be saved here. + Tags string + + // The type of this member. + Type *Type +} + +// String returns the name and type of the member. +func (m Member) String() string { + return m.Name + " " + m.Type.String() +} + +// Signature is a function's signature. +type Signature struct { + // TODO: store the parameter names, not just types. + + // If a method of some type, this is the type it's a member of. + Receiver *Type + Parameters []*Type + Results []*Type + + // True if the last in parameter is of the form ...T. + Variadic bool + + // If there are comment lines immediately before this + // signature/method/function declaration, they will be recorded here. + CommentLines []string +} + +// Built in types. 
+var ( + String = &Type{ + Name: Name{Name: "string"}, + Kind: Builtin, + } + Int64 = &Type{ + Name: Name{Name: "int64"}, + Kind: Builtin, + } + Int32 = &Type{ + Name: Name{Name: "int32"}, + Kind: Builtin, + } + Int16 = &Type{ + Name: Name{Name: "int16"}, + Kind: Builtin, + } + Int = &Type{ + Name: Name{Name: "int"}, + Kind: Builtin, + } + Uint64 = &Type{ + Name: Name{Name: "uint64"}, + Kind: Builtin, + } + Uint32 = &Type{ + Name: Name{Name: "uint32"}, + Kind: Builtin, + } + Uint16 = &Type{ + Name: Name{Name: "uint16"}, + Kind: Builtin, + } + Uint = &Type{ + Name: Name{Name: "uint"}, + Kind: Builtin, + } + Uintptr = &Type{ + Name: Name{Name: "uintptr"}, + Kind: Builtin, + } + Float64 = &Type{ + Name: Name{Name: "float64"}, + Kind: Builtin, + } + Float32 = &Type{ + Name: Name{Name: "float32"}, + Kind: Builtin, + } + Float = &Type{ + Name: Name{Name: "float"}, + Kind: Builtin, + } + Bool = &Type{ + Name: Name{Name: "bool"}, + Kind: Builtin, + } + Byte = &Type{ + Name: Name{Name: "byte"}, + Kind: Builtin, + } + + builtins = &Package{ + Types: map[string]*Type{ + "bool": Bool, + "string": String, + "int": Int, + "int64": Int64, + "int32": Int32, + "int16": Int16, + "int8": Byte, + "uint": Uint, + "uint64": Uint64, + "uint32": Uint32, + "uint16": Uint16, + "uint8": Byte, + "uintptr": Uintptr, + "byte": Byte, + "float": Float, + "float64": Float64, + "float32": Float32, + }, + Imports: map[string]*Package{}, + Path: "", + Name: "", + } +) + +func IsInteger(t *Type) bool { + switch t { + case Int, Int64, Int32, Int16, Uint, Uint64, Uint32, Uint16, Byte: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go new file mode 100644 index 0000000000..19783370e9 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" +) + +// CustomArgs is used by the gengo framework to pass args specific to this generator. +type CustomArgs struct { + // ReportFilename is added to CustomArgs for specifying name of report file used + // by API linter. If specified, API rule violations will be printed to report file. + // Otherwise default value "-" will be used which indicates stdout. + ReportFilename string +} + +// NewDefaults returns default arguments for the generator. Returning the arguments instead +// of using default flag parsing allows registering custom arguments afterwards +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + // Default() sets a couple of flag default values for example the boilerplate. 
+ // WithoutDefaultFlagParsing() disables implicit addition of command line flags and parsing, + // which allows registering custom arguments afterwards + genericArgs := args.Default().WithoutDefaultFlagParsing() + genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kube-openapi/boilerplate/boilerplate.go.txt") + + customArgs := &CustomArgs{} + genericArgs.CustomArgs = customArgs + + // Default value for report filename is "-", which stands for stdout + customArgs.ReportFilename = "-" + // Default value for output file base name + genericArgs.OutputFileBaseName = "openapi_generated" + + return genericArgs, customArgs +} + +// AddFlags add the generator flags to the flag set. +func (c *CustomArgs) AddFlags(fs *pflag.FlagSet) { + fs.StringVarP(&c.ReportFilename, "report-filename", "r", c.ReportFilename, "Name of report file used by API linter to print API violations. Default \"-\" stands for standard output. NOTE that if valid filename other than \"-\" is specified, API linter won't return error on detected API violations. This allows further check of existing API violations without stopping the OpenAPI generation toolchain.") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + c, ok := genericArgs.CustomArgs.(*CustomArgs) + if !ok { + return fmt.Errorf("input arguments don't contain valid custom arguments") + } + if len(c.ReportFilename) == 0 { + return fmt.Errorf("report filename cannot be empty. 
specify a valid filename or use \"-\" for stdout") + } + if len(genericArgs.OutputFileBaseName) == 0 { + return fmt.Errorf("output file base name cannot be empty") + } + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/README.md b/vendor/k8s.io/kube-openapi/pkg/generators/README.md new file mode 100644 index 0000000000..72b4e5fb43 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/README.md @@ -0,0 +1,49 @@ +# Generate OpenAPI definitions + +- To generate definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines. +- To exclude a type or a member from a tagged package/type, add "+k8s:openapi-gen=false" tag to the comment lines. + +# OpenAPI Extensions + +OpenAPI spec can have extensions on types. To define one or more extensions on a type or its member +add `+k8s:openapi-gen=x-kubernetes-$NAME:$VALUE` to the comment lines before type/member. A type/member can +have multiple extensions. The rest of the line in the comment will be used as $VALUE so there is no need to +escape or quote the value string. Extensions can be used to pass more information to client generators or +documentation generators. For example a type might have a friendly name to be displayed in documentation or +being used in a client's fluent interface. + +# Custom OpenAPI type definitions + +Custom types which otherwise don't map directly to OpenAPI can override their +OpenAPI definition by implementing a function named "OpenAPIDefinition" with +the following signature: + +```go + import openapi "k8s.io/kube-openapi/pkg/common" + + // ... 
+ + type Time struct { + time.Time + } + + func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "date-time", + }, + }, + } + } +``` + +Alternatively, the type can avoid the "openapi" import by defining the following +methods. The following example produces the same OpenAPI definition as the +example above: + +```go + func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + func (_ Time) OpenAPISchemaFormat() string { return "date-time" } +``` diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go new file mode 100644 index 0000000000..26b951bc8b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go @@ -0,0 +1,220 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + + "k8s.io/kube-openapi/pkg/generators/rules" + + "k8s.io/gengo/generator" + "k8s.io/gengo/types" + "k8s.io/klog" +) + +const apiViolationFileType = "api-violation" + +type apiViolationFile struct { + // Since our file actually is unrelated to the package structure, use a + // path that hasn't been mangled by the framework. 
+ unmangledPath string +} + +func (a apiViolationFile) AssembleFile(f *generator.File, path string) error { + path = a.unmangledPath + klog.V(2).Infof("Assembling file %q", path) + if path == "-" { + _, err := io.Copy(os.Stdout, &f.Body) + return err + } + + output, err := os.Create(path) + if err != nil { + return err + } + defer output.Close() + _, err = io.Copy(output, &f.Body) + return err +} + +func (a apiViolationFile) VerifyFile(f *generator.File, path string) error { + if path == "-" { + // Nothing to verify against. + return nil + } + path = a.unmangledPath + + formatted := f.Body.Bytes() + existing, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("unable to read file %q for comparison: %v", path, err) + } + if bytes.Compare(formatted, existing) == 0 { + return nil + } + + // Be nice and find the first place where they differ + // (Copied from gengo's default file type) + i := 0 + for i < len(formatted) && i < len(existing) && formatted[i] == existing[i] { + i++ + } + eDiff, fDiff := existing[i:], formatted[i:] + if len(eDiff) > 100 { + eDiff = eDiff[:100] + } + if len(fDiff) > 100 { + fDiff = fDiff[:100] + } + return fmt.Errorf("output for %q differs; first existing/expected diff: \n %q\n %q", path, string(eDiff), string(fDiff)) +} + +func newAPIViolationGen() *apiViolationGen { + return &apiViolationGen{ + linter: newAPILinter(), + } +} + +type apiViolationGen struct { + generator.DefaultGen + + linter *apiLinter +} + +func (v *apiViolationGen) FileType() string { return apiViolationFileType } +func (v *apiViolationGen) Filename() string { + return "this file is ignored by the file assembler" +} + +func (v *apiViolationGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + klog.V(5).Infof("validating API rules for type %v", t) + if err := v.linter.validate(t); err != nil { + return err + } + return nil +} + +// Finalize prints the API rule violations to report file (if specified from +// arguments) or stdout 
(default) +func (v *apiViolationGen) Finalize(c *generator.Context, w io.Writer) error { + // NOTE: we don't return error here because we assume that the report file will + // get evaluated afterwards to determine if error should be raised. For example, + // you can have make rules that compare the report file with existing known + // violations (whitelist) and determine no error if no change is detected. + v.linter.report(w) + return nil +} + +// apiLinter is the framework hosting multiple API rules and recording API rule +// violations +type apiLinter struct { + // API rules that implement APIRule interface and output API rule violations + rules []APIRule + violations []apiViolation +} + +// newAPILinter creates an apiLinter object with API rules in package rules. Please +// add APIRule here when new API rule is implemented. +func newAPILinter() *apiLinter { + return &apiLinter{ + rules: []APIRule{ + &rules.NamesMatch{}, + &rules.OmitEmptyMatchCase{}, + &rules.ListTypeMissing{}, + }, + } +} + +// apiViolation uniquely identifies single API rule violation +type apiViolation struct { + // Name of rule from APIRule.Name() + rule string + + packageName string + typeName string + + // Optional: name of field that violates API rule. Empty fieldName implies that + // the entire type violates the rule. + field string +} + +// apiViolations implements sort.Interface for []apiViolation based on the fields: rule, +// packageName, typeName and field. 
+type apiViolations []apiViolation + +func (a apiViolations) Len() int { return len(a) } +func (a apiViolations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a apiViolations) Less(i, j int) bool { + if a[i].rule != a[j].rule { + return a[i].rule < a[j].rule + } + if a[i].packageName != a[j].packageName { + return a[i].packageName < a[j].packageName + } + if a[i].typeName != a[j].typeName { + return a[i].typeName < a[j].typeName + } + return a[i].field < a[j].field +} + +// APIRule is the interface for validating API rule on Go types +type APIRule interface { + // Validate evaluates API rule on type t and returns a list of field names in + // the type that violate the rule. Empty field name [""] implies the entire + // type violates the rule. + Validate(t *types.Type) ([]string, error) + + // Name returns the name of APIRule + Name() string +} + +// validate runs all API rules on type t and records any API rule violation +func (l *apiLinter) validate(t *types.Type) error { + for _, r := range l.rules { + klog.V(5).Infof("validating API rule %v for type %v", r.Name(), t) + fields, err := r.Validate(t) + if err != nil { + return err + } + for _, field := range fields { + l.violations = append(l.violations, apiViolation{ + rule: r.Name(), + packageName: t.Name.Package, + typeName: t.Name.Name, + field: field, + }) + } + } + return nil +} + +// report prints any API rule violation to writer w and returns error if violation exists +func (l *apiLinter) report(w io.Writer) error { + sort.Sort(apiViolations(l.violations)) + for _, v := range l.violations { + fmt.Fprintf(w, "API rule violation: %s,%s,%s,%s\n", v.rule, v.packageName, v.typeName, v.field) + } + if len(l.violations) > 0 { + return fmt.Errorf("API rule violations exist") + } + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/config.go b/vendor/k8s.io/kube-openapi/pkg/generators/config.go new file mode 100644 index 0000000000..33cd9eb5a8 --- /dev/null +++ 
b/vendor/k8s.io/kube-openapi/pkg/generators/config.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "path/filepath" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + "k8s.io/klog" + + generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" +) + +type identityNamer struct{} + +func (_ identityNamer) Name(t *types.Type) string { + return t.Name.String() +} + +var _ namer.Namer = identityNamer{} + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer("", nil), + "sorting_namer": identityNamer{}, + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "sorting_namer" +} + +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + header = append(header, []byte( + ` +// This file was autogenerated by openapi-gen. Do not edit it manually! + +`)...) 
+ + reportPath := "-" + if customArgs, ok := arguments.CustomArgs.(*generatorargs.CustomArgs); ok { + reportPath = customArgs.ReportFilename + } + context.FileTypes[apiViolationFileType] = apiViolationFile{ + unmangledPath: reportPath, + } + + return generator.Packages{ + &generator.DefaultPackage{ + PackageName: filepath.Base(arguments.OutputPackagePath), + PackagePath: arguments.OutputPackagePath, + HeaderText: header, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + newOpenAPIGen( + arguments.OutputFileBaseName, + arguments.OutputPackagePath, + ), + newAPIViolationGen(), + } + }, + FilterFunc: apiTypeFilterFunc, + }, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go new file mode 100644 index 0000000000..af5d5be681 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go @@ -0,0 +1,188 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "sort" + "strings" + + "k8s.io/gengo/examples/set-gen/sets" + "k8s.io/gengo/types" +) + +const extensionPrefix = "x-kubernetes-" + +// extensionAttributes encapsulates common traits for particular extensions. 
+type extensionAttributes struct { + xName string + kind types.Kind + allowedValues sets.String + enforceArray bool +} + +// Extension tag to openapi extension attributes +var tagToExtension = map[string]extensionAttributes{ + "patchMergeKey": { + xName: "x-kubernetes-patch-merge-key", + kind: types.Slice, + }, + "patchStrategy": { + xName: "x-kubernetes-patch-strategy", + kind: types.Slice, + allowedValues: sets.NewString("merge", "retainKeys"), + }, + "listMapKey": { + xName: "x-kubernetes-list-map-keys", + kind: types.Slice, + enforceArray: true, + }, + "listType": { + xName: "x-kubernetes-list-type", + kind: types.Slice, + allowedValues: sets.NewString("atomic", "set", "map"), + }, +} + +// Extension encapsulates information necessary to generate an OpenAPI extension. +type extension struct { + idlTag string // Example: listType + xName string // Example: x-kubernetes-list-type + values []string // Example: [atomic] +} + +func (e extension) hasAllowedValues() bool { + return tagToExtension[e.idlTag].allowedValues.Len() > 0 +} + +func (e extension) allowedValues() sets.String { + return tagToExtension[e.idlTag].allowedValues +} + +func (e extension) hasKind() bool { + return len(tagToExtension[e.idlTag].kind) > 0 +} + +func (e extension) kind() types.Kind { + return tagToExtension[e.idlTag].kind +} + +func (e extension) validateAllowedValues() error { + // allowedValues not set means no restrictions on values. + if !e.hasAllowedValues() { + return nil + } + // Check for missing value. + if len(e.values) == 0 { + return fmt.Errorf("%s needs a value, none given.", e.idlTag) + } + // For each extension value, validate that it is allowed. + allowedValues := e.allowedValues() + if !allowedValues.HasAll(e.values...) { + return fmt.Errorf("%v not allowed for %s. 
Allowed values: %v", + e.values, e.idlTag, allowedValues.List()) + } + return nil +} + +func (e extension) validateType(kind types.Kind) error { + // If this extension class has no kind, then don't validate the type. + if !e.hasKind() { + return nil + } + if kind != e.kind() { + return fmt.Errorf("tag %s on type %v; only allowed on type %v", + e.idlTag, kind, e.kind()) + } + return nil +} + +func (e extension) hasMultipleValues() bool { + return len(e.values) > 1 +} + +func (e extension) isAlwaysArrayFormat() bool { + return tagToExtension[e.idlTag].enforceArray +} + +// Returns sorted list of map keys. Needed for deterministic testing. +func sortedMapKeys(m map[string][]string) []string { + keys := make([]string, len(m)) + i := 0 + for k := range m { + keys[i] = k + i++ + } + sort.Strings(keys) + return keys +} + +// Parses comments to return openapi extensions. Returns a list of +// extensions which parsed correctly, as well as a list of the +// parse errors. Validating extensions is performed separately. +// NOTE: Non-empty errors does not mean extensions is empty. +func parseExtensions(comments []string) ([]extension, []error) { + extensions := []extension{} + errors := []error{} + // First, generate extensions from "+k8s:openapi-gen=x-kubernetes-*" annotations. + values := getOpenAPITagValue(comments) + for _, val := range values { + // Example: x-kubernetes-member-tag:member_test + if strings.HasPrefix(val, extensionPrefix) { + parts := strings.SplitN(val, ":", 2) + if len(parts) != 2 { + errors = append(errors, fmt.Errorf("invalid extension value: %v", val)) + continue + } + e := extension{ + idlTag: tagName, // Example: k8s:openapi-gen + xName: parts[0], // Example: x-kubernetes-member-tag + values: []string{parts[1]}, // Example: member_test + } + extensions = append(extensions, e) + } + } + // Next, generate extensions from "idlTags" (e.g. 
+listType) + tagValues := types.ExtractCommentTags("+", comments) + for _, idlTag := range sortedMapKeys(tagValues) { + xAttrs, exists := tagToExtension[idlTag] + if !exists { + continue + } + values := tagValues[idlTag] + e := extension{ + idlTag: idlTag, // listType + xName: xAttrs.xName, // x-kubernetes-list-type + values: values, // [atomic] + } + extensions = append(extensions, e) + } + return extensions, errors +} + +func validateMemberExtensions(extensions []extension, m *types.Member) []error { + errors := []error{} + for _, e := range extensions { + if err := e.validateAllowedValues(); err != nil { + errors = append(errors, err) + } + if err := e.validateType(m.Type.Kind); err != nil { + errors = append(errors, err) + } + } + return errors +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go new file mode 100644 index 0000000000..b8ec898c4d --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -0,0 +1,692 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "reflect" + "sort" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + openapi "k8s.io/kube-openapi/pkg/common" + + "k8s.io/klog" +) + +// This is the comment tag that carries parameters for open API generation. 
+const tagName = "k8s:openapi-gen" +const tagOptional = "optional" + +// Known values for the tag. +const ( + tagValueTrue = "true" + tagValueFalse = "false" +) + +// Used for temporary validation of patch struct tags. +// TODO: Remove patch struct tag validation because they we are now consuming OpenAPI on server. +var tempPatchTags = [...]string{ + "patchMergeKey", + "patchStrategy", +} + +func getOpenAPITagValue(comments []string) []string { + return types.ExtractCommentTags("+", comments)[tagName] +} + +func getSingleTagsValue(comments []string, tag string) (string, error) { + tags, ok := types.ExtractCommentTags("+", comments)[tag] + if !ok || len(tags) == 0 { + return "", nil + } + if len(tags) > 1 { + return "", fmt.Errorf("multiple values are not allowed for tag %s", tag) + } + return tags[0], nil +} + +func hasOpenAPITagValue(comments []string, value string) bool { + tagValues := getOpenAPITagValue(comments) + for _, val := range tagValues { + if val == value { + return true + } + } + return false +} + +// hasOptionalTag returns true if the member has +optional in its comments or +// omitempty in its json tags. 
+func hasOptionalTag(m *types.Member) bool { + hasOptionalCommentTag := types.ExtractCommentTags( + "+", m.CommentLines)[tagOptional] != nil + hasOptionalJsonTag := strings.Contains( + reflect.StructTag(m.Tags).Get("json"), "omitempty") + return hasOptionalCommentTag || hasOptionalJsonTag +} + +func apiTypeFilterFunc(c *generator.Context, t *types.Type) bool { + // There is a conflict between this codegen and codecgen, we should avoid types generated for codecgen + if strings.HasPrefix(t.Name.Name, "codecSelfer") { + return false + } + pkg := c.Universe.Package(t.Name.Package) + if hasOpenAPITagValue(pkg.Comments, tagValueTrue) { + return !hasOpenAPITagValue(t.CommentLines, tagValueFalse) + } + if hasOpenAPITagValue(t.CommentLines, tagValueTrue) { + return true + } + return false +} + +const ( + specPackagePath = "github.com/go-openapi/spec" + openAPICommonPackagePath = "k8s.io/kube-openapi/pkg/common" +) + +// openApiGen produces a file with auto-generated OpenAPI functions. +type openAPIGen struct { + generator.DefaultGen + // TargetPackage is the package that will get GetOpenAPIDefinitions function returns all open API definitions. + targetPackage string + imports namer.ImportTracker +} + +func newOpenAPIGen(sanitizedName string, targetPackage string) generator.Generator { + return &openAPIGen{ + DefaultGen: generator.DefaultGen{ + OptionalName: sanitizedName, + }, + imports: generator.NewImportTracker(), + targetPackage: targetPackage, + } +} + +const nameTmpl = "schema_$.type|private$" + +func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems { + // Have the raw namer for this file track what it imports. + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.targetPackage, g.imports), + "private": &namer.NameStrategy{ + Join: func(pre string, in []string, post string) string { + return strings.Join(in, "_") + }, + PrependPackageNames: 4, // enough to fully qualify from k8s.io/api/... 
+ }, + } +} + +func (g *openAPIGen) isOtherPackage(pkg string) bool { + if pkg == g.targetPackage { + return false + } + if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") { + return false + } + return true +} + +func (g *openAPIGen) Imports(c *generator.Context) []string { + importLines := []string{} + for _, singleImport := range g.imports.ImportLines() { + importLines = append(importLines, singleImport) + } + return importLines +} + +func argsFromType(t *types.Type) generator.Args { + return generator.Args{ + "type": t, + "ReferenceCallback": types.Ref(openAPICommonPackagePath, "ReferenceCallback"), + "OpenAPIDefinition": types.Ref(openAPICommonPackagePath, "OpenAPIDefinition"), + "SpecSchemaType": types.Ref(specPackagePath, "Schema"), + } +} + +func (g *openAPIGen) Init(c *generator.Context, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + sw.Do("func GetOpenAPIDefinitions(ref $.ReferenceCallback|raw$) map[string]$.OpenAPIDefinition|raw$ {\n", argsFromType(nil)) + sw.Do("return map[string]$.OpenAPIDefinition|raw${\n", argsFromType(nil)) + + for _, t := range c.Order { + err := newOpenAPITypeWriter(sw, c).generateCall(t) + if err != nil { + return err + } + } + + sw.Do("}\n", nil) + sw.Do("}\n\n", nil) + + return sw.Error() +} + +func (g *openAPIGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + klog.V(5).Infof("generating for type %v", t) + sw := generator.NewSnippetWriter(w, c, "$", "$") + err := newOpenAPITypeWriter(sw, c).generate(t) + if err != nil { + return err + } + return sw.Error() +} + +func getJsonTags(m *types.Member) []string { + jsonTag := reflect.StructTag(m.Tags).Get("json") + if jsonTag == "" { + return []string{} + } + return strings.Split(jsonTag, ",") +} + +func getReferableName(m *types.Member) string { + jsonTags := getJsonTags(m) + if len(jsonTags) > 0 { + if jsonTags[0] == "-" { + return "" + } else { + return jsonTags[0] + } + } else { + return m.Name + } +} + +func 
shouldInlineMembers(m *types.Member) bool { + jsonTags := getJsonTags(m) + return len(jsonTags) > 1 && jsonTags[1] == "inline" +} + +type openAPITypeWriter struct { + *generator.SnippetWriter + context *generator.Context + refTypes map[string]*types.Type + GetDefinitionInterface *types.Type +} + +func newOpenAPITypeWriter(sw *generator.SnippetWriter, c *generator.Context) openAPITypeWriter { + return openAPITypeWriter{ + SnippetWriter: sw, + context: c, + refTypes: map[string]*types.Type{}, + } +} + +func methodReturnsValue(mt *types.Type, pkg, name string) bool { + if len(mt.Signature.Parameters) != 0 || len(mt.Signature.Results) != 1 { + return false + } + r := mt.Signature.Results[0] + return r.Name.Name == name && r.Name.Package == pkg +} + +func hasOpenAPIV3DefinitionMethod(t *types.Type) bool { + for mn, mt := range t.Methods { + if mn != "OpenAPIV3Definition" { + continue + } + return methodReturnsValue(mt, openAPICommonPackagePath, "OpenAPIDefinition") + } + return false +} + +func hasOpenAPIDefinitionMethod(t *types.Type) bool { + for mn, mt := range t.Methods { + if mn != "OpenAPIDefinition" { + continue + } + return methodReturnsValue(mt, openAPICommonPackagePath, "OpenAPIDefinition") + } + return false +} + +func hasOpenAPIDefinitionMethods(t *types.Type) bool { + var hasSchemaTypeMethod, hasOpenAPISchemaFormat bool + for mn, mt := range t.Methods { + switch mn { + case "OpenAPISchemaType": + hasSchemaTypeMethod = methodReturnsValue(mt, "", "[]string") + case "OpenAPISchemaFormat": + hasOpenAPISchemaFormat = methodReturnsValue(mt, "", "string") + } + } + return hasSchemaTypeMethod && hasOpenAPISchemaFormat +} + +// typeShortName returns short package name (e.g. the name x appears in package x definition) dot type name. +func typeShortName(t *types.Type) string { + return filepath.Base(t.Name.Package) + "." 
+ t.Name.Name +} + +func (g openAPITypeWriter) generateMembers(t *types.Type, required []string) ([]string, error) { + var err error + for _, m := range t.Members { + if hasOpenAPITagValue(m.CommentLines, tagValueFalse) { + continue + } + if shouldInlineMembers(&m) { + required, err = g.generateMembers(m.Type, required) + if err != nil { + return required, err + } + continue + } + name := getReferableName(&m) + if name == "" { + continue + } + if !hasOptionalTag(&m) { + required = append(required, name) + } + if err = g.generateProperty(&m, t); err != nil { + klog.Errorf("Error when generating: %v, %v\n", name, m) + return required, err + } + } + return required, nil +} + +func (g openAPITypeWriter) generateCall(t *types.Type) error { + // Only generate for struct type and ignore the rest + switch t.Kind { + case types.Struct: + args := argsFromType(t) + g.Do("\"$.$\": ", t.Name) + + hasV2Definition := hasOpenAPIDefinitionMethod(t) + hasV2DefinitionTypeAndFormat := hasOpenAPIDefinitionMethods(t) + hasV3Definition := hasOpenAPIV3DefinitionMethod(t) + + switch { + case hasV2DefinitionTypeAndFormat: + g.Do(nameTmpl+"(ref),\n", args) + case hasV2Definition && hasV3Definition: + g.Do("common.EmbedOpenAPIDefinitionIntoV2Extension($.type|raw${}.OpenAPIV3Definition(), $.type|raw${}.OpenAPIDefinition()),\n", args) + case hasV2Definition: + g.Do("$.type|raw${}.OpenAPIDefinition(),\n", args) + case hasV3Definition: + g.Do("$.type|raw${}.OpenAPIV3Definition(),\n", args) + default: + g.Do(nameTmpl+"(ref),\n", args) + } + } + return g.Error() +} + +func (g openAPITypeWriter) generate(t *types.Type) error { + // Only generate for struct type and ignore the rest + switch t.Kind { + case types.Struct: + hasV2Definition := hasOpenAPIDefinitionMethod(t) + hasV2DefinitionTypeAndFormat := hasOpenAPIDefinitionMethods(t) + hasV3Definition := hasOpenAPIV3DefinitionMethod(t) + + if hasV2Definition || (hasV3Definition && !hasV2DefinitionTypeAndFormat) { + // already invoked directly + 
return nil + } + + args := argsFromType(t) + g.Do("func "+nameTmpl+"(ref $.ReferenceCallback|raw$) $.OpenAPIDefinition|raw$ {\n", args) + switch { + case hasV2DefinitionTypeAndFormat && hasV3Definition: + g.Do("return common.EmbedOpenAPIDefinitionIntoV2Extension($.type|raw${}.OpenAPIV3Definition(), $.OpenAPIDefinition|raw${\n"+ + "Schema: spec.Schema{\n"+ + "SchemaProps: spec.SchemaProps{\n", args) + g.generateDescription(t.CommentLines) + g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "})\n}\n\n", args) + return nil + case hasV2DefinitionTypeAndFormat: + g.Do("return $.OpenAPIDefinition|raw${\n"+ + "Schema: spec.Schema{\n"+ + "SchemaProps: spec.SchemaProps{\n", args) + g.generateDescription(t.CommentLines) + g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "}\n}\n\n", args) + return nil + } + g.Do("return $.OpenAPIDefinition|raw${\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", args) + g.generateDescription(t.CommentLines) + g.Do("Type: []string{\"object\"},\n", nil) + + // write members into a temporary buffer, in order to postpone writing out the Properties field. We only do + // that if it is not empty. 
+ propertiesBuf := bytes.Buffer{} + bsw := g + bsw.SnippetWriter = generator.NewSnippetWriter(&propertiesBuf, g.context, "$", "$") + required, err := bsw.generateMembers(t, []string{}) + if err != nil { + return err + } + if propertiesBuf.Len() > 0 { + g.Do("Properties: map[string]$.SpecSchemaType|raw${\n", args) + g.Do(strings.Replace(propertiesBuf.String(), "$", "$\"$\"$", -1), nil) // escape $ (used as delimiter of the templates) + g.Do("},\n", nil) + } + + if len(required) > 0 { + g.Do("Required: []string{\"$.$\"},\n", strings.Join(required, "\",\"")) + } + g.Do("},\n", nil) + if err := g.generateStructExtensions(t); err != nil { + return err + } + g.Do("},\n", nil) + + // Map order is undefined, sort them or we may get a different file generated each time. + keys := []string{} + for k := range g.refTypes { + keys = append(keys, k) + } + sort.Strings(keys) + deps := []string{} + for _, k := range keys { + v := g.refTypes[k] + if t, _ := openapi.GetOpenAPITypeFormat(v.String()); t != "" { + // This is a known type, we do not need a reference to it + // Will eliminate special case of time.Time + continue + } + deps = append(deps, k) + } + if len(deps) > 0 { + g.Do("Dependencies: []string{\n", args) + for _, k := range deps { + g.Do("\"$.$\",", k) + } + g.Do("},\n", nil) + } + g.Do("}\n}\n\n", nil) + } + return nil +} + +func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error { + extensions, errors := parseExtensions(t.CommentLines) + // Initially, we will only log struct extension errors. + if len(errors) > 0 { + for _, e := range errors { + klog.Errorf("[%s]: %s\n", t.String(), e) + } + } + unions, errors := parseUnions(t) + if len(errors) > 0 { + for _, e := range errors { + klog.Errorf("[%s]: %s\n", t.String(), e) + } + } + + // TODO(seans3): Validate struct extensions here. 
+ g.emitExtensions(extensions, unions) + return nil +} + +func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *types.Type) error { + extensions, parseErrors := parseExtensions(m.CommentLines) + validationErrors := validateMemberExtensions(extensions, m) + errors := append(parseErrors, validationErrors...) + // Initially, we will only log member extension errors. + if len(errors) > 0 { + errorPrefix := fmt.Sprintf("[%s] %s:", parent.String(), m.String()) + for _, e := range errors { + klog.V(2).Infof("%s %s\n", errorPrefix, e) + } + } + g.emitExtensions(extensions, nil) + return nil +} + +func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union) { + // If any extensions exist, then emit code to create them. + if len(extensions) == 0 && len(unions) == 0 { + return + } + g.Do("VendorExtensible: spec.VendorExtensible{\nExtensions: spec.Extensions{\n", nil) + for _, extension := range extensions { + g.Do("\"$.$\": ", extension.xName) + if extension.hasMultipleValues() || extension.isAlwaysArrayFormat() { + g.Do("[]interface{}{\n", nil) + } + for _, value := range extension.values { + g.Do("\"$.$\",\n", value) + } + if extension.hasMultipleValues() || extension.isAlwaysArrayFormat() { + g.Do("},\n", nil) + } + } + if len(unions) > 0 { + g.Do("\"x-kubernetes-unions\": []interface{}{\n", nil) + for _, u := range unions { + u.emit(g) + } + g.Do("},\n", nil) + } + g.Do("},\n},\n", nil) +} + +// TODO(#44005): Move this validation outside of this generator (probably to policy verifier) +func (g openAPITypeWriter) validatePatchTags(m *types.Member, parent *types.Type) error { + // TODO: Remove patch struct tag validation because they we are now consuming OpenAPI on server. 
+ for _, tagKey := range tempPatchTags { + structTagValue := reflect.StructTag(m.Tags).Get(tagKey) + commentTagValue, err := getSingleTagsValue(m.CommentLines, tagKey) + if err != nil { + return err + } + if structTagValue != commentTagValue { + return fmt.Errorf("Tags in comment and struct should match for member (%s) of (%s)", + m.Name, parent.Name.String()) + } + } + return nil +} + +func (g openAPITypeWriter) generateDescription(CommentLines []string) { + var buffer bytes.Buffer + delPrevChar := func() { + if buffer.Len() > 0 { + buffer.Truncate(buffer.Len() - 1) // Delete the last " " or "\n" + } + } + + for _, line := range CommentLines { + // Ignore all lines after --- + if line == "---" { + break + } + line = strings.TrimRight(line, " ") + leading := strings.TrimLeft(line, " ") + switch { + case len(line) == 0: // Keep paragraphs + delPrevChar() + buffer.WriteString("\n\n") + case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs + case strings.HasPrefix(leading, "+"): // Ignore instructions to go2idl + default: + if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { + delPrevChar() + line = "\n" + line + "\n" // Replace it with newline. This is useful when we have a line with: "Example:\n\tJSON-someting..." 
+ } else { + line += " " + } + buffer.WriteString(line) + } + } + + postDoc := strings.TrimRight(buffer.String(), "\n") + postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to " + postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape " + postDoc = strings.Replace(postDoc, "\n", "\\n", -1) + postDoc = strings.Replace(postDoc, "\t", "\\t", -1) + postDoc = strings.Trim(postDoc, " ") + if postDoc != "" { + g.Do("Description: \"$.$\",\n", postDoc) + } +} + +func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type) error { + name := getReferableName(m) + if name == "" { + return nil + } + if err := g.validatePatchTags(m, parent); err != nil { + return err + } + g.Do("\"$.$\": {\n", name) + if err := g.generateMemberExtensions(m, parent); err != nil { + return err + } + g.Do("SchemaProps: spec.SchemaProps{\n", nil) + g.generateDescription(m.CommentLines) + jsonTags := getJsonTags(m) + if len(jsonTags) > 1 && jsonTags[1] == "string" { + g.generateSimpleProperty("string", "") + g.Do("},\n},\n", nil) + return nil + } + t := resolveAliasAndPtrType(m.Type) + // If we can get a openAPI type and format for this type, we consider it to be simple property + typeString, format := openapi.GetOpenAPITypeFormat(t.String()) + if typeString != "" { + g.generateSimpleProperty(typeString, format) + g.Do("},\n},\n", nil) + return nil + } + switch t.Kind { + case types.Builtin: + return fmt.Errorf("please add type %v to getOpenAPITypeFormat function", t) + case types.Map: + if err := g.generateMapProperty(t); err != nil { + return err + } + case types.Slice, types.Array: + if err := g.generateSliceProperty(t); err != nil { + return err + } + case types.Struct, types.Interface: + g.generateReferenceProperty(t) + default: + return fmt.Errorf("cannot generate spec for type %v", t) + } + g.Do("},\n},\n", nil) + return g.Error() +} + +func (g openAPITypeWriter) generateSimpleProperty(typeString, format string) { + g.Do("Type: 
[]string{\"$.$\"},\n", typeString) + g.Do("Format: \"$.$\",\n", format) +} + +func (g openAPITypeWriter) generateReferenceProperty(t *types.Type) { + g.refTypes[t.Name.String()] = t + g.Do("Ref: ref(\"$.$\"),\n", t.Name.String()) +} + +func resolveAliasAndPtrType(t *types.Type) *types.Type { + var prev *types.Type + for prev != t { + prev = t + if t.Kind == types.Alias { + t = t.Underlying + } + if t.Kind == types.Pointer { + t = t.Elem + } + } + return t +} + +func (g openAPITypeWriter) generateMapProperty(t *types.Type) error { + keyType := resolveAliasAndPtrType(t.Key) + elemType := resolveAliasAndPtrType(t.Elem) + + // According to OpenAPI examples, only map from string is supported + if keyType.Name.Name != "string" { + return fmt.Errorf("map with non-string keys are not supported by OpenAPI in %v", t) + } + g.Do("Type: []string{\"object\"},\n", nil) + g.Do("AdditionalProperties: &spec.SchemaOrBool{\nAllows: true,\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) + typeString, format := openapi.GetOpenAPITypeFormat(elemType.String()) + if typeString != "" { + g.generateSimpleProperty(typeString, format) + g.Do("},\n},\n},\n", nil) + return nil + } + switch elemType.Kind { + case types.Builtin: + return fmt.Errorf("please add type %v to getOpenAPITypeFormat function", elemType) + case types.Struct: + g.generateReferenceProperty(elemType) + case types.Slice, types.Array: + if err := g.generateSliceProperty(elemType); err != nil { + return err + } + case types.Map: + if err := g.generateMapProperty(elemType); err != nil { + return err + } + default: + return fmt.Errorf("map Element kind %v is not supported in %v", elemType.Kind, t.Name) + } + g.Do("},\n},\n},\n", nil) + return nil +} + +func (g openAPITypeWriter) generateSliceProperty(t *types.Type) error { + elemType := resolveAliasAndPtrType(t.Elem) + g.Do("Type: []string{\"array\"},\n", nil) + g.Do("Items: &spec.SchemaOrArray{\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) + 
typeString, format := openapi.GetOpenAPITypeFormat(elemType.String()) + if typeString != "" { + g.generateSimpleProperty(typeString, format) + g.Do("},\n},\n},\n", nil) + return nil + } + switch elemType.Kind { + case types.Builtin: + return fmt.Errorf("please add type %v to getOpenAPITypeFormat function", elemType) + case types.Struct: + g.generateReferenceProperty(elemType) + case types.Slice, types.Array: + if err := g.generateSliceProperty(elemType); err != nil { + return err + } + case types.Map: + if err := g.generateMapProperty(elemType); err != nil { + return err + } + default: + return fmt.Errorf("slice Element kind %v is not supported in %v", elemType.Kind, t) + } + g.Do("},\n},\n},\n", nil) + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS new file mode 100644 index 0000000000..235bc545b8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS @@ -0,0 +1,4 @@ +reviewers: +- roycaihw +approvers: +- roycaihw diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go new file mode 100644 index 0000000000..384a44dca0 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package rules contains API rules that are enforced in OpenAPI spec generation +// as part of the machinery. Files under this package implement APIRule interface +// which evaluates Go type and produces list of API rule violations. +// +// Implementations of APIRule should be added to API linter under openAPIGen code- +// generator to get integrated in the generation process. +package rules diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go new file mode 100644 index 0000000000..7c5ebb30f6 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go @@ -0,0 +1,36 @@ +package rules + +import ( + "k8s.io/gengo/types" +) + +const ListTypeIDLTag = "listType" + +// ListTypeMissing implements APIRule interface. +// A list type is required for inlined list. +type ListTypeMissing struct{} + +// Name returns the name of APIRule +func (l *ListTypeMissing) Name() string { + return "list_type_missing" +} + +// Validate evaluates API rule on type t and returns a list of field names in +// the type that violate the rule. Empty field name [""] implies the entire +// type violates the rule. +func (l *ListTypeMissing) Validate(t *types.Type) ([]string, error) { + fields := make([]string, 0) + + switch t.Kind { + case types.Struct: + for _, m := range t.Members { + if m.Type.Kind == types.Slice && types.ExtractCommentTags("+", m.CommentLines)[ListTypeIDLTag] == nil { + fields = append(fields, m.Name) + continue + } + } + } + + return fields, nil + +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go new file mode 100644 index 0000000000..3a71ff178b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go @@ -0,0 +1,172 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "reflect" + "strings" + + "k8s.io/kube-openapi/pkg/util/sets" + + "k8s.io/gengo/types" +) + +var ( + // Blacklist of JSON tags that should skip match evaluation + jsonTagBlacklist = sets.NewString( + // Omitted field is ignored by the package + "-", + ) + + // Blacklist of JSON names that should skip match evaluation + jsonNameBlacklist = sets.NewString( + // Empty name is used for inline struct field (e.g. metav1.TypeMeta) + "", + // Special case for object and list meta + "metadata", + ) + + // List of substrings that aren't allowed in Go name and JSON name + disallowedNameSubstrings = sets.NewString( + // Underscore is not allowed in either name + "_", + // Dash is not allowed in either name. Note that since dash is a valid JSON tag, this should be checked + // after JSON tag blacklist check. + "-", + ) +) + +/* +NamesMatch implements APIRule interface. +Go field names must be CamelCase. JSON field names must be camelCase. Other than capitalization of the +initial letter, the two should almost always match. No underscores nor dashes in either. +This rule verifies the convention "Other than capitalization of the initial letter, the two should almost always match." 
+Examples (also in unit test): + Go name | JSON name | match + podSpec false + PodSpec podSpec true + PodSpec PodSpec false + podSpec podSpec false + PodSpec spec false + Spec podSpec false + JSONSpec jsonSpec true + JSONSpec jsonspec false + HTTPJSONSpec httpJSONSpec true +NOTE: this validator cannot tell two sequential all-capital words from one word, therefore the case below +is also considered matched. + HTTPJSONSpec httpjsonSpec true +NOTE: JSON names in jsonNameBlacklist should skip evaluation + true + podSpec true + podSpec - true + podSpec metadata true +*/ +type NamesMatch struct{} + +// Name returns the name of APIRule +func (n *NamesMatch) Name() string { + return "names_match" +} + +// Validate evaluates API rule on type t and returns a list of field names in +// the type that violate the rule. Empty field name [""] implies the entire +// type violates the rule. +func (n *NamesMatch) Validate(t *types.Type) ([]string, error) { + fields := make([]string, 0) + + // Only validate struct type and ignore the rest + switch t.Kind { + case types.Struct: + for _, m := range t.Members { + goName := m.Name + jsonTag, ok := reflect.StructTag(m.Tags).Lookup("json") + // Distinguish empty JSON tag and missing JSON tag. Empty JSON tag / name is + // allowed (in JSON name blacklist) but missing JSON tag is invalid. + if !ok { + fields = append(fields, goName) + continue + } + if jsonTagBlacklist.Has(jsonTag) { + continue + } + jsonName := strings.Split(jsonTag, ",")[0] + if !namesMatch(goName, jsonName) { + fields = append(fields, goName) + } + } + } + return fields, nil +} + +// namesMatch evaluates if goName and jsonName match the API rule +// TODO: Use an off-the-shelf CamelCase solution instead of implementing this logic. 
The following existing +// packages have been tried out: +// github.com/markbates/inflect +// github.com/segmentio/go-camelcase +// github.com/iancoleman/strcase +// github.com/fatih/camelcase +// Please see https://github.com/kubernetes/kube-openapi/pull/83#issuecomment-400842314 for more details +// about why they don't satisfy our need. What we need can be a function that detects an acronym at the +// beginning of a string. +func namesMatch(goName, jsonName string) bool { + if jsonNameBlacklist.Has(jsonName) { + return true + } + if !isAllowedName(goName) || !isAllowedName(jsonName) { + return false + } + if strings.ToLower(goName) != strings.ToLower(jsonName) { + return false + } + // Go field names must be CamelCase. JSON field names must be camelCase. + if !isCapital(goName[0]) || isCapital(jsonName[0]) { + return false + } + for i := 0; i < len(goName); i++ { + if goName[i] == jsonName[i] { + // goName[0:i-1] is uppercase and jsonName[0:i-1] is lowercase, goName[i:] + // and jsonName[i:] should match; + // goName[i] should be lowercase if i is equal to 1, e.g.: + // goName | jsonName + // PodSpec podSpec + // or uppercase if i is greater than 1, e.g.: + // goname | jsonName + // JSONSpec jsonSpec + // This is to rule out cases like: + // goname | jsonName + // JSONSpec jsonspec + return goName[i:] == jsonName[i:] && (i == 1 || isCapital(goName[i])) + } + } + return true +} + +// isCaptical returns true if one character is capital +func isCapital(b byte) bool { + return b >= 'A' && b <= 'Z' +} + +// isAllowedName checks the list of disallowedNameSubstrings and returns true if name doesn't contain +// any disallowed substring. 
+func isAllowedName(name string) bool { + for _, substr := range disallowedNameSubstrings.UnsortedList() { + if strings.Contains(name, substr) { + return false + } + } + return true +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go new file mode 100644 index 0000000000..dd37ad8a57 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go @@ -0,0 +1,64 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "reflect" + "strings" + + "k8s.io/gengo/types" +) + +// OmitEmptyMatchCase implements APIRule interface. +// "omitempty" must appear verbatim (no case variants). 
+type OmitEmptyMatchCase struct{} + +func (n *OmitEmptyMatchCase) Name() string { + return "omitempty_match_case" +} + +func (n *OmitEmptyMatchCase) Validate(t *types.Type) ([]string, error) { + fields := make([]string, 0) + + // Only validate struct type and ignore the rest + switch t.Kind { + case types.Struct: + for _, m := range t.Members { + goName := m.Name + jsonTag, ok := reflect.StructTag(m.Tags).Lookup("json") + if !ok { + continue + } + + parts := strings.Split(jsonTag, ",") + if len(parts) < 2 { + // no tags other than name + continue + } + if parts[0] == "-" { + // not serialized + continue + } + for _, part := range parts[1:] { + if strings.EqualFold(part, "omitempty") && part != "omitempty" { + fields = append(fields, goName) + } + } + } + } + return fields, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/union.go b/vendor/k8s.io/kube-openapi/pkg/generators/union.go new file mode 100644 index 0000000000..a0281fe470 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/union.go @@ -0,0 +1,207 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generators + +import ( + "fmt" + "sort" + + "k8s.io/gengo/types" +) + +const tagUnionMember = "union" +const tagUnionDeprecated = "unionDeprecated" +const tagUnionDiscriminator = "unionDiscriminator" + +type union struct { + discriminator string + fieldsToDiscriminated map[string]string +} + +// emit prints the union, can be called on a nil union (emits nothing) +func (u *union) emit(g openAPITypeWriter) { + if u == nil { + return + } + g.Do("map[string]interface{}{\n", nil) + if u.discriminator != "" { + g.Do("\"discriminator\": \"$.$\",\n", u.discriminator) + } + g.Do("\"fields-to-discriminateBy\": map[string]interface{}{\n", nil) + keys := []string{} + for field := range u.fieldsToDiscriminated { + keys = append(keys, field) + } + sort.Strings(keys) + for _, field := range keys { + g.Do("\"$.$\": ", field) + g.Do("\"$.$\",\n", u.fieldsToDiscriminated[field]) + } + g.Do("},\n", nil) + g.Do("},\n", nil) +} + +// Sets the discriminator if it's not set yet, otherwise return an error +func (u *union) setDiscriminator(value string) []error { + errors := []error{} + if u.discriminator != "" { + errors = append(errors, fmt.Errorf("at least two discriminators found: %v and %v", value, u.discriminator)) + } + u.discriminator = value + return errors +} + +// Add a new member to the union +func (u *union) addMember(jsonName, variableName string) { + if _, ok := u.fieldsToDiscriminated[jsonName]; ok { + panic(fmt.Errorf("same field (%v) found multiple times", jsonName)) + } + u.fieldsToDiscriminated[jsonName] = variableName +} + +// Makes sure that the union is valid, specifically looking for re-used discriminated +func (u *union) isValid() []error { + errors := []error{} + // Case 1: discriminator but no fields + if u.discriminator != "" && len(u.fieldsToDiscriminated) == 0 { + errors = append(errors, fmt.Errorf("discriminator set with no fields in union")) + } + // Case 2: two fields have the same discriminated value + discriminated := map[string]struct{}{} 
+ for _, d := range u.fieldsToDiscriminated { + if _, ok := discriminated[d]; ok { + errors = append(errors, fmt.Errorf("discriminated value is used twice: %v", d)) + } + discriminated[d] = struct{}{} + } + // Case 3: a field is both discriminator AND part of the union + if u.discriminator != "" { + if _, ok := u.fieldsToDiscriminated[u.discriminator]; ok { + errors = append(errors, fmt.Errorf("%v can't be both discriminator and part of the union", u.discriminator)) + } + } + return errors +} + +// Find unions either directly on the members (or inlined members, not +// going across types) or on the type itself, or on embedded types. +func parseUnions(t *types.Type) ([]union, []error) { + errors := []error{} + unions := []union{} + su, err := parseUnionStruct(t) + if su != nil { + unions = append(unions, *su) + } + errors = append(errors, err...) + eu, err := parseEmbeddedUnion(t) + unions = append(unions, eu...) + errors = append(errors, err...) + mu, err := parseUnionMembers(t) + if mu != nil { + unions = append(unions, *mu) + } + errors = append(errors, err...) + return unions, errors +} + +// Find unions in embedded types, unions shouldn't go across types. +func parseEmbeddedUnion(t *types.Type) ([]union, []error) { + errors := []error{} + unions := []union{} + for _, m := range t.Members { + if hasOpenAPITagValue(m.CommentLines, tagValueFalse) { + continue + } + if !shouldInlineMembers(&m) { + continue + } + u, err := parseUnions(m.Type) + unions = append(unions, u...) + errors = append(errors, err...) + } + return unions, errors +} + +// Look for union tag on a struct, and then include all the fields +// (except the discriminator if there is one). The struct shouldn't have +// embedded types. 
+func parseUnionStruct(t *types.Type) (*union, []error) { + errors := []error{} + if types.ExtractCommentTags("+", t.CommentLines)[tagUnionMember] == nil { + return nil, nil + } + + u := &union{fieldsToDiscriminated: map[string]string{}} + + for _, m := range t.Members { + jsonName := getReferableName(&m) + if jsonName == "" { + continue + } + if shouldInlineMembers(&m) { + errors = append(errors, fmt.Errorf("union structures can't have embedded fields: %v.%v", t.Name, m.Name)) + continue + } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil { + errors = append(errors, fmt.Errorf("union struct can't have unionDeprecated members: %v.%v", t.Name, m.Name)) + continue + } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil { + errors = append(errors, u.setDiscriminator(jsonName)...) + } else { + if !hasOptionalTag(&m) { + errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name)) + } + u.addMember(jsonName, m.Name) + } + } + + return u, errors +} + +// Find unions specifically on members. +func parseUnionMembers(t *types.Type) (*union, []error) { + errors := []error{} + u := &union{fieldsToDiscriminated: map[string]string{}} + + for _, m := range t.Members { + jsonName := getReferableName(&m) + if jsonName == "" { + continue + } + if shouldInlineMembers(&m) { + continue + } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil { + errors = append(errors, u.setDiscriminator(jsonName)...) 
+ } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionMember] != nil { + errors = append(errors, fmt.Errorf("union tag is not accepted on struct members: %v.%v", t.Name, m.Name)) + continue + } + if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil { + if !hasOptionalTag(&m) { + errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name)) + } + u.addMember(jsonName, m.Name) + } + } + if len(u.fieldsToDiscriminated) == 0 { + return nil, nil + } + return u, append(errors, u.isValid()...) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go b/vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go new file mode 100644 index 0000000000..13303ea890 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +// NOTE: This file is copied from k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go +// because in Kubernetes we don't allowed vendor code to import staging code. See +// https://github.com/kubernetes/kube-openapi/pull/90 for more details. + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. 
+type Empty struct{} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/sets/string.go b/vendor/k8s.io/kube-openapi/pkg/util/sets/string.go new file mode 100644 index 0000000000..53f2bc12aa --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/sets/string.go @@ -0,0 +1,207 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +// NOTE: This file is copied from k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +// because in Kubernetes we don't allowed vendor code to import staging code. See +// https://github.com/kubernetes/kube-openapi/pull/90 for more details. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. 
+func (s String) Insert(items ...string) { + for _, item := range items { + s[item] = Empty{} + } +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. 
+func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 598c8bd91c..c2049add48 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -456,6 +456,7 @@ k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/client-go/discovery k8s.io/client-go/discovery/cached k8s.io/client-go/discovery/cached/memory +k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration @@ -605,6 +606,7 @@ k8s.io/client-go/plugin/pkg/client/auth/openstack k8s.io/client-go/rest k8s.io/client-go/rest/watch k8s.io/client-go/restmapper +k8s.io/client-go/testing k8s.io/client-go/third_party/forked/golang/template k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache