StatProfilerHTML.jl report
Generated on Mon, 01 Apr 2024 20:34:29
File source code
Line Exclusive Inclusive Code
1 # This file is a part of Julia. License is MIT: https://julialang.org/license
2
3 ## linalg.jl: Some generic Linear Algebra definitions
4
5 # Elements of `out` may not be defined (e.g., for `BigFloat`). To make
6 # `mul!(out, A, B)` work for such cases, `out .*ₛ beta` short-circuits
7 # `out * beta`. Using `broadcasted` to avoid the multiplication
8 # inside this function.
# Short-circuiting scalar multiply for use in fused broadcasts: when `beta`
# is zero we return `false` (a strong zero under broadcasting) so that
# possibly-undefined elements of `out` (e.g. `BigFloat`) are never read.
function *ₛ end
function Broadcast.broadcasted(::typeof(*ₛ), out, beta)
    iszero(beta::Number) && return false
    return Broadcast.broadcasted(*, out, beta)
end
12
13 """
14 MulAddMul(alpha, beta)
15
16 A callable for operating short-circuiting version of `x * alpha + y * beta`.
17
18 # Examples
19 ```jldoctest
20 julia> using LinearAlgebra: MulAddMul
21
22 julia> _add = MulAddMul(1, 0);
23
24 julia> _add(123, nothing)
25 123
26
27 julia> MulAddMul(12, 34)(56, 78) == 56 * 12 + 78 * 34
28 true
29 ```
30 """
struct MulAddMul{ais1, bis0, TA, TB}
    alpha::TA
    beta::TB
end

# Lift `isone(alpha)` / `iszero(beta)` into the type parameters so the call
# operators below can short-circuit at compile time.
@inline function MulAddMul(alpha::TA, beta::TB) where {TA,TB}
    if isone(alpha)
        return iszero(beta) ? MulAddMul{true,true,TA,TB}(alpha, beta) :
                              MulAddMul{true,false,TA,TB}(alpha, beta)
    else
        return iszero(beta) ? MulAddMul{false,true,TA,TB}(alpha, beta) :
                              MulAddMul{false,false,TA,TB}(alpha, beta)
    end
end

# Default: alpha = 1, beta = 0 — i.e. plain assignment semantics.
MulAddMul() = MulAddMul{true,true,Bool,Bool}(true, false)

# One-argument form: `x * alpha`, never touching beta (safe when the
# destination element is undefined). Two-argument form: `x*alpha + y*beta`.
@inline (::MulAddMul{true})(x) = x
@inline (p::MulAddMul{false})(x) = x * p.alpha
@inline (::MulAddMul{true, true})(x, _) = x
@inline (p::MulAddMul{false, true})(x, _) = x * p.alpha
@inline (p::MulAddMul{true, false})(x, y) = x + y * p.beta
@inline (p::MulAddMul{false, false})(x, y) = x * p.alpha + y * p.beta
60
61 """
62 _modify!(_add::MulAddMul, x, C, idx)
63
64 Short-circuiting version of `C[idx] = _add(x, C[idx])`.
65
66 Short-circuiting the indexing `C[idx]` is necessary for avoiding `UndefRefError`
67 when mutating an array of non-primitive numbers such as `BigFloat`.
68
69 # Examples
70 ```jldoctest
71 julia> using LinearAlgebra: MulAddMul, _modify!
72
73 julia> _add = MulAddMul(1, 0);
74 C = Vector{BigFloat}(undef, 1);
75
76 julia> _modify!(_add, 123, C, 1)
77
78 julia> C
79 1-element Vector{BigFloat}:
80 123.0
81 ```
82 """
# Store `_add(x, C[idx′])` into `C[idx′]` without reading `C[idx′]` when
# beta == 0 (`bis0`); reading would throw `UndefRefError` for arrays whose
# elements (e.g. `BigFloat`) may be undefined. `bis0` is a type parameter,
# so this branch is resolved at compile time.
@inline @propagate_inbounds function _modify!(p::MulAddMul{ais1, bis0},
                                              x, C, idx′) where {ais1, bis0}
    # `idx′` may be an integer, a tuple of integer, or a `CartesianIndex`.
    # Let `CartesianIndex` constructor normalize them so that it can be
    # used uniformly. It also acts as a workaround for performance penalty
    # of splatting a number (#29114):
    idx = CartesianIndex(idx′)
    if bis0
        # beta == 0: plain store; never read the (possibly undefined) old value.
        C[idx] = p(x)
    else
        C[idx] = p(x, C[idx])
    end
    return
end
97
# Scale `C` by `beta` in place, treating `beta == 0` as a strong zero:
# entries are overwritten with zero (never read), so arrays with undefined
# elements are handled correctly.
@inline function _rmul_or_fill!(C::AbstractArray, beta::Number)
    isempty(C) && return C
    if iszero(beta)
        fill!(C, zero(eltype(C)))
    else
        rmul!(C, beta)
    end
    return C
end
109
110
# C = (X .* s) combined with the old C via `_add` (i.e. alpha/beta scaling).
# Scalar multiplied from the right to respect noncommutative element types.
function generic_mul!(C::AbstractArray, X::AbstractArray, s::Number, _add::MulAddMul)
    if length(C) != length(X)
        throw(DimensionMismatch("first array has length $(length(C)) which does not match the length of the second, $(length(X))."))
    end
    for (IC, IX) in zip(eachindex(C), eachindex(X))
        @inbounds _modify!(_add, X[IX] * s, C, IC)
    end
    C
end
120
# C = (s .* X) combined with the old C via `_add` (i.e. alpha/beta scaling).
# Scalar multiplied from the left to respect noncommutative element types.
# Fix: the DimensionMismatch message previously contained a hard line break
# mid-sentence; it now matches the sibling (X, s) method exactly.
function generic_mul!(C::AbstractArray, s::Number, X::AbstractArray, _add::MulAddMul)
    if length(C) != length(X)
        throw(DimensionMismatch("first array has length $(length(C)) which does not match the length of the second, $(length(X))."))
    end
    for (IC, IX) in zip(eachindex(C), eachindex(X))
        @inbounds _modify!(_add, s * X[IX], C, IC)
    end
    C
end
131
# 5-arg mul! for scalar*array: C = (s*X)*alpha + C*beta.
# The broadcast path uses `.*ₛ` so beta == 0 short-circuits and
# possibly-undefined elements of C are never read; the fallback handles
# mismatched (but equal-length) axes element-by-element.
@inline function mul!(C::AbstractArray, s::Number, X::AbstractArray, alpha::Number, beta::Number)
    if axes(C) == axes(X)
        C .= (s .* X) .*ₛ alpha .+ C .*ₛ beta
    else
        generic_mul!(C, s, X, MulAddMul(alpha, beta))
    end
    return C
end
# Same as above with the scalar multiplied from the right: C = (X*s)*alpha + C*beta.
@inline function mul!(C::AbstractArray, X::AbstractArray, s::Number, alpha::Number, beta::Number)
    if axes(C) == axes(X)
        C .= (X .* s) .*ₛ alpha .+ C .*ₛ beta
    else
        generic_mul!(C, X, s, MulAddMul(alpha, beta))
    end
    return C
end
148
149 # For better performance when input and output are the same array
150 # See https://github.com/JuliaLang/julia/issues/8415#issuecomment-56608729
151 """
152 rmul!(A::AbstractArray, b::Number)
153
154 Scale an array `A` by a scalar `b` overwriting `A` in-place. Use
155 [`lmul!`](@ref) to multiply scalar from left. The scaling operation
156 respects the semantics of the multiplication [`*`](@ref) between an
157 element of `A` and `b`. In particular, this also applies to
158 multiplication involving non-finite numbers such as `NaN` and `±Inf`.
159
160 !!! compat "Julia 1.1"
161 Prior to Julia 1.1, `NaN` and `±Inf` entries in `A` were treated
162 inconsistently.
163
164 # Examples
165 ```jldoctest
166 julia> A = [1 2; 3 4]
167 2×2 Matrix{Int64}:
168 1 2
169 3 4
170
171 julia> rmul!(A, 2)
172 2×2 Matrix{Int64}:
173 2 4
174 6 8
175
176 julia> rmul!([NaN], 0.0)
177 1-element Vector{Float64}:
178 NaN
179 ```
180 """
function rmul!(X::AbstractArray, s::Number)
    # Multiply from the right to respect noncommutative element types.
    @simd for idx in eachindex(X)
        @inbounds X[idx] = X[idx] * s
    end
    return X
end
187
188
189 """
190 lmul!(a::Number, B::AbstractArray)
191
192 Scale an array `B` by a scalar `a` overwriting `B` in-place. Use
193 [`rmul!`](@ref) to multiply scalar from right. The scaling operation
194 respects the semantics of the multiplication [`*`](@ref) between `a`
195 and an element of `B`. In particular, this also applies to
196 multiplication involving non-finite numbers such as `NaN` and `±Inf`.
197
198 !!! compat "Julia 1.1"
199 Prior to Julia 1.1, `NaN` and `±Inf` entries in `B` were treated
200 inconsistently.
201
202 # Examples
203 ```jldoctest
204 julia> B = [1 2; 3 4]
205 2×2 Matrix{Int64}:
206 1 2
207 3 4
208
209 julia> lmul!(2, B)
210 2×2 Matrix{Int64}:
211 2 4
212 6 8
213
214 julia> lmul!(0.0, [Inf])
215 1-element Vector{Float64}:
216 NaN
217 ```
218 """
function lmul!(s::Number, X::AbstractArray)
    # Multiply from the left to respect noncommutative element types.
    @simd for idx in eachindex(X)
        @inbounds X[idx] = s * X[idx]
    end
    return X
end
225
226 """
227 rdiv!(A::AbstractArray, b::Number)
228
229 Divide each entry in an array `A` by a scalar `b` overwriting `A`
230 in-place. Use [`ldiv!`](@ref) to divide scalar from left.
231
232 # Examples
233 ```jldoctest
234 julia> A = [1.0 2.0; 3.0 4.0]
235 2×2 Matrix{Float64}:
236 1.0 2.0
237 3.0 4.0
238
239 julia> rdiv!(A, 2.0)
240 2×2 Matrix{Float64}:
241 0.5 1.0
242 1.5 2.0
243 ```
244 """
function rdiv!(X::AbstractArray, s::Number)
    # Right-division keeps operand order for noncommutative element types.
    @simd for idx in eachindex(X)
        @inbounds X[idx] = X[idx] / s
    end
    return X
end
251
252 """
253 ldiv!(a::Number, B::AbstractArray)
254
255 Divide each entry in an array `B` by a scalar `a` overwriting `B`
256 in-place. Use [`rdiv!`](@ref) to divide scalar from right.
257
258 # Examples
259 ```jldoctest
260 julia> B = [1.0 2.0; 3.0 4.0]
261 2×2 Matrix{Float64}:
262 1.0 2.0
263 3.0 4.0
264
265 julia> ldiv!(2.0, B)
266 2×2 Matrix{Float64}:
267 0.5 1.0
268 1.5 2.0
269 ```
270 """
function ldiv!(s::Number, X::AbstractArray)
    # `s \ x` (not `x / s`): left-division for noncommutative element types.
    @simd for idx in eachindex(X)
        @inbounds X[idx] = s \ X[idx]
    end
    return X
end
# Out-of-place variant writing into `Y`.
ldiv!(Y::AbstractArray, s::Number, X::AbstractArray) = Y .= s .\ X
278
# Generic fallback. This assumes that B and Y have the same sizes.
# Copy B into Y first, then solve in place via the two-argument ldiv!.
ldiv!(Y::AbstractArray, A::AbstractMatrix, B::AbstractArray) = ldiv!(A, copyto!(Y, B))
281
282
283 """
284 cross(x, y)
285 ×(x,y)
286
287 Compute the cross product of two 3-vectors.
288
289 # Examples
290 ```jldoctest
291 julia> a = [0;1;0]
292 3-element Vector{Int64}:
293 0
294 1
295 0
296
297 julia> b = [0;0;1]
298 3-element Vector{Int64}:
299 0
300 0
301 1
302
303 julia> cross(a,b)
304 3-element Vector{Int64}:
305 1
306 0
307 0
308 ```
309 """
function cross(a::AbstractVector, b::AbstractVector)
    # The cross product is only defined in three dimensions.
    length(a) == length(b) == 3 ||
        throw(DimensionMismatch("cross product is only defined for vectors of length 3"))
    a1, a2, a3 = a
    b1, b2, b3 = b
    return [a2*b3 - a3*b2, a3*b1 - a1*b3, a1*b2 - a2*b1]
end
318
319 """
320 triu(M)
321
322 Upper triangle of a matrix.
323
324 # Examples
325 ```jldoctest
326 julia> a = fill(1.0, (4,4))
327 4×4 Matrix{Float64}:
328 1.0 1.0 1.0 1.0
329 1.0 1.0 1.0 1.0
330 1.0 1.0 1.0 1.0
331 1.0 1.0 1.0 1.0
332
333 julia> triu(a)
334 4×4 Matrix{Float64}:
335 1.0 1.0 1.0 1.0
336 0.0 1.0 1.0 1.0
337 0.0 0.0 1.0 1.0
338 0.0 0.0 0.0 1.0
339 ```
340 """
# Out-of-place: copy, then zero below the diagonal in place.
triu(M::AbstractMatrix) = triu!(copy(M))
342
343 """
344 tril(M)
345
346 Lower triangle of a matrix.
347
348 # Examples
349 ```jldoctest
350 julia> a = fill(1.0, (4,4))
351 4×4 Matrix{Float64}:
352 1.0 1.0 1.0 1.0
353 1.0 1.0 1.0 1.0
354 1.0 1.0 1.0 1.0
355 1.0 1.0 1.0 1.0
356
357 julia> tril(a)
358 4×4 Matrix{Float64}:
359 1.0 0.0 0.0 0.0
360 1.0 1.0 0.0 0.0
361 1.0 1.0 1.0 0.0
362 1.0 1.0 1.0 1.0
363 ```
364 """
# Out-of-place: copy, then zero above the diagonal in place.
tril(M::AbstractMatrix) = tril!(copy(M))
366
367 """
368 triu(M, k::Integer)
369
370 Return the upper triangle of `M` starting from the `k`th superdiagonal.
371
372 # Examples
373 ```jldoctest
374 julia> a = fill(1.0, (4,4))
375 4×4 Matrix{Float64}:
376 1.0 1.0 1.0 1.0
377 1.0 1.0 1.0 1.0
378 1.0 1.0 1.0 1.0
379 1.0 1.0 1.0 1.0
380
381 julia> triu(a,3)
382 4×4 Matrix{Float64}:
383 0.0 0.0 0.0 1.0
384 0.0 0.0 0.0 0.0
385 0.0 0.0 0.0 0.0
386 0.0 0.0 0.0 0.0
387
388 julia> triu(a,-3)
389 4×4 Matrix{Float64}:
390 1.0 1.0 1.0 1.0
391 1.0 1.0 1.0 1.0
392 1.0 1.0 1.0 1.0
393 1.0 1.0 1.0 1.0
394 ```
395 """
# Out-of-place variant keeping entries on and above the k-th diagonal.
triu(M::AbstractMatrix,k::Integer) = triu!(copy(M),k)
397
398 """
399 tril(M, k::Integer)
400
401 Return the lower triangle of `M` starting from the `k`th superdiagonal.
402
403 # Examples
404 ```jldoctest
405 julia> a = fill(1.0, (4,4))
406 4×4 Matrix{Float64}:
407 1.0 1.0 1.0 1.0
408 1.0 1.0 1.0 1.0
409 1.0 1.0 1.0 1.0
410 1.0 1.0 1.0 1.0
411
412 julia> tril(a,3)
413 4×4 Matrix{Float64}:
414 1.0 1.0 1.0 1.0
415 1.0 1.0 1.0 1.0
416 1.0 1.0 1.0 1.0
417 1.0 1.0 1.0 1.0
418
419 julia> tril(a,-3)
420 4×4 Matrix{Float64}:
421 0.0 0.0 0.0 0.0
422 0.0 0.0 0.0 0.0
423 0.0 0.0 0.0 0.0
424 1.0 0.0 0.0 0.0
425 ```
426 """
# Out-of-place variant keeping entries on and below the k-th diagonal.
tril(M::AbstractMatrix,k::Integer) = tril!(copy(M),k)
428
429 """
430 triu!(M)
431
432 Upper triangle of a matrix, overwriting `M` in the process.
433 See also [`triu`](@ref).
434 """
# In-place upper triangle relative to the main diagonal (k = 0).
triu!(M::AbstractMatrix) = triu!(M,0)
436
437 """
438 tril!(M)
439
440 Lower triangle of a matrix, overwriting `M` in the process.
441 See also [`tril`](@ref).
442 """
# In-place lower triangle relative to the main diagonal (k = 0).
tril!(M::AbstractMatrix) = tril!(M,0)
444
# `diag` of a vector is ambiguous; direct users to `diagm` instead.
function diag(A::AbstractVector)
    throw(ArgumentError("use diagm instead of diag to construct a diagonal matrix"))
end
446
447 ###########################################################################################
448 # Dot products and norms
449
# special cases of norm; note that they don't need to handle isempty(x)
function generic_normMinusInf(x)
    return float(mapreduce(norm, min, x))
end

function generic_normInf(x)
    return float(mapreduce(norm, max, x))
end

function generic_norm1(x)
    return mapreduce(float ∘ norm, +, x)
end

# faster computation of norm(x)^2; the Integer specialization promotes to
# float *before* squaring so the square cannot overflow.
norm_sqr(x) = norm(x)^2
norm_sqr(x::Number) = abs2(x)
norm_sqr(x::Union{T,Complex{T},Rational{T}}) where {T<:Integer} = abs2(float(x))
461
# 2-norm with overflow/underflow protection: when naive squaring would
# overflow (or underflow to zero), rescale each term by `maxabs` first.
function generic_norm2(x)
    maxabs = normInf(x)
    (ismissing(maxabs) || iszero(maxabs) || isinf(maxabs)) && return maxabs
    (v, s) = iterate(x)::Tuple
    T = typeof(maxabs)
    if isfinite(length(x)*maxabs*maxabs) && !iszero(maxabs*maxabs) # Scaling not necessary
        # Accumulate in at least Float64 precision to limit rounding error.
        sum::promote_type(Float64, T) = norm_sqr(v)
        while true
            y = iterate(x, s)
            y === nothing && break
            (v, s) = y
            sum += norm_sqr(v)
        end
        ismissing(sum) && return missing
        return convert(T, sqrt(sum))
    else
        # Rescaled accumulation: every term (norm(v)/maxabs)^2 lies in [0, 1].
        sum = abs2(norm(v)/maxabs)
        while true
            y = iterate(x, s)
            y === nothing && break
            (v, s) = y
            sum += (norm(v)/maxabs)^2
        end
        ismissing(sum) && return missing
        return convert(T, maxabs*sqrt(sum))
    end
end

# Compute L_p norm ‖x‖ₚ = sum(abs(x).^p)^(1/p)
# (Not technically a "norm" for p < 1.)
function generic_normp(x, p)
    (v, s) = iterate(x)::Tuple
    if p > 1 || p < -1 # might need to rescale to avoid overflow
        maxabs = p > 1 ? normInf(x) : normMinusInf(x)
        (ismissing(maxabs) || iszero(maxabs) || isinf(maxabs)) && return maxabs
        T = typeof(maxabs)
    else
        T = typeof(float(norm(v)))
    end
    spp::promote_type(Float64, T) = p
    # NOTE: `maxabs` is only assigned when |p| > 1; the `-1 <= p <= 1`
    # short-circuit below is what keeps it from being an undefined variable.
    if -1 <= p <= 1 || (isfinite(length(x)*maxabs^spp) && !iszero(maxabs^spp)) # scaling not necessary
        sum::promote_type(Float64, T) = norm(v)^spp
        while true
            y = iterate(x, s)
            y === nothing && break
            (v, s) = y
            ismissing(v) && return missing
            sum += norm(v)^spp
        end
        return convert(T, sum^inv(spp))
    else # rescaling
        sum = (norm(v)/maxabs)^spp
        ismissing(sum) && return missing
        while true
            y = iterate(x, s)
            y === nothing && break
            (v, s) = y
            ismissing(v) && return missing
            sum += (norm(v)/maxabs)^spp
        end
        return convert(T, maxabs*sum^inv(spp))
    end
end

# Overridable hooks: types can specialize these kernels individually.
normMinusInf(x) = generic_normMinusInf(x)
normInf(x) = generic_normInf(x)
norm1(x) = generic_norm1(x)
norm2(x) = generic_norm2(x)
normp(x, p) = generic_normp(x, p)
531
532
533 """
534 norm(A, p::Real=2)
535
536 For any iterable container `A` (including arrays of any dimension) of numbers (or any
537 element type for which `norm` is defined), compute the `p`-norm (defaulting to `p=2`) as if
538 `A` were a vector of the corresponding length.
539
540 The `p`-norm is defined as
541 ```math
542 \\|A\\|_p = \\left( \\sum_{i=1}^n | a_i | ^p \\right)^{1/p}
543 ```
544 with ``a_i`` the entries of ``A``, ``| a_i |`` the [`norm`](@ref) of ``a_i``, and
545 ``n`` the length of ``A``. Since the `p`-norm is computed using the [`norm`](@ref)s
546 of the entries of `A`, the `p`-norm of a vector of vectors is not compatible with
547 the interpretation of it as a block vector in general if `p != 2`.
548
549 `p` can assume any numeric value (even though not all values produce a
550 mathematically valid vector norm). In particular, `norm(A, Inf)` returns the largest value
551 in `abs.(A)`, whereas `norm(A, -Inf)` returns the smallest. If `A` is a matrix and `p=2`,
552 then this is equivalent to the Frobenius norm.
553
554 The second argument `p` is not necessarily a part of the interface for `norm`, i.e. a custom
555 type may only implement `norm(A)` without second argument.
556
557 Use [`opnorm`](@ref) to compute the operator norm of a matrix.
558
559 # Examples
560 ```jldoctest
561 julia> v = [3, -2, 6]
562 3-element Vector{Int64}:
563 3
564 -2
565 6
566
567 julia> norm(v)
568 7.0
569
570 julia> norm(v, 1)
571 11.0
572
573 julia> norm(v, Inf)
574 6.0
575
576 julia> norm([1 2 3; 4 5 6; 7 8 9])
577 16.881943016134134
578
579 julia> norm([1 2 3 4 5 6 7 8 9])
580 16.881943016134134
581
582 julia> norm(1:9)
583 16.881943016134134
584
585 julia> norm(hcat(v,v), 1) == norm(vcat(v,v), 1) != norm([v,v], 1)
586 true
587
588 julia> norm(hcat(v,v), 2) == norm(vcat(v,v), 2) == norm([v,v], 2)
589 true
590
591 julia> norm(hcat(v,v), Inf) == norm(vcat(v,v), Inf) != norm([v,v], Inf)
592 true
593 ```
594 """
# Reconstructed: this function's body was garbled by interleaved profiler
# sample annotations in the report; the code below is the clean definition
# the annotations were attached to.
function norm(itr, p::Real=2)
    # Empty iterables: norm of the zero element, promoted to float.
    isempty(itr) && return float(norm(zero(eltype(itr))))
    if p == 2
        return norm2(itr)
    elseif p == 1
        return norm1(itr)
    elseif p == Inf
        return normInf(itr)
    elseif p == 0
        # "0-norm": number of nonzero entries, in the float type of the norms.
        return typeof(float(norm(first(itr))))(count(!iszero, itr))
    elseif p == -Inf
        return normMinusInf(itr)
    else
        normp(itr, p)
    end
end
611
612 """
613 norm(x::Number, p::Real=2)
614
615 For numbers, return ``\\left( |x|^p \\right)^{1/p}``.
616
617 # Examples
618 ```jldoctest
619 julia> norm(2, 1)
620 2.0
621
622 julia> norm(-2, 1)
623 2.0
624
625 julia> norm(2, 2)
626 2.0
627
628 julia> norm(-2, 2)
629 2.0
630
631 julia> norm(2, Inf)
632 2.0
633
634 julia> norm(-2, Inf)
635 2.0
636 ```
637 """
# Norm of a scalar: |x| for every p except p == 0, where the result is the
# "number of nonzeros" of x (0, 1, or NaN for NaN input).
@inline function norm(x::Number, p::Real=2)
    v = abs(float(x))
    p == 0 || return v
    iszero(x) && return zero(v)
    return isnan(x) ? v : oneunit(v)
end
# `missing` propagates through norm.
norm(::Missing, p::Real=2) = missing
653
# special cases of opnorm
# Maximum absolute column sum; accumulates in promote_type(Float64, Tnorm)
# to limit rounding error for small float element types.
function opnorm1(A::AbstractMatrix{T}) where T
    require_one_based_indexing(A)
    m, n = size(A)
    Tnorm = typeof(float(real(zero(T))))
    Tsum = promote_type(Float64, Tnorm)
    nrm::Tsum = 0
    @inbounds begin
        for j = 1:n
            nrmj::Tsum = 0
            for i = 1:m
                nrmj += norm(A[i,j])
            end
            nrm = max(nrm,nrmj)
        end
    end
    return convert(Tnorm, nrm)
end

# Spectral norm: largest singular value; degenerate shapes short-circuit
# (empty => 0, a single row/column reduces to the vector 2-norm).
function opnorm2(A::AbstractMatrix{T}) where T
    require_one_based_indexing(A)
    m,n = size(A)
    Tnorm = typeof(float(real(zero(T))))
    if m == 0 || n == 0 return zero(Tnorm) end
    if m == 1 || n == 1 return norm2(A) end
    return svdvals(A)[1]
end

# Maximum absolute row sum.
function opnormInf(A::AbstractMatrix{T}) where T
    require_one_based_indexing(A)
    m,n = size(A)
    Tnorm = typeof(float(real(zero(T))))
    Tsum = promote_type(Float64, Tnorm)
    nrm::Tsum = 0
    @inbounds begin
        for i = 1:m
            nrmi::Tsum = 0
            for j = 1:n
                nrmi += norm(A[i,j])
            end
            nrm = max(nrm,nrmi)
        end
    end
    return convert(Tnorm, nrm)
end
699
700
701 """
702 opnorm(A::AbstractMatrix, p::Real=2)
703
704 Compute the operator norm (or matrix norm) induced by the vector `p`-norm,
705 where valid values of `p` are `1`, `2`, or `Inf`. (Note that for sparse matrices,
706 `p=2` is currently not implemented.) Use [`norm`](@ref) to compute the Frobenius
707 norm.
708
709 When `p=1`, the operator norm is the maximum absolute column sum of `A`:
710 ```math
711 \\|A\\|_1 = \\max_{1 ≤ j ≤ n} \\sum_{i=1}^m | a_{ij} |
712 ```
713 with ``a_{ij}`` the entries of ``A``, and ``m`` and ``n`` its dimensions.
714
715 When `p=2`, the operator norm is the spectral norm, equal to the largest
716 singular value of `A`.
717
718 When `p=Inf`, the operator norm is the maximum absolute row sum of `A`:
719 ```math
720 \\|A\\|_\\infty = \\max_{1 ≤ i ≤ m} \\sum _{j=1}^n | a_{ij} |
721 ```
722
723 # Examples
724 ```jldoctest
725 julia> A = [1 -2 -3; 2 3 -1]
726 2×3 Matrix{Int64}:
727 1 -2 -3
728 2 3 -1
729
730 julia> opnorm(A, Inf)
731 6.0
732
733 julia> opnorm(A, 1)
734 5.0
735 ```
736 """
# Dispatch to the specialized kernels; only p ∈ {1, 2, Inf} are supported.
function opnorm(A::AbstractMatrix, p::Real=2)
    p == 2 && return opnorm2(A)
    p == 1 && return opnorm1(A)
    p == Inf && return opnormInf(A)
    throw(ArgumentError("invalid p-norm p=$p. Valid: 1, 2, Inf"))
end
748
"""
    opnorm(x::Number, p::Real=2)

For numbers, return ``\\left( |x|^p \\right)^{1/p}``.
This is equivalent to [`norm`](@ref).
"""
@inline opnorm(x::Number, p::Real=2) = norm(x, p)

"""
    opnorm(A::Adjoint{<:Any,<:AbstractVector}, q::Real=2)
    opnorm(A::Transpose{<:Any,<:AbstractVector}, q::Real=2)

For Adjoint/Transpose-wrapped vectors, return the operator ``q``-norm of `A`, which is
equivalent to the `p`-norm with value `p = q/(q-1)`. They coincide at `p = q = 2`.
Use [`norm`](@ref) to compute the `p` norm of `A` as a vector.

The difference in norm between a vector space and its dual arises to preserve
the relationship between duality and the dot product, and the result is
consistent with the operator `p`-norm of a `1 × n` matrix.

# Examples
```jldoctest
julia> v = [1; im];

julia> vc = v';

julia> opnorm(vc, 1)
1.0

julia> norm(vc, 1)
2.0

julia> norm(v, 1)
2.0

julia> opnorm(vc, 2)
1.4142135623730951

julia> norm(vc, 2)
1.4142135623730951

julia> norm(v, 2)
1.4142135623730951

julia> opnorm(vc, Inf)
2.0

julia> norm(vc, Inf)
1.0

julia> norm(v, Inf)
1.0
```
"""
# Dual norm: q-norm of a row vector equals the p-norm of the column vector
# with 1/p + 1/q = 1; `q == Inf` is mapped to `p == 1` explicitly to avoid Inf/Inf.
opnorm(v::TransposeAbsVec, q::Real) = q == Inf ? norm(v.parent, 1) : norm(v.parent, q/(q-1))
opnorm(v::AdjointAbsVec, q::Real) = q == Inf ? norm(conj(v.parent), 1) : norm(conj(v.parent), q/(q-1))
opnorm(v::AdjointAbsVec) = norm(conj(v.parent))
opnorm(v::TransposeAbsVec) = norm(v.parent)

# As a *vector* norm (not operator norm), wrappers are transparent.
norm(v::AdjOrTrans, p::Real) = norm(v.parent, p)
809
810 """
811 dot(x, y)
812 x ⋅ y
813
814 Compute the dot product between two vectors. For complex vectors, the first
815 vector is conjugated.
816
817 `dot` also works on arbitrary iterable objects, including arrays of any dimension,
818 as long as `dot` is defined on the elements.
819
820 `dot` is semantically equivalent to `sum(dot(vx,vy) for (vx,vy) in zip(x, y))`,
821 with the added restriction that the arguments must have equal lengths.
822
823 `x ⋅ y` (where `⋅` can be typed by tab-completing `\\cdot` in the REPL) is a synonym for
824 `dot(x, y)`.
825
826 # Examples
827 ```jldoctest
828 julia> dot([1; 1], [2; 3])
829 5
830
831 julia> dot([im; im], [1; 1])
832 0 - 2im
833
834 julia> dot(1:5, 2:6)
835 70
836
837 julia> x = fill(2., (5,5));
838
839 julia> y = fill(3., (5,5));
840
841 julia> dot(x, y)
842 150.0
843 ```
844 """
function dot end

# Generic dot product over arbitrary equal-length iterables, accumulating
# recursively via two-argument `dot` on the elements.
function dot(x, y) # arbitrary iterables
    ix = iterate(x)
    iy = iterate(y)
    if ix === nothing
        if iy !== nothing
            throw(DimensionMismatch("x and y are of different lengths!"))
        end
        # Both empty: zero of the promoted element-dot type.
        return dot(zero(eltype(x)), zero(eltype(y)))
    end
    if iy === nothing
        throw(DimensionMismatch("x and y are of different lengths!"))
    end
    (vx, xs) = ix
    (vy, ys) = iy
    s = dot(vx, vy)
    while true
        ix = iterate(x, xs)
        iy = iterate(y, ys)
        ix === nothing && break
        iy === nothing && break
        (vx, xs), (vy, ys) = ix, iy
        s += dot(vx, vy)
    end
    # If exactly one of the iterators is exhausted here, the lengths differed.
    if !(iy === nothing && ix === nothing)
        throw(DimensionMismatch("x and y are of different lengths!"))
    end
    return s
end
875
# Scalar dot product; the first argument is conjugated.
function dot(x::Number, y::Number)
    return conj(x) * y
end
877
# Dot product of arrays of any (matching) length, accumulating recursively
# via element-wise `dot` so nested arrays work too.
function dot(x::AbstractArray, y::AbstractArray)
    lx = length(x)
    if lx != length(y)
        throw(DimensionMismatch("first array has length $(lx) which does not match the length of the second, $(length(y))."))
    end
    if lx == 0
        return dot(zero(eltype(x)), zero(eltype(y)))
    end
    # Seed with a zero of the element-dot result type.
    s = zero(dot(first(x), first(y)))
    for (Ix, Iy) in zip(eachindex(x), eachindex(y))
        @inbounds s += dot(x[Ix], y[Iy])
    end
    s
end

# dot is conjugate-symmetric under Adjoint; Transpose simply unwraps.
function dot(x::Adjoint{<:Union{Real,Complex}}, y::Adjoint{<:Union{Real,Complex}})
    return conj(dot(parent(x), parent(y)))
end
dot(x::Transpose, y::Transpose) = dot(parent(x), parent(y))
897
898 """
899 dot(x, A, y)
900
901 Compute the generalized dot product `dot(x, A*y)` between two vectors `x` and `y`,
902 without storing the intermediate result of `A*y`. As for the two-argument
903 [`dot(_,_)`](@ref), this acts recursively. Moreover, for complex vectors, the
904 first vector is conjugated.
905
906 !!! compat "Julia 1.4"
907 Three-argument `dot` requires at least Julia 1.4.
908
909 # Examples
910 ```jldoctest
911 julia> dot([1; 1], [1 2; 3 4], [2; 3])
912 26
913
914 julia> dot(1:5, reshape(1:25, 5, 5), 2:6)
915 4850
916
917 julia> ⋅(1:5, reshape(1:25, 5, 5), 2:6) == dot(1:5, reshape(1:25, 5, 5), 2:6)
918 true
919 ```
920 """
dot(x, A, y) = dot(x, A*y) # generic fallback for cases that are not covered by specialized methods

# Specialized 3-arg dot: accumulate column-by-column so A*y is never formed,
# skipping columns where y[j] == 0.
function dot(x::AbstractVector, A::AbstractMatrix, y::AbstractVector)
    (axes(x)..., axes(y)...) == axes(A) || throw(DimensionMismatch())
    T = typeof(dot(first(x), first(A), first(y)))
    s = zero(T)
    i₁ = first(eachindex(x))
    x₁ = first(x)
    @inbounds for j in eachindex(y)
        yj = y[j]
        if !iszero(yj)
            # temp accumulates (column j of A)' * x in the correct type.
            temp = zero(adjoint(A[i₁,j]) * x₁)
            @simd for i in eachindex(x)
                temp += adjoint(A[i,j]) * x[i]
            end
            s += dot(temp, yj)
        end
    end
    return s
end
# x' A' y == conj(y' A x); the Transpose method is restricted to real
# element types so the conjugation-free identity remains valid.
dot(x::AbstractVector, adjA::Adjoint, y::AbstractVector) = adjoint(dot(y, adjA.parent, x))
dot(x::AbstractVector, transA::Transpose{<:Real}, y::AbstractVector) = adjoint(dot(y, transA.parent, x))
943
944 ###########################################################################################
945
946 """
947 rank(A::AbstractMatrix; atol::Real=0, rtol::Real=atol>0 ? 0 : n*ϵ)
948 rank(A::AbstractMatrix, rtol::Real)
949
950 Compute the numerical rank of a matrix by counting how many outputs of
951 `svdvals(A)` are greater than `max(atol, rtol*σ₁)` where `σ₁` is `A`'s largest
952 calculated singular value. `atol` and `rtol` are the absolute and relative
953 tolerances, respectively. The default relative tolerance is `n*ϵ`, where `n`
954 is the size of the smallest dimension of `A`, and `ϵ` is the [`eps`](@ref) of
955 the element type of `A`.
956
957 !!! note
958 Numerical rank can be a sensitive and imprecise characterization of
959 ill-conditioned matrices with singular values that are close to the threshold
960 tolerance `max(atol, rtol*σ₁)`. In such cases, slight perturbations to the
961 singular-value computation or to the matrix can change the result of `rank`
962 by pushing one or more singular values across the threshold. These variations
963 can even occur due to changes in floating-point errors between different Julia
964 versions, architectures, compilers, or operating systems.
965
966 !!! compat "Julia 1.1"
967 The `atol` and `rtol` keyword arguments requires at least Julia 1.1.
968 In Julia 1.0 `rtol` is available as a positional argument, but this
969 will be deprecated in Julia 2.0.
970
971 # Examples
972 ```jldoctest
973 julia> rank(Matrix(I, 3, 3))
974 3
975
976 julia> rank(diagm(0 => [1, 0, 2]))
977 2
978
979 julia> rank(diagm(0 => [1, 0.001, 2]), rtol=0.1)
980 2
981
982 julia> rank(diagm(0 => [1, 0.001, 2]), rtol=0.00001)
983 3
984
985 julia> rank(diagm(0 => [1, 0.001, 2]), atol=1.5)
986 1
987 ```
988 """
# Numerical rank: count singular values above max(atol, rtol*σ₁). The rtol
# default (n*eps of the float element type) only applies when atol is zero.
function rank(A::AbstractMatrix; atol::Real = 0.0, rtol::Real = (min(size(A)...)*eps(real(float(one(eltype(A))))))*iszero(atol))
    isempty(A) && return 0 # 0-dimensional case
    sv = svdvals(A)
    cutoff = max(atol, rtol*sv[1])
    return count(>(cutoff), sv)
end
# A nonzero scalar or vector has rank 1; the zero object has rank 0.
rank(x::Union{Number,AbstractVector}) = iszero(x) ? 0 : 1
996
997 """
998 tr(M)
999
1000 Matrix trace. Sums the diagonal elements of `M`.
1001
1002 # Examples
1003 ```jldoctest
1004 julia> A = [1 2; 3 4]
1005 2×2 Matrix{Int64}:
1006 1 2
1007 3 4
1008
1009 julia> tr(A)
1010 5
1011 ```
1012 """
function tr(A::AbstractMatrix)
    checksquare(A)  # trace is only defined for square matrices
    return sum(diag(A))
end
# The trace of a scalar is the scalar itself.
tr(x::Number) = x
1018
1019 #kron(a::AbstractVector, b::AbstractVector)
1020 #kron(a::AbstractMatrix{T}, b::AbstractMatrix{S}) where {T,S}
1021
1022 #det(a::AbstractMatrix)
1023
1024 """
1025 inv(M)
1026
1027 Matrix inverse. Computes matrix `N` such that
1028 `M * N = I`, where `I` is the identity matrix.
1029 Computed by solving the left-division
1030 `N = M \\ I`.
1031
1032 # Examples
1033 ```jldoctest
1034 julia> M = [2 5; 1 3]
1035 2×2 Matrix{Int64}:
1036 2 5
1037 1 3
1038
1039 julia> N = inv(M)
1040 2×2 Matrix{Float64}:
1041 3.0 -5.0
1042 -1.0 2.0
1043
1044 julia> M*N == N*M == Matrix(I, 2, 2)
1045 true
1046 ```
1047 """
# Matrix inverse via `factorize(A) \ I`, with element types chosen so that
# units (e.g. Unitful quantities) come out right.
function inv(A::AbstractMatrix{T}) where T
    n = checksquare(A)
    S = typeof(zero(T)/one(T)) # dimensionful
    S0 = typeof(zero(T)/oneunit(T)) # dimensionless
    dest = Matrix{S0}(I, n, n)
    ldiv!(factorize(convert(AbstractMatrix{S}, A)), dest)
end
# Commute the inverse with the lazy wrappers: inv(A')' == inv(A), etc.
inv(A::Adjoint) = adjoint(inv(parent(A)))
inv(A::Transpose) = transpose(inv(parent(A)))
1057
# Pseudoinverse of a vector: v⁺ = v* / ‖v‖². Real vectors dualize with
# `transpose`, complex (and generic) ones with `adjoint`.
pinv(v::AbstractVector{T}, tol::Real = real(zero(T))) where {T<:Real} = _vectorpinv(transpose, v, tol)
pinv(v::AbstractVector{T}, tol::Real = real(zero(T))) where {T<:Complex} = _vectorpinv(adjoint, v, tol)
pinv(v::AbstractVector{T}, tol::Real = real(zero(T))) where {T} = _vectorpinv(adjoint, v, tol)
function _vectorpinv(dualfn::Tf, v::AbstractVector{Tv}, tol) where {Tv,Tf}
    out = dualfn(similar(v, typeof(zero(Tv) / (abs2(one(Tv)) + abs2(one(Tv))))))
    normsq = sum(abs2, v)
    # as tol is the threshold relative to the maximum singular value, for a vector with
    # single singular value σ=√normsq, σ ≦ tol*σ is equivalent to normsq=0 ∨ tol≥1
    if iszero(normsq) || tol >= one(tol)
        fill!(out, zero(eltype(out)))
    else
        out .= dualfn(v) ./ normsq
    end
    return out
end
1073
# this method is just an optimization: literal negative powers of A are
# already turned by literal_pow into powers of inv(A), but for A^-1 this
# would turn into inv(A)^1 = copy(inv(A)), which makes an extra copy.
@inline Base.literal_pow(::typeof(^), A::AbstractMatrix, ::Val{-1}) = inv(A)
1078
1079 """
1080 \\(A, B)
1081
1082 Matrix division using a polyalgorithm. For input matrices `A` and `B`, the result `X` is
1083 such that `A*X == B` when `A` is square. The solver that is used depends upon the structure
1084 of `A`. If `A` is upper or lower triangular (or diagonal), no factorization of `A` is
1085 required and the system is solved with either forward or backward substitution.
1086 For non-triangular square matrices, an LU factorization is used.
1087
1088 For rectangular `A` the result is the minimum-norm least squares solution computed by a
1089 pivoted QR factorization of `A` and a rank estimate of `A` based on the R factor.
1090
1091 When `A` is sparse, a similar polyalgorithm is used. For indefinite matrices, the `LDLt`
1092 factorization does not use pivoting during the numerical factorization and therefore the
1093 procedure can fail even for invertible matrices.
1094
1095 See also: [`factorize`](@ref), [`pinv`](@ref).
1096
1097 # Examples
1098 ```jldoctest
1099 julia> A = [1 0; 1 -2]; B = [32; -4];
1100
1101 julia> X = A \\ B
1102 2-element Vector{Float64}:
1103 32.0
1104 18.0
1105
1106 julia> A * X == B
1107 true
1108 ```
1109 """
# Polyalgorithm for A \ B: pick the cheapest solver the structure of A allows.
function (\)(A::AbstractMatrix, B::AbstractVecOrMat)
    require_one_based_indexing(A, B)
    m, n = size(A)
    if m == n
        if istril(A)
            if istriu(A)
                return Diagonal(A) \ B  # both triangular => A is diagonal
            else
                return LowerTriangular(A) \ B
            end
        end
        if istriu(A)
            return UpperTriangular(A) \ B
        end
        return lu(A) \ B  # general square: LU with pivoting
    end
    # Rectangular: minimum-norm least-squares via pivoted QR.
    return qr(A, ColumnNorm()) \ B
end

# Vector "division": least-squares solution via the pseudoinverse of `a`.
(\)(a::AbstractVector, b::AbstractArray) = pinv(a) * b
1130 """
1131 A / B
1132
1133 Matrix right-division: `A / B` is equivalent to `(B' \\ A')'` where [`\\`](@ref) is the left-division operator.
1134 For square matrices, the result `X` is such that `A == X*B`.
1135
1136 See also: [`rdiv!`](@ref).
1137
1138 # Examples
1139 ```jldoctest
1140 julia> A = Float64[1 4 5; 3 9 2]; B = Float64[1 4 2; 3 4 2; 8 7 1];
1141
1142 julia> X = A / B
1143 2×3 Matrix{Float64}:
1144 -0.65 3.75 -1.2
1145 3.25 -2.75 1.0
1146
1147 julia> isapprox(A, X*B)
1148 true
1149
1150 julia> isapprox(X, A*pinv(B))
1151 true
1152 ```
1153 """
function (/)(A::AbstractVecOrMat, B::AbstractVecOrMat)
    size(A,2) != size(B,2) && throw(DimensionMismatch("Both inputs should have the same number of columns"))
    # A / B == (B' \ A')'; `copy` materializes the lazy adjoint of the solution.
    return copy(adjoint(adjoint(B) \ adjoint(A)))
end
1158
# Condition number of a scalar: 1 for any nonzero value, Inf for zero;
# independent of the choice of norm `p`.
function cond(x::Number)
    return iszero(x) ? Inf : 1.0
end
cond(x::Number, p) = cond(x)
1161
#Skeel condition numbers
# κ_S(A, p) = ‖ |inv(A)|·|A| ‖_p — the componentwise (Bauer) condition number.
condskeel(A::AbstractMatrix, p::Real=Inf) = opnorm(abs.(inv(A))*abs.(A), p)
1164
"""
    condskeel(M, [x, p::Real=Inf])

```math
\\kappa_S(M, p) = \\left\\Vert \\left\\vert M \\right\\vert \\left\\vert M^{-1} \\right\\vert \\right\\Vert_p \\\\
\\kappa_S(M, x, p) = \\frac{\\left\\Vert \\left\\vert M \\right\\vert \\left\\vert M^{-1} \\right\\vert \\left\\vert x \\right\\vert \\right\\Vert_p}{\\left \\Vert x \\right \\Vert_p}
```

Skeel condition number ``\\kappa_S`` of the matrix `M`, optionally with respect to the
vector `x`, as computed using the operator `p`-norm. ``\\left\\vert M \\right\\vert``
denotes the matrix of (entry wise) absolute values of ``M``;
``\\left\\vert M \\right\\vert_{ij} = \\left\\vert M_{ij} \\right\\vert``.
Valid values for `p` are `1`, `2` and `Inf` (default).

This quantity is also known in the literature as the Bauer condition number, relative
condition number, or componentwise relative condition number.
"""
function condskeel(A::AbstractMatrix, x::AbstractVector, p::Real=Inf)
    # |A⁻¹|·(|A|·|x|), then relative to the norm of x itself.
    scaled = abs.(inv(A)) * (abs.(A) * abs.(x))
    return norm(scaled, p) / norm(x, p)
end
1185
# For real element types, symmetry and Hermitian-ness coincide
# (conjugation is a no-op), so reuse `ishermitian`.
issymmetric(A::AbstractMatrix{<:Real}) = ishermitian(A)
1187
"""
    issymmetric(A) -> Bool

Test whether a matrix is symmetric.

# Examples
```jldoctest
julia> a = [1 2; 2 -1]
2×2 Matrix{Int64}:
 1   2
 2  -1

julia> issymmetric(a)
true

julia> b = [1 im; -im 1]
2×2 Matrix{Complex{Int64}}:
 1+0im  0+1im
 0-1im  1+0im

julia> issymmetric(b)
false
```
"""
function issymmetric(A::AbstractMatrix)
    rowidx, colidx = axes(A)
    # Non-square matrices cannot be symmetric.
    rowidx == colidx || return false
    # Compare each upper-triangle entry (diagonal included) against the
    # (elementwise) transpose of its mirror image.
    lastcol = last(colidx)
    for i in first(colidx):lastcol
        for j in i:lastcol
            A[i, j] == transpose(A[j, i]) || return false
        end
    end
    return true
end

# A scalar is symmetric iff it equals itself (false only for NaN-like values).
issymmetric(y::Number) = y == y
1226
"""
    ishermitian(A) -> Bool

Test whether a matrix is Hermitian.

# Examples
```jldoctest
julia> a = [1 2; 2 -1]
2×2 Matrix{Int64}:
 1   2
 2  -1

julia> ishermitian(a)
true

julia> b = [1 im; -im 1]
2×2 Matrix{Complex{Int64}}:
 1+0im  0+1im
 0-1im  1+0im

julia> ishermitian(b)
true
```
"""
function ishermitian(A::AbstractMatrix)
    rowidx, colidx = axes(A)
    # Non-square matrices cannot be Hermitian.
    rowidx == colidx || return false
    # Each upper-triangle entry must equal the (elementwise) adjoint of its
    # mirror image; the diagonal is checked against its own conjugate.
    for i in colidx
        for j in i:last(colidx)
            A[i, j] == adjoint(A[j, i]) || return false
        end
    end
    return true
end

# A scalar is Hermitian iff it equals its own conjugate, i.e. it is real.
ishermitian(z::Number) = z == conj(z)
1265
"""
    istriu(A::AbstractMatrix, k::Integer = 0) -> Bool

Test whether `A` is upper triangular starting from the `k`th superdiagonal.

# Examples
```jldoctest
julia> a = [1 2; 2 -1]
2×2 Matrix{Int64}:
 1   2
 2  -1

julia> istriu(a)
false

julia> istriu(a, -1)
true

julia> b = [1 im; 0 -1]
2×2 Matrix{Complex{Int64}}:
 1+0im   0+1im
 0+0im  -1+0im

julia> istriu(b)
true

julia> istriu(b, 1)
false
```
"""
function istriu(A::AbstractMatrix, k::Integer = 0)
    # The index arithmetic in `_istriu` assumes 1-based indexing.
    require_one_based_indexing(A)
    return _istriu(A, k)
end
# A scalar is trivially upper (and lower) triangular.
istriu(x::Number) = true
1301
# Kernel for `istriu`: every entry strictly below the k-th superdiagonal
# must be zero. Assumes 1-based indexing (checked by the caller).
@inline function _istriu(A::AbstractMatrix, k)
    nrows, ncols = size(A)
    # Columns past nrows + k - 1 have no entries below the band at all.
    for col in 1:min(ncols, nrows + k - 1)
        firstbelow = max(1, col - k + 1)
        all(iszero, view(A, firstbelow:nrows, col)) || return false
    end
    return true
end
1309
"""
    istril(A::AbstractMatrix, k::Integer = 0) -> Bool

Test whether `A` is lower triangular starting from the `k`th superdiagonal.

# Examples
```jldoctest
julia> a = [1 2; 2 -1]
2×2 Matrix{Int64}:
 1   2
 2  -1

julia> istril(a)
false

julia> istril(a, 1)
true

julia> b = [1 0; -im -1]
2×2 Matrix{Complex{Int64}}:
 1+0im   0+0im
 0-1im  -1+0im

julia> istril(b)
true

julia> istril(b, -1)
false
```
"""
function istril(A::AbstractMatrix, k::Integer = 0)
    # The index arithmetic in `_istril` assumes 1-based indexing.
    require_one_based_indexing(A)
    return _istril(A, k)
end
# A scalar is trivially lower (and upper) triangular.
istril(x::Number) = true
1345
# Kernel for `istril`: every entry strictly above the k-th superdiagonal
# must be zero. Assumes 1-based indexing (checked by the caller).
@inline function _istril(A::AbstractMatrix, k)
    nrows, ncols = size(A)
    # Columns up to k + 1 have no entries above the band at all.
    for col in max(1, k + 2):ncols
        lastabove = min(col - k - 1, nrows)
        all(iszero, view(A, 1:lastabove, col)) || return false
    end
    return true
end
1353
"""
    isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) -> Bool

Test whether `A` is banded with lower bandwidth starting from the `kl`th superdiagonal
and upper bandwidth extending through the `ku`th superdiagonal.

# Examples
```jldoctest
julia> a = [1 2; 2 -1]
2×2 Matrix{Int64}:
 1   2
 2  -1

julia> LinearAlgebra.isbanded(a, 0, 0)
false

julia> LinearAlgebra.isbanded(a, -1, 1)
true

julia> b = [1 0; -im -1] # lower bidiagonal
2×2 Matrix{Complex{Int64}}:
 1+0im   0+0im
 0-1im  -1+0im

julia> LinearAlgebra.isbanded(b, 0, 0)
false

julia> LinearAlgebra.isbanded(b, -1, 0)
true
```
"""
# Banded ⇔ zero below the kl-th diagonal (upper-triangular from kl)
# and zero above the ku-th diagonal (lower-triangular from ku).
isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) = istriu(A, kl) && istril(A, ku)
1386
"""
    isdiag(A) -> Bool

Test whether a matrix is diagonal in the sense that `iszero(A[i,j])` is true unless `i == j`.
Note that it is not necessary for `A` to be square;
if you would also like to check that, you need to check that `size(A, 1) == size(A, 2)`.

# Examples
```jldoctest
julia> a = [1 2; 2 -1]
2×2 Matrix{Int64}:
 1   2
 2  -1

julia> isdiag(a)
false

julia> b = [im 0; 0 -im]
2×2 Matrix{Complex{Int64}}:
 0+1im  0+0im
 0+0im  0-1im

julia> isdiag(b)
true

julia> c = [1 0 0; 0 2 0]
2×3 Matrix{Int64}:
 1  0  0
 0  2  0

julia> isdiag(c)
true

julia> d = [1 0 0; 0 2 3]
2×3 Matrix{Int64}:
 1  0  0
 0  2  3

julia> isdiag(d)
false
```
"""
# Diagonal ⇔ banded with zero bandwidth in both directions.
isdiag(A::AbstractMatrix) = isbanded(A, 0, 0)
# A scalar is trivially diagonal.
isdiag(x::Number) = true
1431
"""
    axpy!(α, x::AbstractArray, y::AbstractArray)

Overwrite `y` with `x * α + y` and return `y`.
If `x` and `y` have the same axes, it's equivalent with `y .+= x .* α`.

# Examples
```jldoctest
julia> x = [1; 2; 3];

julia> y = [4; 5; 6];

julia> axpy!(2, x, y)
3-element Vector{Int64}:
  6
  9
 12
```
"""
function axpy!(α, x::AbstractArray, y::AbstractArray)
    n = length(x)
    n == length(y) ||
        throw(DimensionMismatch("x has length $n, but y has length $(length(y))"))
    # A zero scalar contributes nothing; skip the loop entirely.
    iszero(α) && return y
    # Pair elements by walking both arrays in their native index orders.
    for (iy, ix) in zip(eachindex(y), eachindex(x))
        @inbounds y[iy] += x[ix] * α
    end
    return y
end
1462
# Indexed variant: accumulate α * x[rx[i]] into y[ry[i]] for paired index
# collections rx and ry.
function axpy!(α, x::AbstractArray, rx::AbstractArray{<:Integer}, y::AbstractArray, ry::AbstractArray{<:Integer})
    length(rx) == length(ry) ||
        throw(DimensionMismatch("rx has length $(length(rx)), but ry has length $(length(ry))"))
    # Validate both index collections up front so the @inbounds loop is safe.
    checkindex(Bool, eachindex(IndexLinear(), x), rx) || throw(BoundsError(x, rx))
    checkindex(Bool, eachindex(IndexLinear(), y), ry) || throw(BoundsError(y, ry))
    iszero(α) && return y
    for (iy, ix) in zip(eachindex(ry), eachindex(rx))
        @inbounds y[ry[iy]] += x[rx[ix]] * α
    end
    return y
end
1477
"""
    axpby!(α, x::AbstractArray, β, y::AbstractArray)

Overwrite `y` with `x * α + y * β` and return `y`.
If `x` and `y` have the same axes, it's equivalent with `y .= x .* α .+ y .* β`.

# Examples
```jldoctest
julia> x = [1; 2; 3];

julia> y = [4; 5; 6];

julia> axpby!(2, x, 2, y)
3-element Vector{Int64}:
 10
 14
 18
```
"""
function axpby!(α, x::AbstractArray, β, y::AbstractArray)
    length(x) == length(y) ||
        throw(DimensionMismatch("x has length $(length(x)), but y has length $(length(y))"))
    # α == 0 together with β == 1 leaves y untouched.
    iszero(α) && isone(β) && return y
    for (ix, iy) in zip(eachindex(x), eachindex(y))
        @inbounds y[iy] = x[ix] * α + y[iy] * β
    end
    return y
end
1507
# Array types backed by dense memory, including reshaped/reinterpreted
# wrappers of such arrays — presumably the layouts BLAS can consume directly.
DenseLike{T} = Union{DenseArray{T}, Base.StridedReshapedArray{T}, Base.StridedReinterpretArray{T}}
# Dense-like arrays, or fast (linear-indexed) SubArray views of them.
StridedVecLike{T} = Union{DenseLike{T}, Base.FastSubArray{T,<:Any,<:DenseLike{T}}}
# Fast paths: forward to BLAS when element type and memory layout allow it.
axpy!(α::Number, x::StridedVecLike{T}, y::StridedVecLike{T}) where {T<:BlasFloat} = BLAS.axpy!(α, x, y)
axpby!(α::Number, x::StridedVecLike{T}, β::Number, y::StridedVecLike{T}) where {T<:BlasFloat} = BLAS.axpby!(α, x, β, y)
# BLAS fast path for the indexed variant when the indices are ranges:
# range views of strided vectors are themselves strided.
function axpy!(α::Number,
    x::StridedVecLike{T}, rx::AbstractRange{<:Integer},
    y::StridedVecLike{T}, ry::AbstractRange{<:Integer},
) where {T<:BlasFloat}
    # Offset-axed ranges are not handled here; fall back to the generic
    # AbstractArray method via @invoke (bypassing this method's dispatch).
    if Base.has_offset_axes(rx, ry)
        return @invoke axpy!(α,
            x::AbstractArray, rx::AbstractArray{<:Integer},
            y::AbstractArray, ry::AbstractArray{<:Integer},
        )
    end
    # @views avoids materializing x[rx]/y[ry]; BLAS mutates y through the view.
    @views BLAS.axpy!(α, x[rx], y[ry])
    return y
end
1525
"""
    rotate!(x, y, c, s)

Overwrite `x` with `c*x + s*y` and `y` with `-conj(s)*x + c*y`.
Returns `x` and `y`.

!!! compat "Julia 1.5"
    `rotate!` requires at least Julia 1.5.
"""
function rotate!(x::AbstractVector, y::AbstractVector, c, s)
    require_one_based_indexing(x, y)
    n = length(x)
    n == length(y) ||
        throw(DimensionMismatch("x has length $(length(x)), but y has length $(length(y))"))
    # Apply the rotation elementwise; stash the pair first so the second
    # assignment still sees the original values.
    @inbounds for k = 1:n
        xk, yk = x[k], y[k]
        x[k] = c * xk + s * yk
        y[k] = -conj(s) * xk + c * yk
    end
    return x, y
end
1548
"""
    reflect!(x, y, c, s)

Overwrite `x` with `c*x + s*y` and `y` with `conj(s)*x - c*y`.
Returns `x` and `y`.

!!! compat "Julia 1.5"
    `reflect!` requires at least Julia 1.5.
"""
function reflect!(x::AbstractVector, y::AbstractVector, c, s)
    require_one_based_indexing(x, y)
    n = length(x)
    n == length(y) ||
        throw(DimensionMismatch("x has length $(length(x)), but y has length $(length(y))"))
    # Apply the reflection elementwise; stash the pair first so the second
    # assignment still sees the original values.
    @inbounds for k = 1:n
        xk, yk = x[k], y[k]
        x[k] = c * xk + s * yk
        y[k] = conj(s) * xk - c * yk
    end
    return x, y
end
1571
# Elementary reflection similar to LAPACK. The reflector is not Hermitian but
# ensures that tridiagonalization of Hermitian matrices become real. See lawn72
@inline function reflector!(x::AbstractVector{T}) where {T}
    require_one_based_indexing(x)
    n = length(x)
    # Empty vector: τ = 0, nothing to reflect.
    n == 0 && return zero(eltype(x))
    @inbounds begin
        ξ1 = x[1]
        normu = norm(x)
        if iszero(normu)
            # Zero vector: return τ = 0 in the type produced by the division
            # (so e.g. integer input still yields a floating-point zero).
            return zero(ξ1/normu)
        end
        # Sign of ν matches real(ξ1) so that ξ1 + ν cannot cancel.
        ν = T(copysign(normu, real(ξ1)))
        ξ1 += ν
        # First entry of the reflected vector; the tail is scaled by 1/ξ1 so
        # the Householder vector is implicitly [1; x[2:end]].
        x[1] = -ν
        for i = 2:n
            x[i] /= ξ1
        end
    end
    # τ
    ξ1/ν
end
1593
"""
    reflectorApply!(x, τ, A)

Multiplies `A` in-place by a Householder reflection on the left: with
`v = [1; x[2:end]]` it computes `A .= (I - conj(τ)*v*v')*A`. The first
element of `x` is not read.
"""
@inline function reflectorApply!(x::AbstractVector, τ::Number, A::AbstractVecOrMat)
    require_one_based_indexing(x)
    m, n = size(A, 1), size(A, 2)
    if length(x) != m
        throw(DimensionMismatch("reflector has length $(length(x)), which must match the first dimension of matrix A, $m"))
    end
    m == 0 && return A
    @inbounds for j = 1:n
        # Tail of column j and tail of the reflector.
        Aj, xj = view(A, 2:m, j), view(x, 2:m)
        # vAj = conj(τ) * (v' * A[:, j]) with v = [1; xj]; `dot` conjugates xj.
        vAj = conj(τ)*(A[1, j] + dot(xj, Aj))
        A[1, j] -= vAj
        # A[2:m, j] .-= vAj .* xj
        axpy!(-vAj, xj, Aj)
    end
    return A
end
1614
"""
    det(M)

Matrix determinant.

See also: [`logdet`](@ref) and [`logabsdet`](@ref).

# Examples
```jldoctest
julia> M = [1 0; 2 2]
2×2 Matrix{Int64}:
 1  0
 2  2

julia> det(M)
2.0
```
"""
function det(A::AbstractMatrix{T}) where {T}
    if istriu(A) || istril(A)
        # Either triangular shape: the determinant is the product of the
        # diagonal, so wrapping in UpperTriangular (which reads the diagonal)
        # is valid for both cases. S matches the result type of the LU path.
        S = promote_type(T, typeof((one(T)*zero(T) + zero(T))/one(T)))
        return convert(S, det(UpperTriangular(A)))
    end
    # General case: determinant via LU; check=false avoids throwing on
    # singular input.
    return det(lu(A; check = false))
end
# The determinant of a scalar is the scalar itself.
det(x::Number) = x

# Resolve Issue #40128: use the fraction-free Bareiss algorithm so BigInt
# determinants stay in exact integer arithmetic.
det(A::AbstractMatrix{BigInt}) = det_bareiss(A)
1644
"""
    logabsdet(M)

Log of absolute value of matrix determinant. Equivalent to
`(log(abs(det(M))), sign(det(M)))`, but may provide increased accuracy and/or speed.

# Examples
```jldoctest
julia> A = [-1. 0.; 0. 1.]
2×2 Matrix{Float64}:
 -1.0  0.0
  0.0  1.0

julia> det(A)
-1.0

julia> logabsdet(A)
(0.0, -1.0)

julia> B = [2. 0.; 0. 1.]
2×2 Matrix{Float64}:
 2.0  0.0
 0.0  1.0

julia> det(B)
2.0

julia> logabsdet(B)
(0.6931471805599453, 1.0)
```
"""
# Delegate to the factorization method; check=false avoids throwing on
# singular input.
logabsdet(A::AbstractMatrix) = logabsdet(lu(A, check=false))

# Scalar case: (log|a|, sign(a)).
function logabsdet(a::Number)
    return log(abs(a)), sign(a)
end
1679
"""
    logdet(M)

Log of matrix determinant. Equivalent to `log(det(M))`, but may provide
increased accuracy and/or speed.

# Examples
```jldoctest
julia> M = [1 0; 2 2]
2×2 Matrix{Int64}:
 1  0
 2  2

julia> logdet(M)
0.6931471805599453

julia> logdet(Matrix(I, 3, 3))
0.0
```
"""
function logdet(A::AbstractMatrix)
    d,s = logabsdet(A)
    # log(s) re-attaches the sign/phase; note that for a real matrix with a
    # negative determinant, log of the negative sign throws a DomainError.
    return d + log(s)
end

# Generic fallback for anything with a determinant (scalars, factorizations, …).
logdet(A) = log(det(A))
1706
# Any array with a numeric element type.
const NumberArray{T<:Number} = AbstractArray{T}

"""
    exactdiv(a, b)

Division helper: `Integer` arguments use truncating integer division `div`;
everything else falls back to ordinary `/`.
"""
exactdiv(a, b) = a / b
exactdiv(a::Integer, b::Integer) = div(a, b)
1711
"""
    det_bareiss!(M)

Calculates the determinant of a matrix using the
[Bareiss Algorithm](https://en.wikipedia.org/wiki/Bareiss_algorithm) using
inplace operations.

# Examples
```jldoctest
julia> M = [1 0; 2 2]
2×2 Matrix{Int64}:
 1  0
 2  2

julia> LinearAlgebra.det_bareiss!(M)
2
```
"""
function det_bareiss!(M)
    n = checksquare(M)
    # `sign` flips on every column swap; `prev` holds the previous pivot, by
    # which each update is divided (exactly, for the fraction-free algorithm).
    sign, prev = Int8(1), one(eltype(M))
    for i in 1:n-1
        if iszero(M[i,i]) # swap with another col to make nonzero
            swapto = findfirst(!iszero, @view M[i,i+1:end])
            # No nonzero pivot available in the rest of row i ⇒ det is zero.
            isnothing(swapto) && return zero(prev)
            sign = -sign
            Base.swapcols!(M, i, i + swapto)
        end
        # Bareiss update: each entry becomes a 2×2 minor divided by `prev`.
        for k in i+1:n, j in i+1:n
            M[j,k] = exactdiv(M[j,k]*M[i,i] - M[j,i]*M[i,k], prev)
        end
        prev = M[i,i]
    end
    # The last pivot is the determinant, up to the accumulated swap sign.
    return sign * M[end,end]
end
"""
    LinearAlgebra.det_bareiss(M)

Calculates the determinant of a matrix using the
[Bareiss Algorithm](https://en.wikipedia.org/wiki/Bareiss_algorithm).
Also refer to [`det_bareiss!`](@ref).
"""
# Work on a copy so the caller's matrix is left untouched.
det_bareiss(M) = det_bareiss!(copy(M))
1755
1756
1757
"""
    promote_leaf_eltypes(itr)

For an (possibly nested) iterable object `itr`, promote the types of leaf
elements. Equivalent to `promote_type(typeof(leaf1), typeof(leaf2), ...)`.
Currently supports only numeric leaf elements.

# Examples
```jldoctest
julia> a = [[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]]
3-element Vector{Any}:
 Any[1, 2, [3, 4]]
 5.0
 Any[0 + 6im, [7.0, 8.0]]

julia> LinearAlgebra.promote_leaf_eltypes(a)
ComplexF64 (alias for Complex{Float64})
```
"""
# Homogeneous container of numbers: the leaf type is the element type itself.
promote_leaf_eltypes(x::Union{AbstractArray{T},Tuple{T,Vararg{T}}}) where {T<:Number} = T
# Homogeneous container of numeric arrays: strip one nesting level.
promote_leaf_eltypes(x::Union{AbstractArray{T},Tuple{T,Vararg{T}}}) where {T<:NumberArray} = eltype(T)
# Non-container leaf: its own type.
promote_leaf_eltypes(x::T) where {T} = T
# Heterogeneous container: recurse and promote across elements; `init=Bool`
# keeps the empty case well-defined without widening the result.
promote_leaf_eltypes(x::Union{AbstractArray,Tuple}) = mapreduce(promote_leaf_eltypes, promote_type, x; init=Bool)
1781
# isapprox: approximate equality of arrays [like isapprox(Number,Number)]
# Supports nested arrays; e.g., for `a = [[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]]`
# `a ≈ a` is `true`.
function isapprox(x::AbstractArray, y::AbstractArray;
    atol::Real=0,
    rtol::Real=Base.rtoldefault(promote_leaf_eltypes(x),promote_leaf_eltypes(y),atol),
    nans::Bool=false, norm::Function=norm)
    # Primary criterion: norm of the difference against a mixed
    # absolute/relative tolerance.
    d = norm(x - y)
    if isfinite(d)
        return iszero(rtol) ? d <= atol : d <= max(atol, rtol*max(norm(x), norm(y)))
    else
        # Fall back to a component-wise approximate comparison
        # (mapreduce instead of all for greater generality [#44893])
        return mapreduce((a, b) -> isapprox(a, b; rtol=rtol, atol=atol, nans=nans), &, x, y)
    end
end
1798
"""
    normalize!(a::AbstractArray, p::Real=2)

Normalize the array `a` in-place so that its `p`-norm equals unity,
i.e. `norm(a, p) == 1`.
See also [`normalize`](@ref) and [`norm`](@ref).
"""
function normalize!(a::AbstractArray, p::Real=2)
    nrm = norm(a, p)
    # Delegate to the overflow/underflow-aware in-place scaling helper.
    __normalize!(a, nrm)
end
1810
# In-place scaling kernel for normalize!/normalize: divides `a` by `nrm`
# without overflowing in inv(nrm) when nrm is tiny.
@inline function __normalize!(a::AbstractArray, nrm)
    # The largest positive floating point number whose inverse is less than infinity
    δ = inv(prevfloat(typemax(nrm)))
    if nrm ≥ δ
        # Safe to multiply with the inverse directly.
        rmul!(a, inv(nrm))
    else
        # nrm is tiny: rescale in two steps to dodge overflow in inv(nrm).
        εδ = eps(one(nrm))/δ
        rmul!(a, εδ)
        rmul!(a, inv(nrm*εδ))
    end
    return a
end
1824
"""
    normalize(a, p::Real=2)

Normalize `a` so that its `p`-norm equals unity,
i.e. `norm(a, p) == 1`. For scalars, this is similar to sign(a),
except normalize(0) = NaN.
See also [`normalize!`](@ref), [`norm`](@ref), and [`sign`](@ref).

# Examples
```jldoctest
julia> a = [1,2,4];

julia> b = normalize(a)
3-element Vector{Float64}:
 0.2182178902359924
 0.4364357804719848
 0.8728715609439696

julia> norm(b)
1.0

julia> c = normalize(a, 1)
3-element Vector{Float64}:
 0.14285714285714285
 0.2857142857142857
 0.5714285714285714

julia> norm(c, 1)
1.0

julia> a = [1 2 4 ; 1 2 4]
2×3 Matrix{Int64}:
 1  2  4
 1  2  4

julia> norm(a)
6.48074069840786

julia> normalize(a)
2×3 Matrix{Float64}:
 0.154303  0.308607  0.617213
 0.154303  0.308607  0.617213

julia> normalize(3, 1)
1.0

julia> normalize(-8, 1)
-1.0

julia> normalize(0, 1)
NaN
```
"""
function normalize(a::AbstractArray, p::Real = 2)
    nrm = norm(a, p)
    if !isempty(a)
        # Copy into a mutable array whose eltype can represent the scaled
        # values (e.g. Int input becomes Float64), then normalize in place.
        aa = copymutable_oftype(a, typeof(first(a)/nrm))
        return __normalize!(aa, nrm)
    else
        # Empty input: return an empty array of the would-be result eltype.
        T = typeof(zero(eltype(a))/nrm)
        return T[]
    end
end

# Non-array fallbacks (scalars etc.): divide by the norm directly, so
# normalize(0) yields 0/0 = NaN as documented.
normalize(x) = x / norm(x)
normalize(x, p::Real) = x / norm(x, p)