Dataset schema (column name, dtype, observed value range or length range):

context_start_lineno    int64           1 .. 913
line_no                 int64           16 .. 984
repo                    stringclasses   5 values
id                      int64           0 .. 416
target_function_prompt  stringlengths   201 .. 13.6k
function_signature      stringlengths   201 .. 13.6k
solution_position       listlengths     2 .. 2
raw_solution            stringlengths   201 .. 13.6k
focal_code              stringlengths   201 .. 13.6k
function_name           stringlengths   2 .. 38
start_line              int64           1 .. 913
end_line                int64           16 .. 984
file_path               stringlengths   10 .. 52
context                 stringlengths   4.52k .. 9.85k
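For orientation, one record of this table could be held in a plain Julia struct like the sketch below. The struct name and the choice of `String`/`Int64` field types are assumptions made here for illustration; they are not part of the dataset itself.

struct FunctionCompletionRecord          # hypothetical name, not part of any released loader
    context_start_lineno::Int64          # observed range 1 .. 913
    line_no::Int64                       # observed range 16 .. 984
    repo::String                         # one of 5 repository names
    id::Int64                            # 0 .. 416
    target_function_prompt::String
    function_signature::String
    solution_position::Vector{Int}       # always a 2-element [start_line, end_line] pair
    raw_solution::String
    focal_code::String
    function_name::String
    start_line::Int64
    end_line::Int64
    file_path::String
    context::String                      # retrieved code chunks, roughly 4.52k .. 9.85k characters
end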
Record 0
repo: DataStructures.jl
id: 0
file_path: src/accumulator.jl
function_name: Base.intersect!
start_line: 230, end_line: 241, solution_position: [230, 241]
context_start_lineno: 230, line_no: 241
target_function_prompt / function_signature / raw_solution / focal_code (identical in this record):

function Base.intersect!(a::Accumulator, b::Accumulator)
    for k in union(keys(a), keys(b))  # union not intersection as we want to check both multiplicities
        va = a[k]
        vb = b[k]
        va >= 0 || throw(MultiplicityException(k, va))
        vb >= 0 || throw(MultiplicityException(k, vb))

        a[k] = min(va, vb)
        drop_nonpositive!(a, k)  # Drop any that ended up zero
    end
    return a
end

context (retrieved code chunks, as extracted):
#FILE: DataStructures.jl/src/sparse_int_set.jl ##CHUNK 1 #Is there a more performant way to do this? Base.intersect!(s1::SparseIntSet, ns) = copy!(s1, intersect(s1, ns)) Base.setdiff(s::SparseIntSet, ns) = setdiff!(copy(s), ns) function Base.setdiff!(s::SparseIntSet, ns) for n in ns pop!(s, n, nothing) end return s end function Base.:(==)(s1::SparseIntSet, s2::SparseIntSet) length(s1) != length(s2) && return false return all(in(s1), s2) end Base.issubset(a::SparseIntSet, b::SparseIntSet) = isequal(a, intersect(a, b)) Base.:(<)(a::SparseIntSet, b::SparseIntSet) = ( a<=b ) && !isequal(a, b) Base.:(<=)(a::SparseIntSet, b::SparseIntSet) = issubset(a, b) #FILE: DataStructures.jl/test/test_int_set.jl ##CHUNK 1 p′ = complement(p) q′ = complement(q) function collect10(itr) r = eltype(itr)[] for i in itr i > 10 && break push!(r, i) end r end a = Set(p) b = Set(q) a′ = Set(collect10(p′)) b′ = Set(collect10(q′)) for f in (union, intersect, setdiff, symdiff) @test collect(f(p, p)) == sort(collect(f(a, a))) @test collect(f(q, q)) == sort(collect(f(b, b))) @test collect(f(p, q)) == sort(collect(f(a, b))) @test collect(f(q, p)) == sort(collect(f(b, a))) #FILE: DataStructures.jl/test/test_mutable_binheap.jl ##CHUNK 1 update!(h, 2, 20) @test isequal(heap_values(h), [0.5, 10.1, 3.0, 20.0]) end @testset "empty!" begin vs = [4, 1, 3, 2, 16, 9, 10, 14, 8, 7] vs2 = collect(enumerate(vs)) ordering = Base.Order.By(last) for h in (MutableBinaryMinHeap(vs), MutableBinaryMaxHeap(vs), MutableBinaryHeap(ordering, vs2)) @test length(h) == length(vs) @test !isempty(h) ret = empty!(h) @test ret === h @test length(ret) == 0 @test isempty(ret) @test verify_heap(ret) end end #FILE: DataStructures.jl/src/int_set.jl ##CHUNK 1 (l1 == l2 || all(unsafe_getindex(s1.bits, l2+1:l1))) end end const hashis_seed = UInt === UInt64 ? 0x88989f1fc7dea67d : 0xc7dea67d function Base.hash(s::IntSet, h::UInt) # Only hash the bits array up to the last-set bit to prevent extra empty # bits from changing the hash result l = findprev(s.bits, length(s.bits)) return hash(unsafe_getindex(s.bits, 1:l), h) ⊻ hash(s.inverse) ⊻ hashis_seed end Base.issubset(a::IntSet, b::IntSet) = isequal(a, intersect(a,b)) Base.:(<)(a::IntSet, b::IntSet) = (a<=b) && !isequal(a,b) Base.:(<=)(a::IntSet, b::IntSet) = issubset(a, b) #CURRENT FILE: DataStructures.jl/src/accumulator.jl ##CHUNK 1 va >= 0 || throw(MultiplicityException(kb, va)) a[kb] = max(va, vb) end return a end Base.intersect(a::Accumulator, b::Accumulator, c::Accumulator...) = intersect(intersect(a,b), c...) Base.intersect(a::Accumulator, b::Accumulator) = intersect!(copy(a), b) function Base.show(io::IO, acc::Accumulator{T,V}) where {T,V} l = length(acc) if l>0 print(io, "Accumulator(") else print(io,"Accumulator{$T,$V}(") end for (count, (k, v)) in enumerate(acc) print(io, k, " => ", v) if count < l print(io, ", ") ##CHUNK 2 end Base.issubset(a::Accumulator, b::Accumulator) = all(b[k] >= v for (k, v) in a) Base.union(a::Accumulator, b::Accumulator, c::Accumulator...) = union(union(a,b), c...) Base.union(a::Accumulator, b::Accumulator) = union!(copy(a), b) function Base.union!(a::Accumulator, b::Accumulator) for (kb, vb) in b va = a[kb] vb >= 0 || throw(MultiplicityException(kb, vb)) va >= 0 || throw(MultiplicityException(kb, va)) a[kb] = max(va, vb) end return a end Base.intersect(a::Accumulator, b::Accumulator, c::Accumulator...) = intersect(intersect(a,b), c...) 
Base.intersect(a::Accumulator, b::Accumulator) = intersect!(copy(a), b) function Base.show(io::IO, acc::Accumulator{T,V}) where {T,V} ##CHUNK 3 function Base.setdiff(a::Accumulator, b::Accumulator) ret = copy(a) for (k, v) in b v > 0 || throw(MultiplicityException(k, v)) dec!(ret, k, v) drop_nonpositive!(ret, k) end return ret end Base.issubset(a::Accumulator, b::Accumulator) = all(b[k] >= v for (k, v) in a) Base.union(a::Accumulator, b::Accumulator, c::Accumulator...) = union(union(a,b), c...) Base.union(a::Accumulator, b::Accumulator) = union!(copy(a), b) function Base.union!(a::Accumulator, b::Accumulator) for (kb, vb) in b va = a[kb] vb >= 0 || throw(MultiplicityException(kb, vb)) ##CHUNK 4 k::K v::V end function Base.showerror(io::IO, err::MultiplicityException) print(io, "When using an `Accumulator` as a multiset, all elements must have positive multiplicity") print(io, " element `$(err.k)` has multiplicity $(err.v)") end drop_nonpositive!(a::Accumulator, k) = (a[k] > 0 || delete!(a.map, k)) function Base.setdiff(a::Accumulator, b::Accumulator) ret = copy(a) for (k, v) in b v > 0 || throw(MultiplicityException(k, v)) dec!(ret, k, v) drop_nonpositive!(ret, k) end return ret ##CHUNK 5 (unless those elements are added to he accumulator directly, eg via `acc[foo]=0) """ nsmallest(acc::Accumulator) = sort!(collect(acc), by=last, rev=false) nsmallest(acc::Accumulator, n) = partialsort!(collect(acc), 1:n, by=last, rev=false) ########################################################### ## Multiset operations struct MultiplicityException{K, V} <: Exception k::K v::V end function Base.showerror(io::IO, err::MultiplicityException) print(io, "When using an `Accumulator` as a multiset, all elements must have positive multiplicity") print(io, " element `$(err.k)` has multiplicity $(err.v)") end drop_nonpositive!(a::Accumulator, k) = (a[k] > 0 || delete!(a.map, k)) ##CHUNK 6 ct = copy(ct1) merge!(ct,others...) end """ reset!(ct::Accumulator, x) Remove a key `x` from an accumulator, and return its current value """ reset!(ct::Accumulator{<:Any,V}, x) where V = haskey(ct.map, x) ? pop!(ct.map, x) : zero(V) """ nlargest(acc::Accumulator, [n]) Returns a sorted vector of the `n` most common elements, with their counts. If `n` is omitted, the full sorted collection is returned. This corresponds to Python's `Counter.most_common` function. Example
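To make the multiset semantics of Record 0 concrete, here is a small usage sketch. It assumes DataStructures.jl is available and uses its `counter` constructor; the element values are made up.

using DataStructures  # assumed to be installed

a = counter(["x", "x", "y"])       # Accumulator with "x" => 2, "y" => 1
b = counter(["x", "y", "y", "z"])  # Accumulator with "x" => 1, "y" => 2, "z" => 1

c = intersect(a, b)                # non-mutating form; intersect! mutates its first argument
# Each multiplicity becomes the minimum of the two counts, and zero counts are dropped.
@assert c["x"] == 1 && c["y"] == 1 && c["z"] == 0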
Record 1
repo: DataStructures.jl
id: 1
file_path: src/avl_tree.jl
function_name: left_rotate
start_line: 83, end_line: 93, solution_position: [83, 93]
context_start_lineno: 83, line_no: 93
target_function_prompt / function_signature / raw_solution / focal_code (identical in this record):

function left_rotate(z::AVLTreeNode)
    y = z.rightChild
    α = y.leftChild
    y.leftChild = z
    z.rightChild = α
    z.height = compute_height(z)
    y.height = compute_height(y)
    z.subsize = compute_subtree_size(z)
    y.subsize = compute_subtree_size(y)
    return y
end

context (retrieved code chunks, as extracted):
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 node_x.parent.rightChild = node_y end node_y.leftChild = node_x node_x.parent = node_y end """ right_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a right-rotation on `node_x` and updates `tree.root`, if required. """ function right_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.leftChild node_x.leftChild = node_y.rightChild if node_y.rightChild !== tree.nil node_y.rightChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y ##CHUNK 2 end end node.parent = node_y if node_y == nothing tree.root = node elseif node.data < node_y.data node_y.leftChild = node else node_y.rightChild = node end end """ left_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a left-rotation on `node_x` and updates `tree.root`, if required. """ function left_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.rightChild ##CHUNK 3 end end """ left_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a left-rotation on `node_x` and updates `tree.root`, if required. """ function left_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.rightChild node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else ##CHUNK 4 node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.leftChild = node_x node_x.parent = node_y end """ right_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a right-rotation on `node_x` and updates `tree.root`, if required. #CURRENT FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 Performs a left-rotation on `node_x`, updates height of the nodes, and returns the rotated node. """ """ right_rotate(node_x::AVLTreeNode) Performs a right-rotation on `node_x`, updates height of the nodes, and returns the rotated node. """ function right_rotate(z::AVLTreeNode) y = z.leftChild α = y.rightChild y.rightChild = z z.leftChild = α z.height = compute_height(z) y.height = compute_height(y) z.subsize = compute_subtree_size(z) y.subsize = compute_subtree_size(y) return y end ##CHUNK 2 else L = get_subsize(node.leftChild) R = get_subsize(node.rightChild) return (L + R + Int32(1)) end end """ left_rotate(node_x::AVLTreeNode) Performs a left-rotation on `node_x`, updates height of the nodes, and returns the rotated node. """ """ right_rotate(node_x::AVLTreeNode) Performs a right-rotation on `node_x`, updates height of the nodes, and returns the rotated node. 
""" function right_rotate(z::AVLTreeNode) y = z.leftChild ##CHUNK 3 function insert_node(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = insert_node(node.leftChild, key) else node.rightChild = insert_node(node.rightChild, key) end node.subsize = compute_subtree_size(node) node.height = compute_height(node) balance = get_balance(node) if balance > 1 if key < node.leftChild.data return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end end ##CHUNK 4 node.height = compute_height(node) balance = get_balance(node) if balance > 1 if get_balance(node.leftChild) >= 0 return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end end if balance < -1 if get_balance(node.rightChild) <= 0 return left_rotate(node) else node.rightChild = right_rotate(node.rightChild) return left_rotate(node) end end ##CHUNK 5 α = y.rightChild y.rightChild = z z.leftChild = α z.height = compute_height(z) y.height = compute_height(y) z.subsize = compute_subtree_size(z) y.subsize = compute_subtree_size(y) return y end """ minimum_node(tree::AVLTree, node::AVLTreeNode) Returns the AVLTreeNode with minimum value in subtree of `node`. """ function minimum_node(node::Union{AVLTreeNode, Nothing}) while node != nothing && node.leftChild != nothing node = node.leftChild end return node ##CHUNK 6 result = node.leftChild return result else result = minimum_node(node.rightChild) node.data = result.data node.rightChild = delete_node!(node.rightChild, result.data) end end node.subsize = compute_subtree_size(node) node.height = compute_height(node) balance = get_balance(node) if balance > 1 if get_balance(node.leftChild) >= 0 return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end
Record 2
repo: DataStructures.jl
id: 2
file_path: src/avl_tree.jl
function_name: right_rotate
start_line: 100, end_line: 110, solution_position: [100, 110]
context_start_lineno: 100, line_no: 110
target_function_prompt / function_signature / raw_solution / focal_code (identical in this record):

function right_rotate(z::AVLTreeNode)
    y = z.leftChild
    α = y.rightChild
    y.rightChild = z
    z.leftChild = α
    z.height = compute_height(z)
    y.height = compute_height(y)
    z.subsize = compute_subtree_size(z)
    y.subsize = compute_subtree_size(y)
    return y
end

context (retrieved code chunks, as extracted):
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 end end node.parent = node_y if node_y == nothing tree.root = node elseif node.data < node_y.data node_y.leftChild = node else node_y.rightChild = node end end """ left_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a left-rotation on `node_x` and updates `tree.root`, if required. """ function left_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.rightChild ##CHUNK 2 node_x.parent.rightChild = node_y end node_y.leftChild = node_x node_x.parent = node_y end """ right_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a right-rotation on `node_x` and updates `tree.root`, if required. """ function right_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.leftChild node_x.leftChild = node_y.rightChild if node_y.rightChild !== tree.nil node_y.rightChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y ##CHUNK 3 end end """ left_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a left-rotation on `node_x` and updates `tree.root`, if required. """ function left_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.rightChild node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else ##CHUNK 4 node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.leftChild = node_x node_x.parent = node_y end """ right_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a right-rotation on `node_x` and updates `tree.root`, if required. #CURRENT FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 else L = get_subsize(node.leftChild) R = get_subsize(node.rightChild) return (L + R + Int32(1)) end end """ left_rotate(node_x::AVLTreeNode) Performs a left-rotation on `node_x`, updates height of the nodes, and returns the rotated node. """ function left_rotate(z::AVLTreeNode) y = z.rightChild α = y.leftChild y.leftChild = z z.rightChild = α z.height = compute_height(z) y.height = compute_height(y) z.subsize = compute_subtree_size(z) ##CHUNK 2 Performs a left-rotation on `node_x`, updates height of the nodes, and returns the rotated node. """ function left_rotate(z::AVLTreeNode) y = z.rightChild α = y.leftChild y.leftChild = z z.rightChild = α z.height = compute_height(z) y.height = compute_height(y) z.subsize = compute_subtree_size(z) y.subsize = compute_subtree_size(y) return y end """ right_rotate(node_x::AVLTreeNode) Performs a right-rotation on `node_x`, updates height of the nodes, and returns the rotated node. 
""" ##CHUNK 3 function insert_node(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = insert_node(node.leftChild, key) else node.rightChild = insert_node(node.rightChild, key) end node.subsize = compute_subtree_size(node) node.height = compute_height(node) balance = get_balance(node) if balance > 1 if key < node.leftChild.data return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end end ##CHUNK 4 node.height = compute_height(node) balance = get_balance(node) if balance > 1 if get_balance(node.leftChild) >= 0 return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end end if balance < -1 if get_balance(node.rightChild) <= 0 return left_rotate(node) else node.rightChild = right_rotate(node.rightChild) return left_rotate(node) end end ##CHUNK 5 result = node.leftChild return result else result = minimum_node(node.rightChild) node.data = result.data node.rightChild = delete_node!(node.rightChild, result.data) end end node.subsize = compute_subtree_size(node) node.height = compute_height(node) balance = get_balance(node) if balance > 1 if get_balance(node.leftChild) >= 0 return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end ##CHUNK 6 # computes the height of the subtree, which basically is # one added the maximum of the height of the left subtree and right subtree compute_height(node::AVLTreeNode) = Int8(1) + max(get_height(node.leftChild), get_height(node.rightChild)) get_subsize(node::AVLTreeNode_or_null) = (node == nothing) ? Int32(0) : node.subsize # compute the subtree size function compute_subtree_size(node::AVLTreeNode_or_null) if node == nothing return Int32(0) else L = get_subsize(node.leftChild) R = get_subsize(node.rightChild) return (L + R + Int32(1)) end end """ left_rotate(node_x::AVLTreeNode)
Record 3
repo: DataStructures.jl
id: 3
file_path: src/avl_tree.jl
function_name: search_node
start_line: 124, end_line: 138, solution_position: [124, 138]
context_start_lineno: 124, line_no: 138
target_function_prompt / function_signature / raw_solution / focal_code (identical in this record):

function search_node(tree::AVLTree{K}, d::K) where K
    prev = nothing
    node = tree.root
    while node != nothing && node.data != nothing && node.data != d
        prev = node
        if d < node.data
            node = node.leftChild
        else
            node = node.rightChild
        end
    end

    return (node == nothing) ? prev : node
end

context (retrieved code chunks, as extracted):
#FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 x = maximum_node(s) splay!(tree, x) x.rightChild = t t.parent = x return x end end function search_node(tree::SplayTree{K}, d::K) where K node = tree.root prev = nothing while node != nothing && node.data != d prev = node if node.data < d node = node.rightChild else node = node.leftChild end end return (node == nothing) ? prev : node ##CHUNK 2 prev = nothing while node != nothing && node.data != d prev = node if node.data < d node = node.rightChild else node = node.leftChild end end return (node == nothing) ? prev : node end function Base.haskey(tree::SplayTree{K}, d::K) where K node = tree.root if node === nothing return false else node = search_node(tree, d) (node === nothing) && return false is_found = (node.data == d) ##CHUNK 3 node = SplayTreeNode{K}(d) y = nothing x = tree.root while x !== nothing y = x if node.data > x.data x = x.rightChild else x = x.leftChild end end node.parent = y if y === nothing tree.root = node elseif node.data < y.data y.leftChild = node else y.rightChild = node ##CHUNK 4 return tree end function Base.push!(tree::SplayTree{K}, d0) where K d = convert(K, d0) is_present = search_node(tree, d) if (is_present !== nothing) && (is_present.data == d) return tree end # only unique keys are inserted node = SplayTreeNode{K}(d) y = nothing x = tree.root while x !== nothing y = x if node.data > x.data x = x.rightChild else x = x.leftChild #FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild end end return node end """ haskey(tree, key) Returns true if `key` is present in the `tree`, else returns false. """ ##CHUNK 2 function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild end end node.parent = node_y if node_y == nothing tree.root = node elseif node.data < node_y.data node_y.leftChild = node else node_y.rightChild = node ##CHUNK 3 end RBTree() = RBTree{Any}() Base.length(tree::RBTree) = tree.count """ search_node(tree, key) Returns the last visited node, while traversing through in binary-search-tree fashion looking for `key`. """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild ##CHUNK 4 function Base.haskey(tree::RBTree{K}, d::K) where K node = search_node(tree, d) return (node.data == d) end """ insert_node!(tree::RBTree, node::RBTreeNode) Inserts `node` at proper location by traversing through the `tree` in a binary-search-tree fashion. 
""" function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild #CURRENT FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 julia> sorted_rank(tree, 17) 9 ``` """ function sorted_rank(tree::AVLTree{K}, key::K) where K !haskey(tree, key) && throw(KeyError(key)) node = tree.root rank = 0 while node.data != key if (node.data < key) rank += (1 + get_subsize(node.leftChild)) node = node.rightChild else node = node.leftChild end end rank += (1 + get_subsize(node.leftChild)) return rank end ##CHUNK 2 """ function Base.push!(tree::AVLTree{K}, key) where K key0 = convert(K, key) insert!(tree, key0) end function delete_node!(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = delete_node!(node.leftChild, key) elseif key > node.data node.rightChild = delete_node!(node.rightChild, key) else if node.leftChild == nothing result = node.rightChild return result elseif node.rightChild == nothing result = node.leftChild return result else result = minimum_node(node.rightChild)
Record 4
repo: DataStructures.jl
id: 4
file_path: src/avl_tree.jl
function_name: insert_node
start_line: 162, end_line: 192, solution_position: [162, 192]
context_start_lineno: 162, line_no: 192
target_function_prompt / function_signature / raw_solution / focal_code (identical in this record):

function insert_node(node::AVLTreeNode{K}, key::K) where K
    if key < node.data
        node.leftChild = insert_node(node.leftChild, key)
    else
        node.rightChild = insert_node(node.rightChild, key)
    end

    node.subsize = compute_subtree_size(node)
    node.height = compute_height(node)
    balance = get_balance(node)

    if balance > 1
        if key < node.leftChild.data
            return right_rotate(node)
        else
            node.leftChild = left_rotate(node.leftChild)
            return right_rotate(node)
        end
    end

    if balance < -1
        if key > node.rightChild.data
            return left_rotate(node)
        else
            node.rightChild = right_rotate(node.rightChild)
            return left_rotate(node)
        end
    end

    return node
end

context (retrieved code chunks, as extracted):
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 """ function right_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.leftChild node_x.leftChild = node_y.rightChild if node_y.rightChild !== tree.nil node_y.rightChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.rightChild = node_x node_x.parent = node_y end """ ##CHUNK 2 end end """ left_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a left-rotation on `node_x` and updates `tree.root`, if required. """ function left_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.rightChild node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else ##CHUNK 3 end end node.parent = node_y if node_y == nothing tree.root = node elseif node.data < node_y.data node_y.leftChild = node else node_y.rightChild = node end end """ left_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a left-rotation on `node_x` and updates `tree.root`, if required. """ function left_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.rightChild ##CHUNK 4 node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.leftChild = node_x node_x.parent = node_y end """ right_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a right-rotation on `node_x` and updates `tree.root`, if required. #CURRENT FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 node.height = compute_height(node) balance = get_balance(node) if balance > 1 if get_balance(node.leftChild) >= 0 return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end end if balance < -1 if get_balance(node.rightChild) <= 0 return left_rotate(node) else node.rightChild = right_rotate(node.rightChild) return left_rotate(node) end end ##CHUNK 2 result = node.leftChild return result else result = minimum_node(node.rightChild) node.data = result.data node.rightChild = delete_node!(node.rightChild, result.data) end end node.subsize = compute_subtree_size(node) node.height = compute_height(node) balance = get_balance(node) if balance > 1 if get_balance(node.leftChild) >= 0 return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end ##CHUNK 3 function delete_node!(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = delete_node!(node.leftChild, key) elseif key > node.data node.rightChild = delete_node!(node.rightChild, key) else if node.leftChild == nothing result = node.rightChild return result elseif node.rightChild == nothing result = node.leftChild return result else result = minimum_node(node.rightChild) node.data = result.data node.rightChild = delete_node!(node.rightChild, result.data) end end node.subsize = compute_subtree_size(node) ##CHUNK 4 """ push!(tree::AVLTree{K}, key) where K Insert `key` in AVL tree `tree`. 
""" function Base.push!(tree::AVLTree{K}, key) where K key0 = convert(K, key) insert!(tree, key0) end function delete_node!(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = delete_node!(node.leftChild, key) elseif key > node.data node.rightChild = delete_node!(node.rightChild, key) else if node.leftChild == nothing result = node.rightChild return result elseif node.rightChild == nothing ##CHUNK 5 # computes the height of the subtree, which basically is # one added the maximum of the height of the left subtree and right subtree compute_height(node::AVLTreeNode) = Int8(1) + max(get_height(node.leftChild), get_height(node.rightChild)) get_subsize(node::AVLTreeNode_or_null) = (node == nothing) ? Int32(0) : node.subsize # compute the subtree size function compute_subtree_size(node::AVLTreeNode_or_null) if node == nothing return Int32(0) else L = get_subsize(node.leftChild) R = get_subsize(node.rightChild) return (L + R + Int32(1)) end end """ left_rotate(node_x::AVLTreeNode) ##CHUNK 6 else L = get_subsize(node.leftChild) R = get_subsize(node.rightChild) return (L + R + Int32(1)) end end """ left_rotate(node_x::AVLTreeNode) Performs a left-rotation on `node_x`, updates height of the nodes, and returns the rotated node. """ function left_rotate(z::AVLTreeNode) y = z.rightChild α = y.leftChild y.leftChild = z z.rightChild = α z.height = compute_height(z) y.height = compute_height(y) z.subsize = compute_subtree_size(z)
Record 5
repo: DataStructures.jl
id: 5
file_path: src/avl_tree.jl
function_name: delete_node!
start_line: 212, end_line: 254, solution_position: [212, 254]
context_start_lineno: 212, line_no: 254
target_function_prompt / function_signature / raw_solution / focal_code (identical in this record):

function delete_node!(node::AVLTreeNode{K}, key::K) where K
    if key < node.data
        node.leftChild = delete_node!(node.leftChild, key)
    elseif key > node.data
        node.rightChild = delete_node!(node.rightChild, key)
    else
        if node.leftChild == nothing
            result = node.rightChild
            return result
        elseif node.rightChild == nothing
            result = node.leftChild
            return result
        else
            result = minimum_node(node.rightChild)
            node.data = result.data
            node.rightChild = delete_node!(node.rightChild, result.data)
        end
    end

    node.subsize = compute_subtree_size(node)
    node.height = compute_height(node)
    balance = get_balance(node)

    if balance > 1
        if get_balance(node.leftChild) >= 0
            return right_rotate(node)
        else
            node.leftChild = left_rotate(node.leftChild)
            return right_rotate(node)
        end
    end

    if balance < -1
        if get_balance(node.rightChild) <= 0
            return left_rotate(node)
        else
            node.rightChild = right_rotate(node.rightChild)
            return left_rotate(node)
        end
    end

    return node
end

context (retrieved code chunks, as extracted):
#FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 # double rotation elseif node_x == parent.leftChild && parent == grand_parent.leftChild # zig-zig rotation right_rotate!(tree, grand_parent) right_rotate!(tree, parent) elseif node_x == parent.rightChild && parent == grand_parent.rightChild # zag-zag rotation left_rotate!(tree, grand_parent) left_rotate!(tree, parent) elseif node_x == parent.rightChild && parent == grand_parent.leftChild # zig-zag rotation left_rotate!(tree, node_x.parent) right_rotate!(tree, node_x.parent) else # zag-zig rotation right_rotate!(tree, node_x.parent) left_rotate!(tree, node_x.parent) end end end #FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 """ function right_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.leftChild node_x.leftChild = node_y.rightChild if node_y.rightChild !== tree.nil node_y.rightChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.rightChild = node_x node_x.parent = node_y end """ ##CHUNK 2 else # uncle is black in color if (node == parent.leftChild) # node is leftChild of its parent node = parent right_rotate!(tree, node) end # node is rightChild of its parent node.parent.color = false node.parent.parent.color = true left_rotate!(tree, node.parent.parent) end end end tree.root.color = false end """ insert!(tree, key) Inserts `key` in the `tree` if it is not present. """ ##CHUNK 3 end """ minimum_node(tree::RBTree, node::RBTreeNode) Returns the RBTreeNode with minimum value in subtree of `node`. """ function minimum_node(tree::RBTree, node::RBTreeNode) (node === tree.nil) && return node while node.leftChild !== tree.nil node = node.leftChild end return node end """ delete!(tree::RBTree, key) Deletes `key` from `tree`, if present, else returns the unmodified tree. """ ##CHUNK 4 end end """ left_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a left-rotation on `node_x` and updates `tree.root`, if required. """ function left_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.rightChild node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else ##CHUNK 5 node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.leftChild = node_x node_x.parent = node_y end """ right_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a right-rotation on `node_x` and updates `tree.root`, if required. #FILE: DataStructures.jl/src/balanced_tree.jl ##CHUNK 1 end @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2le_leaf(t.ord, thisnode, k) : cmp3le_leaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? thisnode.child2 : thisnode.child3 return curnode end ## The following are helper routines for the insert! and delete! functions. ## They replace the 'parent' field of either an internal tree node or ## a data node at the bottom tree level. 
function replaceparent!(data::Vector{KDRec{K,D}}, whichind::Int, newparent::Int) where {K,D} data[whichind] = KDRec{K,D}(newparent, data[whichind].k, data[whichind].d) return nothing end #CURRENT FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 balance = get_balance(node) if balance > 1 if key < node.leftChild.data return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end end if balance < -1 if key > node.rightChild.data return left_rotate(node) else node.rightChild = right_rotate(node.rightChild) return left_rotate(node) end end ##CHUNK 2 if balance < -1 if key > node.rightChild.data return left_rotate(node) else node.rightChild = right_rotate(node.rightChild) return left_rotate(node) end end return node end function Base.insert!(tree::AVLTree{K}, d::K) where K haskey(tree, d) && return tree tree.root = insert_node(tree.root, d) tree.count += 1 return tree end ##CHUNK 3 # computes the height of the subtree, which basically is # one added the maximum of the height of the left subtree and right subtree compute_height(node::AVLTreeNode) = Int8(1) + max(get_height(node.leftChild), get_height(node.rightChild)) get_subsize(node::AVLTreeNode_or_null) = (node == nothing) ? Int32(0) : node.subsize # compute the subtree size function compute_subtree_size(node::AVLTreeNode_or_null) if node == nothing return Int32(0) else L = get_subsize(node.leftChild) R = get_subsize(node.rightChild) return (L + R + Int32(1)) end end """ left_rotate(node_x::AVLTreeNode)
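The two-child branch of delete_node! replaces the deleted value with its in-order successor. A hedged sketch through the public delete! wrapper, with made-up key values:

using DataStructures  # assumed to be installed

t = AVLTree{Int}()
for k in (8, 3, 12, 10, 14)
    push!(t, k)
end
delete!(t, 12)       # node 12 has two children; its successor 14 takes its place
@assert !haskey(t, 12) && haskey(t, 10) && haskey(t, 14)
delete!(t, 99)       # absent keys are a no-op in Base.delete!(tree, k)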
Record 6
repo: DataStructures.jl
id: 6
file_path: src/avl_tree.jl
function_name: sorted_rank
start_line: 290, end_line: 304, solution_position: [290, 304]
context_start_lineno: 290, line_no: 304
target_function_prompt / function_signature / raw_solution / focal_code (identical in this record):

function sorted_rank(tree::AVLTree{K}, key::K) where K
    !haskey(tree, key) && throw(KeyError(key))
    node = tree.root
    rank = 0
    while node.data != key
        if (node.data < key)
            rank += (1 + get_subsize(node.leftChild))
            node = node.rightChild
        else
            node = node.leftChild
        end
    end
    rank += (1 + get_subsize(node.leftChild))
    return rank
end

context (retrieved code chunks, as extracted):
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild end end return node end """ haskey(tree, key) Returns true if `key` is present in the `tree`, else returns false. """ ##CHUNK 2 end RBTree() = RBTree{Any}() Base.length(tree::RBTree) = tree.count """ search_node(tree, key) Returns the last visited node, while traversing through in binary-search-tree fashion looking for `key`. """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild ##CHUNK 3 function Base.haskey(tree::RBTree{K}, d::K) where K node = search_node(tree, d) return (node.data == d) end """ insert_node!(tree::RBTree, node::RBTreeNode) Inserts `node` at proper location by traversing through the `tree` in a binary-search-tree fashion. """ function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild #FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 node = SplayTreeNode{K}(d) y = nothing x = tree.root while x !== nothing y = x if node.data > x.data x = x.rightChild else x = x.leftChild end end node.parent = y if y === nothing tree.root = node elseif node.data < y.data y.leftChild = node else y.rightChild = node #CURRENT FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 if balance < -1 if key > node.rightChild.data return left_rotate(node) else node.rightChild = right_rotate(node.rightChild) return left_rotate(node) end end return node end function Base.insert!(tree::AVLTree{K}, d::K) where K haskey(tree, d) && return tree tree.root = insert_node(tree.root, d) tree.count += 1 return tree end ##CHUNK 2 """ push!(tree::AVLTree{K}, key) where K Insert `key` in AVL tree `tree`. """ function Base.push!(tree::AVLTree{K}, key) where K key0 = convert(K, key) insert!(tree, key0) end function delete_node!(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = delete_node!(node.leftChild, key) elseif key > node.data node.rightChild = delete_node!(node.rightChild, key) else if node.leftChild == nothing result = node.rightChild return result ##CHUNK 3 function insert_node(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = insert_node(node.leftChild, key) else node.rightChild = insert_node(node.rightChild, key) end node.subsize = compute_subtree_size(node) node.height = compute_height(node) balance = get_balance(node) if balance > 1 if key < node.leftChild.data return right_rotate(node) else node.leftChild = left_rotate(node.leftChild) return right_rotate(node) end end ##CHUNK 4 return node end function search_node(tree::AVLTree{K}, d::K) where K prev = nothing node = tree.root while node != nothing && node.data != nothing && node.data != d prev = node if d < node.data node = node.leftChild else node = node.rightChild end end return (node == nothing) ? 
prev : node end """ ##CHUNK 5 function Base.delete!(tree::AVLTree{K}, k::K) where K # if the key is not in the tree, do nothing and return the tree !haskey(tree, k) && return tree # if the key is present, delete it from the tree tree.root = delete_node!(tree.root, k) tree.count -= 1 return tree end """ sorted_rank(tree::AVLTree{K}, key::K) where K Returns the rank of `key` present in the `tree`, if it present. A `KeyError` is thrown if `key` is not present. # Examples ```jldoctest julia> tree = AVLTree{Int}(); ##CHUNK 6 function delete_node!(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = delete_node!(node.leftChild, key) elseif key > node.data node.rightChild = delete_node!(node.rightChild, key) else if node.leftChild == nothing result = node.rightChild return result elseif node.rightChild == nothing result = node.leftChild return result else result = minimum_node(node.rightChild) node.data = result.data node.rightChild = delete_node!(node.rightChild, result.data) end end
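The docstring example in the chunk above (sorted_rank(tree, 17) returning 9) can be reproduced with the odd keys 1..19; a hedged sketch, assuming sorted_rank is exported as documented:

using DataStructures  # assumed to be installed

t = AVLTree{Int}()
for k in 1:2:19          # 1, 3, 5, ..., 19
    push!(t, k)
end
@assert sorted_rank(t, 17) == 9   # 17 is the 9th-smallest key
# sorted_rank(t, 4) would throw a KeyError, since 4 was never inserted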
Record 7
repo: DataStructures.jl
id: 7
file_path: src/avl_tree.jl
function_name: traverse_tree
start_line: 329, end_line: 345, solution_position: [329, 345]
context_start_lineno: 329, line_no: 345
target_function_prompt / function_signature / raw_solution / focal_code (identical in this record):

function Base.getindex(tree::AVLTree{K}, ind::Integer) where K
    @boundscheck (1 <= ind <= tree.count) || throw(BoundsError("$ind should be in between 1 and $(tree.count)"))
    function traverse_tree(node::AVLTreeNode_or_null, idx)
        if (node != nothing)
            L = get_subsize(node.leftChild)
            if idx <= L
                return traverse_tree(node.leftChild, idx)
            elseif idx == L + 1
                return node.data
            else
                return traverse_tree(node.rightChild, idx - L - 1)
            end
        end
    end
    value = traverse_tree(tree.root, ind)
    return value
end

context (retrieved code chunks, as extracted):
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 Base.in(key, tree::RBTree) = haskey(tree, key) """ getindex(tree, ind) Gets the key present at index `ind` of the tree. Indexing is done in increasing order of key. """ function Base.getindex(tree::RBTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(ArgumentError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::RBTreeNode{K}) where K if (node !== tree.nil) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) ##CHUNK 2 rb_transplant(tree, z, y) y.leftChild = z.leftChild y.leftChild.parent = y y.color = z.color end !y_original_color && delete_fix(tree, x) tree.count -= 1 return tree end Base.in(key, tree::RBTree) = haskey(tree, key) """ getindex(tree, ind) Gets the key present at index `ind` of the tree. Indexing is done in increasing order of key. """ function Base.getindex(tree::RBTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(ArgumentError("$ind should be in between 1 and $(tree.count)")) ##CHUNK 3 """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild end end return node end """ haskey(tree, key) Returns true if `key` is present in the `tree`, else returns false. """ ##CHUNK 4 end RBTree() = RBTree{Any}() Base.length(tree::RBTree) = tree.count """ search_node(tree, key) Returns the last visited node, while traversing through in binary-search-tree fashion looking for `key`. """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild ##CHUNK 5 function traverse_tree_inorder(node::RBTreeNode{K}) where K if (node !== tree.nil) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) return @inbounds arr[ind] end #FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 end splay!(tree, node) tree.count += 1 return tree end function Base.getindex(tree::SplayTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(KeyError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::Union{SplayTreeNode, Nothing}) if (node != nothing) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) return @inbounds arr[ind] end ##CHUNK 2 end end node.parent = y if y === nothing tree.root = node elseif node.data < y.data y.leftChild = node else y.rightChild = node end splay!(tree, node) tree.count += 1 return tree end function Base.getindex(tree::SplayTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(KeyError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::Union{SplayTreeNode, Nothing}) if (node != nothing) #CURRENT FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 !haskey(tree, key) && throw(KeyError(key)) node = tree.root rank = 0 while node.data != key if (node.data < key) rank += (1 + get_subsize(node.leftChild)) node = node.rightChild else node = node.leftChild end end rank += (1 
+ get_subsize(node.leftChild)) return rank end """ getindex(tree::AVLTree{K}, ind::Integer) where K Considering the elements of `tree` sorted, returns the `ind`-th element in `tree`. Search operation is performed in \$O(\\log n)\$ time complexity. ##CHUNK 2 return node end function search_node(tree::AVLTree{K}, d::K) where K prev = nothing node = tree.root while node != nothing && node.data != nothing && node.data != d prev = node if d < node.data node = node.leftChild else node = node.rightChild end end return (node == nothing) ? prev : node end """ ##CHUNK 3 """ push!(tree::AVLTree{K}, key) where K Insert `key` in AVL tree `tree`. """ function Base.push!(tree::AVLTree{K}, key) where K key0 = convert(K, key) insert!(tree, key0) end function delete_node!(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = delete_node!(node.leftChild, key) elseif key > node.data node.rightChild = delete_node!(node.rightChild, key) else if node.leftChild == nothing result = node.rightChild return result
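Indexing walks left-subtree sizes, so tree[ind] returns the ind-th smallest key. A hedged sketch with made-up keys:

using DataStructures  # assumed to be installed

t = AVLTree{Int}()
for k in (40, 10, 30, 20)
    push!(t, k)
end
@assert t[1] == 10 && t[2] == 20 && t[4] == 40   # ind-th element in sorted order
# t[5] fails the @boundscheck above and throws a BoundsError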
Record 8
repo: DataStructures.jl
id: 8
file_path: src/balanced_tree.jl
function_name: Base.empty!
start_line: 238, end_line: 250, solution_position: [238, 250]
context_start_lineno: 238, line_no: 250
target_function_prompt / function_signature / raw_solution / focal_code (identical in this record):

function Base.empty!(t::BalancedTree23)
    resize!(t.data,2)
    initializeData!(t.data)
    resize!(t.tree,1)
    initializeTree!(t.tree)
    t.depth = 1
    t.rootloc = 1
    empty!(t.freetreeinds)
    empty!(t.freedatainds)
    empty!(t.useddatacells)
    push!(t.useddatacells, 1, 2)
    return nothing
end

context (retrieved code chunks, as extracted):
#FILE: DataStructures.jl/test/test_sorted_containers.jl ##CHUNK 1 end remove_spaces(s::String) = replace(s, r"\s+"=>"") ## Function checkcorrectness checks a balanced tree for correctness. function checkcorrectness(t::DataStructures.BalancedTree23{K,D,Ord}, allowdups=false) where {K,D,Ord <: Ordering} dsz = size(t.data, 1) tsz = size(t.tree, 1) r = t.rootloc bfstreenodes = Vector{Int}() tdpth = t.depth intree = BitSet() levstart = Vector{Int}(undef, tdpth) push!(bfstreenodes, r) levstart[1] = 1 ##CHUNK 2 throw(ErrorException("t.useddatacells has indices larger than t.data size")) end for i = 1 : dsz if (in(i, dataused) && !in(i, t.useddatacells)) || (!in(i,dataused) && in(i, t.useddatacells)) throw(ErrorException("Mismatch between actual data cells used and useddatacells array")) end if (in(i, freedata) && in(i, dataused)) || (!in(i,freedata) && !in(i, dataused)) throw(ErrorException("Mismatch between t.freedatainds and t.useddatacells")) end end freetree = BitSet() for i = 1 : size(t.freetreeinds,1) tfi = t.freetreeinds[i] if in(tfi, freetree) throw(ErrorException("Free tree index repeated twice")) end if tfi < 1 || tfi > tsz throw(ErrorException("Free tree index out of range")) #CURRENT FILE: DataStructures.jl/src/balanced_tree.jl ##CHUNK 1 mutable struct BalancedTree23{K, D, Ord <: Ordering} ord::Ord data::Vector{KDRec{K,D}} tree::Vector{TreeNode{K}} rootloc::Int depth::Int freetreeinds::Vector{Int} freedatainds::Vector{Int} useddatacells::BitSet # The next two arrays are used as a workspace by the delete! # function. deletionchild::Vector{Int} deletionleftkey::Vector{K} function BalancedTree23{K,D,Ord}(ord1::Ord) where {K,D,Ord<:Ordering} tree1 = Vector{TreeNode{K}}(undef, 1) initializeTree!(tree1) data1 = Vector{KDRec{K,D}}(undef, 2) initializeData!(data1) u1 = BitSet() push!(u1, 1, 2) ##CHUNK 2 if exactfound && !allowdups t.data[leafind] = KDRec{K,D}(parent, k,d) return false, leafind end # We get here if k was not already found in the tree or # if duplicates are allowed. # In this case we insert a new node. depth = t.depth ord = t.ord ## Store the new data item in the tree's data array. Later ## go back and fix the parent. 
newind = push_or_reuse!(t.data, t.freedatainds, KDRec{K,D}(0,k,d)) push!(t.useddatacells, newind) p1 = parent newchild = newind minkeynewchild = k splitroot = false ##CHUNK 3 t.tree[rightsib].splitkey1) if curdepth == t.depth replaceparent!(t.data, rc1, p) replaceparent!(t.data, rc2, p) else replaceparent!(t.tree, rc1, p) replaceparent!(t.tree, rc2, p) end push!(t.freetreeinds, rightsib) newchildcount = 1 t.deletionchild[1] = p else rc1 = t.tree[rightsib].child1 t.tree[p] = TreeNode{K}(t.deletionchild[1], rc1, 0, pparent, t.tree[pparent].splitkey1, defaultKey) sk1 = t.tree[rightsib].splitkey1 t.tree[rightsib] = TreeNode{K}(t.tree[rightsib].child2, t.tree[rightsib].child3, ##CHUNK 4 # if newchild4==0 if newchild4 == 0 # Change the parent from a 2-node to a 3-node t.tree[p1] = TreeNode{K}(newchild1, newchild2, newchild3, p1parent, minkeychild2, minkeychild3) if curdepth == depth replaceparent!(t.data, newchild, p1) else replaceparent!(t.tree, newchild, p1) end break end # Split the parent t.tree[p1] = TreeNode{K}(newchild1, newchild2, 0, p1parent, minkeychild2, minkeychild2) newtreenode = TreeNode{K}(newchild3, newchild4, 0, p1parent, minkeychild4, minkeychild2) newparentnum = push_or_reuse!(t.tree, t.freetreeinds, newtreenode) if curdepth == depth ##CHUNK 5 t.deletionchild[newchildcount] = c1 t.deletionleftkey[newchildcount] = t.data[c1].k end c2 = t.tree[p].child2 if c2 != it newchildcount += 1 t.deletionchild[newchildcount] = c2 t.deletionleftkey[newchildcount] = t.data[c2].k end c3 = t.tree[p].child3 if c3 != it && c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.data[c3].k end @invariant newchildcount == 1 || newchildcount == 2 push!(t.freedatainds, it) pop!(t.useddatacells,it) defaultKey = t.tree[1].splitkey1 curdepth = t.depth ##CHUNK 6 ## tree array (locations are freed due to deletion) ## freedatainds: Array of indices of free locations in the ## data array (locations are freed due to deletion) ## useddatacells: BitSet (i.e., bit vector) showing which ## data cells are taken. The complementary positions are ## exactly those stored in freedatainds. This array is ## used only for error checking. ## deletionchild and deletionleftkey are two work-arrays ## for the delete function. mutable struct BalancedTree23{K, D, Ord <: Ordering} ord::Ord data::Vector{KDRec{K,D}} tree::Vector{TreeNode{K}} rootloc::Int depth::Int freetreeinds::Vector{Int} freedatainds::Vector{Int} useddatacells::BitSet # The next two arrays are used as a workspace by the delete! 
##CHUNK 7 lk) if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 2 t.deletionchild[1] = t.tree[pparent].child1 t.deletionleftkey[2] = t.tree[pparent].splitkey1 t.deletionchild[2] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, ##CHUNK 8 else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 1 t.deletionchild[1] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) if curdepth == t.depth replaceparent!(t.data, lc3, p) else
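These excerpts walk through the BalancedTree23 internals (2-3 tree nodes, the free lists, and the correctness checker used by the sorted-container tests). For orientation, the short sketch below drives that machinery only through the public SortedDict interface; it assumes nothing beyond the package's standard exported API and does not touch the internal tree fields.

using DataStructures

# SortedDict is one of the public containers built on the BalancedTree23
# structure discussed in these excerpts.
sd = SortedDict(3 => "c", 1 => "a", 2 => "b")

collect(keys(sd))   # [1, 2, 3] -- keys are maintained in sorted order
sd[4] = "d"         # insertion goes through the tree's insert! machinery
delete!(sd, 2)      # deletion goes through the tree's delete! machinery
collect(sd)         # [1 => "a", 3 => "c", 4 => "d"]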
292
309
DataStructures.jl
9
function findkeyless(t::BalancedTree23, k)
    curnode = t.rootloc
    for depthcount = 1 : t.depth - 1
        @inbounds thisnode = t.tree[curnode]
        cmp = thisnode.child3 == 0 ?
            cmp2le_nonleaf(t.ord, thisnode, k) :
            cmp3le_nonleaf(t.ord, thisnode, k)
        curnode = cmp == 1 ? thisnode.child1 :
                  cmp == 2 ? thisnode.child2 : thisnode.child3
    end
    @inbounds thisnode = t.tree[curnode]
    cmp = thisnode.child3 == 0 ?
        cmp2le_leaf(t.ord, thisnode, k) :
        cmp3le_leaf(t.ord, thisnode, k)
    curnode = cmp == 1 ? thisnode.child1 :
              cmp == 2 ? thisnode.child2 : thisnode.child3
    return curnode
end

function findkeyless(t::BalancedTree23, k)
    curnode = t.rootloc
    for depthcount = 1 : t.depth - 1
        @inbounds thisnode = t.tree[curnode]
        cmp = thisnode.child3 == 0 ?
            cmp2le_nonleaf(t.ord, thisnode, k) :
            cmp3le_nonleaf(t.ord, thisnode, k)
        curnode = cmp == 1 ? thisnode.child1 :
                  cmp == 2 ? thisnode.child2 : thisnode.child3
    end
    @inbounds thisnode = t.tree[curnode]
    cmp = thisnode.child3 == 0 ?
        cmp2le_leaf(t.ord, thisnode, k) :
        cmp3le_leaf(t.ord, thisnode, k)
    curnode = cmp == 1 ? thisnode.child1 :
              cmp == 2 ? thisnode.child2 : thisnode.child3
    return curnode
end
[ 292, 309 ]
function findkeyless(t::BalancedTree23, k)
    curnode = t.rootloc
    for depthcount = 1 : t.depth - 1
        @inbounds thisnode = t.tree[curnode]
        cmp = thisnode.child3 == 0 ?
            cmp2le_nonleaf(t.ord, thisnode, k) :
            cmp3le_nonleaf(t.ord, thisnode, k)
        curnode = cmp == 1 ? thisnode.child1 :
                  cmp == 2 ? thisnode.child2 : thisnode.child3
    end
    @inbounds thisnode = t.tree[curnode]
    cmp = thisnode.child3 == 0 ?
        cmp2le_leaf(t.ord, thisnode, k) :
        cmp3le_leaf(t.ord, thisnode, k)
    curnode = cmp == 1 ? thisnode.child1 :
              cmp == 2 ? thisnode.child2 : thisnode.child3
    return curnode
end

function findkeyless(t::BalancedTree23, k)
    curnode = t.rootloc
    for depthcount = 1 : t.depth - 1
        @inbounds thisnode = t.tree[curnode]
        cmp = thisnode.child3 == 0 ?
            cmp2le_nonleaf(t.ord, thisnode, k) :
            cmp3le_nonleaf(t.ord, thisnode, k)
        curnode = cmp == 1 ? thisnode.child1 :
                  cmp == 2 ? thisnode.child2 : thisnode.child3
    end
    @inbounds thisnode = t.tree[curnode]
    cmp = thisnode.child3 == 0 ?
        cmp2le_leaf(t.ord, thisnode, k) :
        cmp3le_leaf(t.ord, thisnode, k)
    curnode = cmp == 1 ? thisnode.child1 :
              cmp == 2 ? thisnode.child2 : thisnode.child3
    return curnode
end
findkeyless
292
309
src/balanced_tree.jl
#FILE: DataStructures.jl/test/test_sorted_containers.jl ##CHUNK 1 mk2 = minkeys[cp] cp += 1 if t.tree[c2].parent != anc throw(ErrorException("Parent/child2 links do not match")) end c3 = t.tree[anc].child3 my_assert(s == levstart[curdepth] || lt(t.ord,mk1,mk2) || (!lt(t.ord,mk2,mk1) && allowdups)) if c3 > 0 if t.tree[c3].parent != anc throw(ErrorException("Parent/child3 links do not match")) end mk3 = minkeys[cp] cp += 1 my_assert(lt(t.ord,mk2, mk3) || !lt(t.ord,mk3,mk2) && allowdups) end if s > levstart[curdepth] minkeys[s] = mk1 end ##CHUNK 2 my_assert(c1 == bfstreenodes[cp]) if s > levstart[curdepth] mk1 = minkeys[cp] end cp += 1 if t.tree[c1].parent != anc throw(ErrorException("Parent/child1 links do not match")) end c2 = t.tree[anc].child2 my_assert(c2 == bfstreenodes[cp]) mk2 = minkeys[cp] cp += 1 if t.tree[c2].parent != anc throw(ErrorException("Parent/child2 links do not match")) end c3 = t.tree[anc].child3 my_assert(s == levstart[curdepth] || lt(t.ord,mk1,mk2) || (!lt(t.ord,mk2,mk1) && allowdups)) if c3 > 0 if t.tree[c3].parent != anc #CURRENT FILE: DataStructures.jl/src/balanced_tree.jl ##CHUNK 1 @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2_nonleaf(t.ord, thisnode, k) : cmp3_nonleaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? thisnode.child2 : thisnode.child3 end @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2_leaf(t.ord, thisnode, k) : cmp3_leaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? thisnode.child2 : thisnode.child3 @inbounds return curnode, (curnode > 2 && eq(t.ord, t.data[curnode].k, k)) end ## The findkeyless function finds the index of a (key,data) pair in the tree that ## with the greatest key that is less than the given key. If there is no ## key less than the given key, then it returns 1 (the before-start node). ##CHUNK 2 ## where the given key lives (if it is present), or ## if the key is not present, to the lower bound for the key, ## i.e., the data item that comes immediately before it. ## If there are multiple equal keys, then it finds the last one. ## It returns the index of the key found and a boolean indicating ## whether the exact key was found or not. function findkey(t::BalancedTree23, k) curnode = t.rootloc for depthcount = 1 : t.depth - 1 @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2_nonleaf(t.ord, thisnode, k) : cmp3_nonleaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? thisnode.child2 : thisnode.child3 end @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2_leaf(t.ord, thisnode, k) : ##CHUNK 3 treenode::TreeNode, k) !lt(o,treenode.splitkey1, k) ? 1 : !lt(o,treenode.splitkey2, k) ? 2 : 3 end @inline function cmp3le_leaf(o::Ordering, treenode::TreeNode, k) !lt(o,treenode.splitkey1,k) ? 1 : (treenode.child3 == 2 || !lt(o,treenode.splitkey2, k)) ? 2 : 3 end ## The empty! function deletes all data in the balanced tree. ## Therefore, it invalidates all indices. function Base.empty!(t::BalancedTree23) resize!(t.data,2) initializeData!(t.data) ##CHUNK 4 ## Function cmp3le checks a tree node with three children ## against a given key, and returns 1 if the given key is ## less than or equal to the node's splitkey1, 2 if less than or equal ## to splitkey2, or ## 3 else. Special case ## if the node is a leaf and its right child is the end ## of the sorted order. @inline function cmp3le_nonleaf(o::Ordering, treenode::TreeNode, k) !lt(o,treenode.splitkey1, k) ? 1 : !lt(o,treenode.splitkey2, k) ? 
2 : 3 end @inline function cmp3le_leaf(o::Ordering, treenode::TreeNode, k) !lt(o,treenode.splitkey1,k) ? 1 : ##CHUNK 5 ## less than the node's splitkey1, 2 if the key is greater than or ## equal to splitkey1 but less than splitkey2, or 3 else. Special case ## if the node is a leaf and its right child is the end ## of the sorted order. @inline function cmp3_nonleaf(o::Ordering, treenode::TreeNode, k) lt(o, k, treenode.splitkey1) ? 1 : lt(o, k, treenode.splitkey2) ? 2 : 3 end @inline function cmp3_leaf(o::Ordering, treenode::TreeNode, k) lt(o, k, treenode.splitkey1) ? 1 : (treenode.child3 == 2 || lt(o, k, treenode.splitkey2)) ? 2 : 3 end ##CHUNK 6 end @inline function cmp3_leaf(o::Ordering, treenode::TreeNode, k) lt(o, k, treenode.splitkey1) ? 1 : (treenode.child3 == 2 || lt(o, k, treenode.splitkey2)) ? 2 : 3 end ## Function cmp2le checks a tree node with two children ## against a given key, and returns 1 if the given key is ## less than or equal to the node's splitkey or 2 else. Special case ## if the node is a leaf and its right child is the end ## of the sorted order. @inline function cmp2le_nonleaf(o::Ordering, treenode::TreeNode, k) ##CHUNK 7 !lt(o,treenode.splitkey1,k) ? 1 : 2 end @inline function cmp2le_leaf(o::Ordering, treenode::TreeNode, k) treenode.child2 == 2 || !lt(o,treenode.splitkey1,k) ? 1 : 2 end ## Function cmp3le checks a tree node with three children ## against a given key, and returns 1 if the given key is ## less than or equal to the node's splitkey1, 2 if less than or equal ## to splitkey2, or ## 3 else. Special case ## if the node is a leaf and its right child is the end ## of the sorted order. @inline function cmp3le_nonleaf(o::Ordering, ##CHUNK 8 ## if the node is a leaf and its right child is the end ## of the sorted order. @inline function cmp2_nonleaf(o::Ordering, treenode::TreeNode, k) lt(o, k, treenode.splitkey1) ? 1 : 2 end @inline function cmp2_leaf(o::Ordering, treenode::TreeNode, k) (treenode.child2 == 2) || lt(o, k, treenode.splitkey1) ? 1 : 2 end ## Function cmp3 checks a tree node with three children ## against a given key, and returns 1 if the given key is
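The findkeyless record above descends the 2-3 tree to the data cell holding the greatest key strictly less than k, falling back to the before-start placeholder (index 1) when no smaller key exists. The stand-alone sketch below mirrors that predecessor-search contract on a plain sorted vector; the name findkeyless_demo and its 0-based "no predecessor" return value are illustrative choices, not part of the package.

# Illustrative analogue of the findkeyless semantics on a sorted vector:
# return the index of the greatest key strictly less than k, or 0 if none.
using Base.Order: Ordering, Forward, lt

function findkeyless_demo(keys::AbstractVector, k, ord::Ordering = Forward)
    lo = 0                      # plays the role of the "before-start" node
    hi = length(keys) + 1
    while hi - lo > 1           # binary search for the predecessor
        mid = (lo + hi) >>> 1
        if lt(ord, keys[mid], k)
            lo = mid
        else
            hi = mid
        end
    end
    return lo
end

findkeyless_demo([10, 20, 30], 25)  # 2 (index of 20)
findkeyless_demo([10, 20, 30], 10)  # 0 (no key is strictly less than 10)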
358
520
DataStructures.jl
10
function Base.insert!(t::BalancedTree23{K,D,Ord}, k, d, allowdups::Bool) where {K,D,Ord <: Ordering} ## First we find the greatest data node that is <= k. leafind, exactfound = findkey(t, k) parent = t.data[leafind].parent ## The following code is necessary because in the case of a ## brand new tree, the initial tree and data entries were incompletely ## initialized by the constructor. In this case, the call to insert! ## underway carries ## valid K and D values, so these valid values may now be ## stored in the dummy placeholder nodes so that they no ## longer hold undefined references. if size(t.data,1) == 2 @invariant t.rootloc == 1 && t.depth == 1 t.tree[1] = TreeNode{K}(t.tree[1].child1, t.tree[1].child2, t.tree[1].child3, t.tree[1].parent, k, k) t.data[1] = KDRec{K,D}(t.data[1].parent, k, d) t.data[2] = KDRec{K,D}(t.data[2].parent, k, d) end ## If we have found exactly k in the tree, then we ## replace the data associated with k and return. if exactfound && !allowdups t.data[leafind] = KDRec{K,D}(parent, k,d) return false, leafind end # We get here if k was not already found in the tree or # if duplicates are allowed. # In this case we insert a new node. depth = t.depth ord = t.ord ## Store the new data item in the tree's data array. Later ## go back and fix the parent. newind = push_or_reuse!(t.data, t.freedatainds, KDRec{K,D}(0,k,d)) push!(t.useddatacells, newind) p1 = parent newchild = newind minkeynewchild = k splitroot = false curdepth = depth existingchild = leafind ## This loop ascends the tree (i.e., follows the path from a leaf to the root) ## starting from the parent p1 of ## where the new key k will go. ## Variables updated by the loop: ## p1: parent of where the new node goes ## newchild: index of the child to be inserted ## minkeynewchild: the minimum key in the subtree rooted at newchild ## existingchild: a child of p1; the newchild must ## be inserted in the slot to the right of existingchild ## curdepth: depth of newchild ## For each 3-node we encounter ## during the ascent, we add a new child, which requires splitting ## the 3-node into two 2-nodes. Then we keep going until we hit the root. ## If we encounter a 2-node, then the ascent can stop; we can ## change the 2-node to a 3-node with the new child. while true # Let newchild1,...newchild4 be the new children of # the parent node # Initially, take the three children of the existing parent # node and set newchild4 to 0. newchild1 = t.tree[p1].child1 newchild2 = t.tree[p1].child2 minkeychild2 = t.tree[p1].splitkey1 newchild3 = t.tree[p1].child3 minkeychild3 = t.tree[p1].splitkey2 p1parent = t.tree[p1].parent newchild4 = 0 # Now figure out which of the 4 children is the new node # and insert it into newchild1 ... 
newchild4 if newchild1 == existingchild newchild4 = newchild3 minkeychild4 = minkeychild3 newchild3 = newchild2 minkeychild3 = minkeychild2 newchild2 = newchild minkeychild2 = minkeynewchild elseif newchild2 == existingchild newchild4 = newchild3 minkeychild4 = minkeychild3 newchild3 = newchild minkeychild3 = minkeynewchild elseif newchild3 == existingchild newchild4 = newchild minkeychild4 = minkeynewchild else throw(AssertionError("Tree structure is corrupted 1")) end # Two cases: either we need to split the tree node # if newchild4>0 else we convert a 2-node to a 3-node # if newchild4==0 if newchild4 == 0 # Change the parent from a 2-node to a 3-node t.tree[p1] = TreeNode{K}(newchild1, newchild2, newchild3, p1parent, minkeychild2, minkeychild3) if curdepth == depth replaceparent!(t.data, newchild, p1) else replaceparent!(t.tree, newchild, p1) end break end # Split the parent t.tree[p1] = TreeNode{K}(newchild1, newchild2, 0, p1parent, minkeychild2, minkeychild2) newtreenode = TreeNode{K}(newchild3, newchild4, 0, p1parent, minkeychild4, minkeychild2) newparentnum = push_or_reuse!(t.tree, t.freetreeinds, newtreenode) if curdepth == depth replaceparent!(t.data, newchild2, p1) replaceparent!(t.data, newchild3, newparentnum) replaceparent!(t.data, newchild4, newparentnum) else replaceparent!(t.tree, newchild2, p1) replaceparent!(t.tree, newchild3, newparentnum) replaceparent!(t.tree, newchild4, newparentnum) end # Update the loop variables for the next level of the # ascension existingchild = p1 newchild = newparentnum p1 = p1parent minkeynewchild = minkeychild3 curdepth -= 1 if curdepth == 0 splitroot = true break end end # If the root has been split, then we need to add a level # to the tree that is the parent of the old root and the new node. if splitroot @invariant existingchild == t.rootloc newroot = TreeNode{K}(existingchild, newchild, 0, 0, minkeynewchild, minkeynewchild) newrootloc = push_or_reuse!(t.tree, t.freetreeinds, newroot) replaceparent!(t.tree, existingchild, newrootloc) replaceparent!(t.tree, newchild, newrootloc) t.rootloc = newrootloc t.depth += 1 end return true, newind end
function Base.insert!(t::BalancedTree23{K,D,Ord}, k, d, allowdups::Bool) where {K,D,Ord <: Ordering} ## First we find the greatest data node that is <= k. leafind, exactfound = findkey(t, k) parent = t.data[leafind].parent ## The following code is necessary because in the case of a ## brand new tree, the initial tree and data entries were incompletely ## initialized by the constructor. In this case, the call to insert! ## underway carries ## valid K and D values, so these valid values may now be ## stored in the dummy placeholder nodes so that they no ## longer hold undefined references. if size(t.data,1) == 2 @invariant t.rootloc == 1 && t.depth == 1 t.tree[1] = TreeNode{K}(t.tree[1].child1, t.tree[1].child2, t.tree[1].child3, t.tree[1].parent, k, k) t.data[1] = KDRec{K,D}(t.data[1].parent, k, d) t.data[2] = KDRec{K,D}(t.data[2].parent, k, d) end ## If we have found exactly k in the tree, then we ## replace the data associated with k and return. if exactfound && !allowdups t.data[leafind] = KDRec{K,D}(parent, k,d) return false, leafind end # We get here if k was not already found in the tree or # if duplicates are allowed. # In this case we insert a new node. depth = t.depth ord = t.ord ## Store the new data item in the tree's data array. Later ## go back and fix the parent. newind = push_or_reuse!(t.data, t.freedatainds, KDRec{K,D}(0,k,d)) push!(t.useddatacells, newind) p1 = parent newchild = newind minkeynewchild = k splitroot = false curdepth = depth existingchild = leafind ## This loop ascends the tree (i.e., follows the path from a leaf to the root) ## starting from the parent p1 of ## where the new key k will go. ## Variables updated by the loop: ## p1: parent of where the new node goes ## newchild: index of the child to be inserted ## minkeynewchild: the minimum key in the subtree rooted at newchild ## existingchild: a child of p1; the newchild must ## be inserted in the slot to the right of existingchild ## curdepth: depth of newchild ## For each 3-node we encounter ## during the ascent, we add a new child, which requires splitting ## the 3-node into two 2-nodes. Then we keep going until we hit the root. ## If we encounter a 2-node, then the ascent can stop; we can ## change the 2-node to a 3-node with the new child. while true # Let newchild1,...newchild4 be the new children of # the parent node # Initially, take the three children of the existing parent # node and set newchild4 to 0. newchild1 = t.tree[p1].child1 newchild2 = t.tree[p1].child2 minkeychild2 = t.tree[p1].splitkey1 newchild3 = t.tree[p1].child3 minkeychild3 = t.tree[p1].splitkey2 p1parent = t.tree[p1].parent newchild4 = 0 # Now figure out which of the 4 children is the new node # and insert it into newchild1 ... 
newchild4 if newchild1 == existingchild newchild4 = newchild3 minkeychild4 = minkeychild3 newchild3 = newchild2 minkeychild3 = minkeychild2 newchild2 = newchild minkeychild2 = minkeynewchild elseif newchild2 == existingchild newchild4 = newchild3 minkeychild4 = minkeychild3 newchild3 = newchild minkeychild3 = minkeynewchild elseif newchild3 == existingchild newchild4 = newchild minkeychild4 = minkeynewchild else throw(AssertionError("Tree structure is corrupted 1")) end # Two cases: either we need to split the tree node # if newchild4>0 else we convert a 2-node to a 3-node # if newchild4==0 if newchild4 == 0 # Change the parent from a 2-node to a 3-node t.tree[p1] = TreeNode{K}(newchild1, newchild2, newchild3, p1parent, minkeychild2, minkeychild3) if curdepth == depth replaceparent!(t.data, newchild, p1) else replaceparent!(t.tree, newchild, p1) end break end # Split the parent t.tree[p1] = TreeNode{K}(newchild1, newchild2, 0, p1parent, minkeychild2, minkeychild2) newtreenode = TreeNode{K}(newchild3, newchild4, 0, p1parent, minkeychild4, minkeychild2) newparentnum = push_or_reuse!(t.tree, t.freetreeinds, newtreenode) if curdepth == depth replaceparent!(t.data, newchild2, p1) replaceparent!(t.data, newchild3, newparentnum) replaceparent!(t.data, newchild4, newparentnum) else replaceparent!(t.tree, newchild2, p1) replaceparent!(t.tree, newchild3, newparentnum) replaceparent!(t.tree, newchild4, newparentnum) end # Update the loop variables for the next level of the # ascension existingchild = p1 newchild = newparentnum p1 = p1parent minkeynewchild = minkeychild3 curdepth -= 1 if curdepth == 0 splitroot = true break end end # If the root has been split, then we need to add a level # to the tree that is the parent of the old root and the new node. if splitroot @invariant existingchild == t.rootloc newroot = TreeNode{K}(existingchild, newchild, 0, 0, minkeynewchild, minkeynewchild) newrootloc = push_or_reuse!(t.tree, t.freetreeinds, newroot) replaceparent!(t.tree, existingchild, newrootloc) replaceparent!(t.tree, newchild, newrootloc) t.rootloc = newrootloc t.depth += 1 end return true, newind end
[ 358, 520 ]
function Base.insert!(t::BalancedTree23{K,D,Ord}, k, d, allowdups::Bool) where {K,D,Ord <: Ordering} ## First we find the greatest data node that is <= k. leafind, exactfound = findkey(t, k) parent = t.data[leafind].parent ## The following code is necessary because in the case of a ## brand new tree, the initial tree and data entries were incompletely ## initialized by the constructor. In this case, the call to insert! ## underway carries ## valid K and D values, so these valid values may now be ## stored in the dummy placeholder nodes so that they no ## longer hold undefined references. if size(t.data,1) == 2 @invariant t.rootloc == 1 && t.depth == 1 t.tree[1] = TreeNode{K}(t.tree[1].child1, t.tree[1].child2, t.tree[1].child3, t.tree[1].parent, k, k) t.data[1] = KDRec{K,D}(t.data[1].parent, k, d) t.data[2] = KDRec{K,D}(t.data[2].parent, k, d) end ## If we have found exactly k in the tree, then we ## replace the data associated with k and return. if exactfound && !allowdups t.data[leafind] = KDRec{K,D}(parent, k,d) return false, leafind end # We get here if k was not already found in the tree or # if duplicates are allowed. # In this case we insert a new node. depth = t.depth ord = t.ord ## Store the new data item in the tree's data array. Later ## go back and fix the parent. newind = push_or_reuse!(t.data, t.freedatainds, KDRec{K,D}(0,k,d)) push!(t.useddatacells, newind) p1 = parent newchild = newind minkeynewchild = k splitroot = false curdepth = depth existingchild = leafind ## This loop ascends the tree (i.e., follows the path from a leaf to the root) ## starting from the parent p1 of ## where the new key k will go. ## Variables updated by the loop: ## p1: parent of where the new node goes ## newchild: index of the child to be inserted ## minkeynewchild: the minimum key in the subtree rooted at newchild ## existingchild: a child of p1; the newchild must ## be inserted in the slot to the right of existingchild ## curdepth: depth of newchild ## For each 3-node we encounter ## during the ascent, we add a new child, which requires splitting ## the 3-node into two 2-nodes. Then we keep going until we hit the root. ## If we encounter a 2-node, then the ascent can stop; we can ## change the 2-node to a 3-node with the new child. while true # Let newchild1,...newchild4 be the new children of # the parent node # Initially, take the three children of the existing parent # node and set newchild4 to 0. newchild1 = t.tree[p1].child1 newchild2 = t.tree[p1].child2 minkeychild2 = t.tree[p1].splitkey1 newchild3 = t.tree[p1].child3 minkeychild3 = t.tree[p1].splitkey2 p1parent = t.tree[p1].parent newchild4 = 0 # Now figure out which of the 4 children is the new node # and insert it into newchild1 ... 
newchild4 if newchild1 == existingchild newchild4 = newchild3 minkeychild4 = minkeychild3 newchild3 = newchild2 minkeychild3 = minkeychild2 newchild2 = newchild minkeychild2 = minkeynewchild elseif newchild2 == existingchild newchild4 = newchild3 minkeychild4 = minkeychild3 newchild3 = newchild minkeychild3 = minkeynewchild elseif newchild3 == existingchild newchild4 = newchild minkeychild4 = minkeynewchild else throw(AssertionError("Tree structure is corrupted 1")) end # Two cases: either we need to split the tree node # if newchild4>0 else we convert a 2-node to a 3-node # if newchild4==0 if newchild4 == 0 # Change the parent from a 2-node to a 3-node t.tree[p1] = TreeNode{K}(newchild1, newchild2, newchild3, p1parent, minkeychild2, minkeychild3) if curdepth == depth replaceparent!(t.data, newchild, p1) else replaceparent!(t.tree, newchild, p1) end break end # Split the parent t.tree[p1] = TreeNode{K}(newchild1, newchild2, 0, p1parent, minkeychild2, minkeychild2) newtreenode = TreeNode{K}(newchild3, newchild4, 0, p1parent, minkeychild4, minkeychild2) newparentnum = push_or_reuse!(t.tree, t.freetreeinds, newtreenode) if curdepth == depth replaceparent!(t.data, newchild2, p1) replaceparent!(t.data, newchild3, newparentnum) replaceparent!(t.data, newchild4, newparentnum) else replaceparent!(t.tree, newchild2, p1) replaceparent!(t.tree, newchild3, newparentnum) replaceparent!(t.tree, newchild4, newparentnum) end # Update the loop variables for the next level of the # ascension existingchild = p1 newchild = newparentnum p1 = p1parent minkeynewchild = minkeychild3 curdepth -= 1 if curdepth == 0 splitroot = true break end end # If the root has been split, then we need to add a level # to the tree that is the parent of the old root and the new node. if splitroot @invariant existingchild == t.rootloc newroot = TreeNode{K}(existingchild, newchild, 0, 0, minkeynewchild, minkeynewchild) newrootloc = push_or_reuse!(t.tree, t.freetreeinds, newroot) replaceparent!(t.tree, existingchild, newrootloc) replaceparent!(t.tree, newchild, newrootloc) t.rootloc = newrootloc t.depth += 1 end return true, newind end
function Base.insert!(t::BalancedTree23{K,D,Ord}, k, d, allowdups::Bool) where {K,D,Ord <: Ordering} ## First we find the greatest data node that is <= k. leafind, exactfound = findkey(t, k) parent = t.data[leafind].parent ## The following code is necessary because in the case of a ## brand new tree, the initial tree and data entries were incompletely ## initialized by the constructor. In this case, the call to insert! ## underway carries ## valid K and D values, so these valid values may now be ## stored in the dummy placeholder nodes so that they no ## longer hold undefined references. if size(t.data,1) == 2 @invariant t.rootloc == 1 && t.depth == 1 t.tree[1] = TreeNode{K}(t.tree[1].child1, t.tree[1].child2, t.tree[1].child3, t.tree[1].parent, k, k) t.data[1] = KDRec{K,D}(t.data[1].parent, k, d) t.data[2] = KDRec{K,D}(t.data[2].parent, k, d) end ## If we have found exactly k in the tree, then we ## replace the data associated with k and return. if exactfound && !allowdups t.data[leafind] = KDRec{K,D}(parent, k,d) return false, leafind end # We get here if k was not already found in the tree or # if duplicates are allowed. # In this case we insert a new node. depth = t.depth ord = t.ord ## Store the new data item in the tree's data array. Later ## go back and fix the parent. newind = push_or_reuse!(t.data, t.freedatainds, KDRec{K,D}(0,k,d)) push!(t.useddatacells, newind) p1 = parent newchild = newind minkeynewchild = k splitroot = false curdepth = depth existingchild = leafind ## This loop ascends the tree (i.e., follows the path from a leaf to the root) ## starting from the parent p1 of ## where the new key k will go. ## Variables updated by the loop: ## p1: parent of where the new node goes ## newchild: index of the child to be inserted ## minkeynewchild: the minimum key in the subtree rooted at newchild ## existingchild: a child of p1; the newchild must ## be inserted in the slot to the right of existingchild ## curdepth: depth of newchild ## For each 3-node we encounter ## during the ascent, we add a new child, which requires splitting ## the 3-node into two 2-nodes. Then we keep going until we hit the root. ## If we encounter a 2-node, then the ascent can stop; we can ## change the 2-node to a 3-node with the new child. while true # Let newchild1,...newchild4 be the new children of # the parent node # Initially, take the three children of the existing parent # node and set newchild4 to 0. newchild1 = t.tree[p1].child1 newchild2 = t.tree[p1].child2 minkeychild2 = t.tree[p1].splitkey1 newchild3 = t.tree[p1].child3 minkeychild3 = t.tree[p1].splitkey2 p1parent = t.tree[p1].parent newchild4 = 0 # Now figure out which of the 4 children is the new node # and insert it into newchild1 ... 
newchild4 if newchild1 == existingchild newchild4 = newchild3 minkeychild4 = minkeychild3 newchild3 = newchild2 minkeychild3 = minkeychild2 newchild2 = newchild minkeychild2 = minkeynewchild elseif newchild2 == existingchild newchild4 = newchild3 minkeychild4 = minkeychild3 newchild3 = newchild minkeychild3 = minkeynewchild elseif newchild3 == existingchild newchild4 = newchild minkeychild4 = minkeynewchild else throw(AssertionError("Tree structure is corrupted 1")) end # Two cases: either we need to split the tree node # if newchild4>0 else we convert a 2-node to a 3-node # if newchild4==0 if newchild4 == 0 # Change the parent from a 2-node to a 3-node t.tree[p1] = TreeNode{K}(newchild1, newchild2, newchild3, p1parent, minkeychild2, minkeychild3) if curdepth == depth replaceparent!(t.data, newchild, p1) else replaceparent!(t.tree, newchild, p1) end break end # Split the parent t.tree[p1] = TreeNode{K}(newchild1, newchild2, 0, p1parent, minkeychild2, minkeychild2) newtreenode = TreeNode{K}(newchild3, newchild4, 0, p1parent, minkeychild4, minkeychild2) newparentnum = push_or_reuse!(t.tree, t.freetreeinds, newtreenode) if curdepth == depth replaceparent!(t.data, newchild2, p1) replaceparent!(t.data, newchild3, newparentnum) replaceparent!(t.data, newchild4, newparentnum) else replaceparent!(t.tree, newchild2, p1) replaceparent!(t.tree, newchild3, newparentnum) replaceparent!(t.tree, newchild4, newparentnum) end # Update the loop variables for the next level of the # ascension existingchild = p1 newchild = newparentnum p1 = p1parent minkeynewchild = minkeychild3 curdepth -= 1 if curdepth == 0 splitroot = true break end end # If the root has been split, then we need to add a level # to the tree that is the parent of the old root and the new node. if splitroot @invariant existingchild == t.rootloc newroot = TreeNode{K}(existingchild, newchild, 0, 0, minkeynewchild, minkeynewchild) newrootloc = push_or_reuse!(t.tree, t.freetreeinds, newroot) replaceparent!(t.tree, existingchild, newrootloc) replaceparent!(t.tree, newchild, newrootloc) t.rootloc = newrootloc t.depth += 1 end return true, newind end
Base.insert!
358
520
src/balanced_tree.jl
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 function Base.insert!(tree::RBTree{K}, d::K) where K # if the key exists in the tree, no need to insert haskey(tree, d) && return tree # insert, if not present in the tree node = RBTreeNode{K}(d) node.leftChild = node.rightChild = tree.nil insert_node!(tree, node) if node.parent == nothing node.color = false elseif node.parent.parent == nothing ; else fix_insert!(tree, node) end tree.count += 1 return tree end ##CHUNK 2 end end tree.root.color = false end """ insert!(tree, key) Inserts `key` in the `tree` if it is not present. """ function Base.insert!(tree::RBTree{K}, d::K) where K # if the key exists in the tree, no need to insert haskey(tree, d) && return tree # insert, if not present in the tree node = RBTreeNode{K}(d) node.leftChild = node.rightChild = tree.nil insert_node!(tree, node) #CURRENT FILE: DataStructures.jl/src/balanced_tree.jl ##CHUNK 1 end @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2le_leaf(t.ord, thisnode, k) : cmp3le_leaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? thisnode.child2 : thisnode.child3 return curnode end ## The following are helper routines for the insert! and delete! functions. ## They replace the 'parent' field of either an internal tree node or ## a data node at the bottom tree level. function replaceparent!(data::Vector{KDRec{K,D}}, whichind::Int, newparent::Int) where {K,D} data[whichind] = KDRec{K,D}(newparent, data[whichind].k, data[whichind].d) return nothing end ##CHUNK 2 if c3 != it && c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.data[c3].k end @invariant newchildcount == 1 || newchildcount == 2 push!(t.freedatainds, it) pop!(t.useddatacells,it) defaultKey = t.tree[1].splitkey1 curdepth = t.depth mustdeleteroot = false pparent = -1 ## The following loop ascends the tree and contracts nodes (reduces their ## number of children) as ## needed. If newchildcount == 2 or 3, then the ascent is terminated ## and a node is created with 2 or 3 children. ## If newchildcount == 1, then the ascent must continue since a tree ## node cannot have one child. 
##CHUNK 3 end # Build a balanced tree from an iterable in which the data is already # sorted function BalancedTree23{K,D,Ord}(::Val{true}, iterable, ord::Ord, allowdups::Bool) where {K, D, Ord <: Ordering} m = BalancedTree23{K,D,Ord}(ord) lengthdata = length(m.data) @assert lengthdata == 2 firsttrip = true for (k,d) in iterable # Must initialize the before-start and past-end markers # with live data to prevent references to undefined fields # later if firsttrip m.data[1] = KDRec{K,D}(m.data[1].parent, k, d) m.data[2] = KDRec{K,D}(m.data[2].parent, k, d) ##CHUNK 4 lengthdata = length(m.data) @assert lengthdata == 2 firsttrip = true for (k,d) in iterable # Must initialize the before-start and past-end markers # with live data to prevent references to undefined fields # later if firsttrip m.data[1] = KDRec{K,D}(m.data[1].parent, k, d) m.data[2] = KDRec{K,D}(m.data[2].parent, k, d) end if !firsttrip lt(ord, k, m.data[lengthdata].k) && throw(ArgumentError("Keys out of order")) if !allowdups !lt(ord, m.data[lengthdata].k, k) && throw(ArgumentError("Repeated key")) end end push!(m.data, KDRec{K,D}(0, convert(K,k), convert(D,d))) ##CHUNK 5 mutable struct BalancedTree23{K, D, Ord <: Ordering} ord::Ord data::Vector{KDRec{K,D}} tree::Vector{TreeNode{K}} rootloc::Int depth::Int freetreeinds::Vector{Int} freedatainds::Vector{Int} useddatacells::BitSet # The next two arrays are used as a workspace by the delete! # function. deletionchild::Vector{Int} deletionleftkey::Vector{K} function BalancedTree23{K,D,Ord}(ord1::Ord) where {K,D,Ord<:Ordering} tree1 = Vector{TreeNode{K}}(undef, 1) initializeTree!(tree1) data1 = Vector{KDRec{K,D}}(undef, 2) initializeData!(data1) u1 = BitSet() push!(u1, 1, 2) ##CHUNK 6 function Base.delete!(t::BalancedTree23{K,D,Ord}, it::Int) where {K,D,Ord<:Ordering} ## Put the cell indexed by 'it' into the deletion list. ## ## Create the following data items maintained in the ## upcoming loop. ## ## p is a tree-node ancestor of the deleted node ## The children of p are stored in ## t.deletionchild[..] ## The number of these children is newchildcount, which is 1, 2 or 3. ## The keys that lower bound the children ## are stored in t.deletionleftkey[..] ## There is a special case for t.deletionleftkey[1]; the ## flag deletionleftkey1_valid indicates that the left key ## for the immediate right neighbor of the ## deleted node has not yet been been stored in the tree. ## Once it is stored, t.deletionleftkey[1] is no longer needed ## or used. ##CHUNK 7 # function. deletionchild::Vector{Int} deletionleftkey::Vector{K} function BalancedTree23{K,D,Ord}(ord1::Ord) where {K,D,Ord<:Ordering} tree1 = Vector{TreeNode{K}}(undef, 1) initializeTree!(tree1) data1 = Vector{KDRec{K,D}}(undef, 2) initializeData!(data1) u1 = BitSet() push!(u1, 1, 2) new{K,D,Ord}(ord1, data1, tree1, 1, 1, Vector{Int}(), Vector{Int}(), u1, Vector{Int}(undef, 3), Vector{K}(undef, 3)) end end ## Function cmp2 checks a tree node with two children ## against a given key, and returns 1 if the given key is ## less than the node's splitkey or 2 else. Special case ##CHUNK 8 ## The first and second entries of the data array are dummy placeholders ## for the beginning and end of the sorted order of the keys ## tree: the nodes of a 2-3 tree that sits above the data. 
## rootloc: the index of the entry of tree (i.e., a subscript to ## treenodes) that is the tree's root ## depth: the depth of the tree, (number ## of tree levels, not counting the level of data at the bottom) ## depth==1 means that there is a single root node ## whose children are data nodes. ## freetreeinds: Array of indices of free locations in the ## tree array (locations are freed due to deletion) ## freedatainds: Array of indices of free locations in the ## data array (locations are freed due to deletion) ## useddatacells: BitSet (i.e., bit vector) showing which ## data cells are taken. The complementary positions are ## exactly those stored in freedatainds. This array is ## used only for error checking. ## deletionchild and deletionleftkey are two work-arrays ## for the delete function.
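The insert! record above stores new entries through a push_or_reuse! helper fed by the freedatainds/freetreeinds arrays described in this struct commentary, so slots released by delete! are recycled before the data or tree arrays are grown. The following is a minimal sketch of that free-list pattern; push_or_reuse_demo! is an illustrative stand-in under that assumption, not the package's own helper.

# Write `item` into a recycled slot when the free list has one,
# otherwise append it; return the index actually used.
function push_or_reuse_demo!(v::Vector, freeinds::Vector{Int}, item)
    if isempty(freeinds)
        push!(v, item)          # no free slot: grow the array
        return length(v)
    else
        ind = pop!(freeinds)    # recycle a slot freed by an earlier deletion
        v[ind] = item
        return ind
    end
end

data = [10, 20, 30]
free = [2]                              # slot 2 was freed earlier
push_or_reuse_demo!(data, free, 99)     # returns 2; data == [10, 99, 30]
push_or_reuse_demo!(data, free, 7)      # returns 4; data == [10, 99, 30, 7]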
655
984
DataStructures.jl
11
function Base.delete!(t::BalancedTree23{K,D,Ord}, it::Int) where {K,D,Ord<:Ordering} ## Put the cell indexed by 'it' into the deletion list. ## ## Create the following data items maintained in the ## upcoming loop. ## ## p is a tree-node ancestor of the deleted node ## The children of p are stored in ## t.deletionchild[..] ## The number of these children is newchildcount, which is 1, 2 or 3. ## The keys that lower bound the children ## are stored in t.deletionleftkey[..] ## There is a special case for t.deletionleftkey[1]; the ## flag deletionleftkey1_valid indicates that the left key ## for the immediate right neighbor of the ## deleted node has not yet been been stored in the tree. ## Once it is stored, t.deletionleftkey[1] is no longer needed ## or used. ## The flag mustdeleteroot means that the tree has contracted ## enough that it loses a level. p = t.data[it].parent newchildcount = 0 c1 = t.tree[p].child1 deletionleftkey1_valid = true if c1 != it deletionleftkey1_valid = false newchildcount += 1 t.deletionchild[newchildcount] = c1 t.deletionleftkey[newchildcount] = t.data[c1].k end c2 = t.tree[p].child2 if c2 != it newchildcount += 1 t.deletionchild[newchildcount] = c2 t.deletionleftkey[newchildcount] = t.data[c2].k end c3 = t.tree[p].child3 if c3 != it && c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.data[c3].k end @invariant newchildcount == 1 || newchildcount == 2 push!(t.freedatainds, it) pop!(t.useddatacells,it) defaultKey = t.tree[1].splitkey1 curdepth = t.depth mustdeleteroot = false pparent = -1 ## The following loop ascends the tree and contracts nodes (reduces their ## number of children) as ## needed. If newchildcount == 2 or 3, then the ascent is terminated ## and a node is created with 2 or 3 children. ## If newchildcount == 1, then the ascent must continue since a tree ## node cannot have one child. while true pparent = t.tree[p].parent ## Simple cases when the new child count is 2 or 3 if newchildcount == 2 t.tree[p] = TreeNode{K}(t.deletionchild[1], t.deletionchild[2], 0, pparent, t.deletionleftkey[2], defaultKey) break end if newchildcount == 3 t.tree[p] = TreeNode{K}(t.deletionchild[1], t.deletionchild[2], t.deletionchild[3], pparent, t.deletionleftkey[2], t.deletionleftkey[3]) break end @invariant newchildcount == 1 ## For the rest of this loop, we cover the case ## that p has one child. ## If newchildcount == 1 and curdepth==1, this means that ## the root of the tree has only one child. In this case, we can ## delete the root and make its one child the new root (see below). if curdepth == 1 mustdeleteroot = true break end ## We now branch on three cases depending on whether p is child1, ## child2 or child3 of its parent. if t.tree[pparent].child1 == p rightsib = t.tree[pparent].child2 ## Here p is child1 and rightsib is child2. ## If rightsib has 2 children, then p and ## rightsib are merged into a single node ## that has three children. ## If rightsib has 3 children, then p and ## rightsib are reformed so that each has ## two children. 
if t.tree[rightsib].child3 == 0 rc1 = t.tree[rightsib].child1 rc2 = t.tree[rightsib].child2 t.tree[p] = TreeNode{K}(t.deletionchild[1], rc1, rc2, pparent, t.tree[pparent].splitkey1, t.tree[rightsib].splitkey1) if curdepth == t.depth replaceparent!(t.data, rc1, p) replaceparent!(t.data, rc2, p) else replaceparent!(t.tree, rc1, p) replaceparent!(t.tree, rc2, p) end push!(t.freetreeinds, rightsib) newchildcount = 1 t.deletionchild[1] = p else rc1 = t.tree[rightsib].child1 t.tree[p] = TreeNode{K}(t.deletionchild[1], rc1, 0, pparent, t.tree[pparent].splitkey1, defaultKey) sk1 = t.tree[rightsib].splitkey1 t.tree[rightsib] = TreeNode{K}(t.tree[rightsib].child2, t.tree[rightsib].child3, 0, pparent, t.tree[rightsib].splitkey2, defaultKey) if curdepth == t.depth replaceparent!(t.data, rc1, p) else replaceparent!(t.tree, rc1, p) end newchildcount = 2 t.deletionchild[1] = p t.deletionchild[2] = rightsib t.deletionleftkey[2] = sk1 end ## If pparent had a third child (besides p and rightsib) ## then we add this to t.deletionchild c3 = t.tree[pparent].child3 if c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.tree[pparent].splitkey2 end p = pparent elseif t.tree[pparent].child2 == p ## Here p is child2 and leftsib is child1. ## If leftsib has 2 children, then p and ## leftsib are merged into a single node ## that has three children. ## If leftsib has 3 children, then p and ## leftsib are reformed so that each has ## two children. leftsib = t.tree[pparent].child1 lk = deletionleftkey1_valid ? t.deletionleftkey[1] : t.tree[pparent].splitkey1 if t.tree[leftsib].child3 == 0 lc1 = t.tree[leftsib].child1 lc2 = t.tree[leftsib].child2 t.tree[p] = TreeNode{K}(lc1, lc2, t.deletionchild[1], pparent, t.tree[leftsib].splitkey1, lk) if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 1 t.deletionchild[1] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) if curdepth == t.depth replaceparent!(t.data, lc3, p) else replaceparent!(t.tree, lc3, p) end newchildcount = 2 t.deletionchild[1] = leftsib t.deletionchild[2] = p t.deletionleftkey[2] = sk2 end ## If pparent had a third child (besides p and leftsib) ## then we add this to t.deletionchild c3 = t.tree[pparent].child3 if c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.tree[pparent].splitkey2 end p = pparent deletionleftkey1_valid = false else ## Here p is child3 and leftsib is child2. ## If leftsib has 2 children, then p and ## leftsib are merged into a single node ## that has three children. ## If leftsib has 3 children, then p and ## leftsib are reformed so that each has ## two children. @invariant t.tree[pparent].child3 == p leftsib = t.tree[pparent].child2 lk = deletionleftkey1_valid ? 
t.deletionleftkey[1] : t.tree[pparent].splitkey2 if t.tree[leftsib].child3 == 0 lc1 = t.tree[leftsib].child1 lc2 = t.tree[leftsib].child2 t.tree[p] = TreeNode{K}(lc1, lc2, t.deletionchild[1], pparent, t.tree[leftsib].splitkey1, lk) if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 2 t.deletionchild[1] = t.tree[pparent].child1 t.deletionleftkey[2] = t.tree[pparent].splitkey1 t.deletionchild[2] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) if curdepth == t.depth replaceparent!(t.data, lc3, p) else replaceparent!(t.tree, lc3, p) end newchildcount = 3 t.deletionchild[1] = t.tree[pparent].child1 t.deletionchild[2] = leftsib t.deletionchild[3] = p t.deletionleftkey[2] = t.tree[pparent].splitkey1 t.deletionleftkey[3] = sk2 end p = pparent deletionleftkey1_valid = false end curdepth -= 1 end if mustdeleteroot @invariant !deletionleftkey1_valid @invariant p == t.rootloc t.rootloc = t.deletionchild[1] t.depth -= 1 push!(t.freetreeinds, p) end ## If deletionleftkey1_valid, this means that the new ## min key of the deleted node and its right neighbors ## has never been stored in the tree. It must be stored ## as splitkey1 or splitkey2 of some ancestor of the ## deleted node, so we continue ascending the tree ## until we find a node which has p (and therefore the ## deleted node) as its descendent through its second ## or third child. ## It cannot be the case that the deleted node is ## is a descendent of the root always through ## first children, since this would mean the deleted ## node is the leftmost placeholder, which ## cannot be deleted. if deletionleftkey1_valid while true pparentnode = t.tree[pparent] if pparentnode.child2 == p t.tree[pparent] = TreeNode{K}(pparentnode.child1, pparentnode.child2, pparentnode.child3, pparentnode.parent, t.deletionleftkey[1], pparentnode.splitkey2) break elseif pparentnode.child3 == p t.tree[pparent] = TreeNode{K}(pparentnode.child1, pparentnode.child2, pparentnode.child3, pparentnode.parent, pparentnode.splitkey1, t.deletionleftkey[1]) break else p = pparent pparent = pparentnode.parent curdepth -= 1 @invariant curdepth > 0 end end end return nothing end
function Base.delete!(t::BalancedTree23{K,D,Ord}, it::Int) where {K,D,Ord<:Ordering} ## Put the cell indexed by 'it' into the deletion list. ## ## Create the following data items maintained in the ## upcoming loop. ## ## p is a tree-node ancestor of the deleted node ## The children of p are stored in ## t.deletionchild[..] ## The number of these children is newchildcount, which is 1, 2 or 3. ## The keys that lower bound the children ## are stored in t.deletionleftkey[..] ## There is a special case for t.deletionleftkey[1]; the ## flag deletionleftkey1_valid indicates that the left key ## for the immediate right neighbor of the ## deleted node has not yet been been stored in the tree. ## Once it is stored, t.deletionleftkey[1] is no longer needed ## or used. ## The flag mustdeleteroot means that the tree has contracted ## enough that it loses a level. p = t.data[it].parent newchildcount = 0 c1 = t.tree[p].child1 deletionleftkey1_valid = true if c1 != it deletionleftkey1_valid = false newchildcount += 1 t.deletionchild[newchildcount] = c1 t.deletionleftkey[newchildcount] = t.data[c1].k end c2 = t.tree[p].child2 if c2 != it newchildcount += 1 t.deletionchild[newchildcount] = c2 t.deletionleftkey[newchildcount] = t.data[c2].k end c3 = t.tree[p].child3 if c3 != it && c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.data[c3].k end @invariant newchildcount == 1 || newchildcount == 2 push!(t.freedatainds, it) pop!(t.useddatacells,it) defaultKey = t.tree[1].splitkey1 curdepth = t.depth mustdeleteroot = false pparent = -1 ## The following loop ascends the tree and contracts nodes (reduces their ## number of children) as ## needed. If newchildcount == 2 or 3, then the ascent is terminated ## and a node is created with 2 or 3 children. ## If newchildcount == 1, then the ascent must continue since a tree ## node cannot have one child. while true pparent = t.tree[p].parent ## Simple cases when the new child count is 2 or 3 if newchildcount == 2 t.tree[p] = TreeNode{K}(t.deletionchild[1], t.deletionchild[2], 0, pparent, t.deletionleftkey[2], defaultKey) break end if newchildcount == 3 t.tree[p] = TreeNode{K}(t.deletionchild[1], t.deletionchild[2], t.deletionchild[3], pparent, t.deletionleftkey[2], t.deletionleftkey[3]) break end @invariant newchildcount == 1 ## For the rest of this loop, we cover the case ## that p has one child. ## If newchildcount == 1 and curdepth==1, this means that ## the root of the tree has only one child. In this case, we can ## delete the root and make its one child the new root (see below). if curdepth == 1 mustdeleteroot = true break end ## We now branch on three cases depending on whether p is child1, ## child2 or child3 of its parent. if t.tree[pparent].child1 == p rightsib = t.tree[pparent].child2 ## Here p is child1 and rightsib is child2. ## If rightsib has 2 children, then p and ## rightsib are merged into a single node ## that has three children. ## If rightsib has 3 children, then p and ## rightsib are reformed so that each has ## two children. 
if t.tree[rightsib].child3 == 0 rc1 = t.tree[rightsib].child1 rc2 = t.tree[rightsib].child2 t.tree[p] = TreeNode{K}(t.deletionchild[1], rc1, rc2, pparent, t.tree[pparent].splitkey1, t.tree[rightsib].splitkey1) if curdepth == t.depth replaceparent!(t.data, rc1, p) replaceparent!(t.data, rc2, p) else replaceparent!(t.tree, rc1, p) replaceparent!(t.tree, rc2, p) end push!(t.freetreeinds, rightsib) newchildcount = 1 t.deletionchild[1] = p else rc1 = t.tree[rightsib].child1 t.tree[p] = TreeNode{K}(t.deletionchild[1], rc1, 0, pparent, t.tree[pparent].splitkey1, defaultKey) sk1 = t.tree[rightsib].splitkey1 t.tree[rightsib] = TreeNode{K}(t.tree[rightsib].child2, t.tree[rightsib].child3, 0, pparent, t.tree[rightsib].splitkey2, defaultKey) if curdepth == t.depth replaceparent!(t.data, rc1, p) else replaceparent!(t.tree, rc1, p) end newchildcount = 2 t.deletionchild[1] = p t.deletionchild[2] = rightsib t.deletionleftkey[2] = sk1 end ## If pparent had a third child (besides p and rightsib) ## then we add this to t.deletionchild c3 = t.tree[pparent].child3 if c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.tree[pparent].splitkey2 end p = pparent elseif t.tree[pparent].child2 == p ## Here p is child2 and leftsib is child1. ## If leftsib has 2 children, then p and ## leftsib are merged into a single node ## that has three children. ## If leftsib has 3 children, then p and ## leftsib are reformed so that each has ## two children. leftsib = t.tree[pparent].child1 lk = deletionleftkey1_valid ? t.deletionleftkey[1] : t.tree[pparent].splitkey1 if t.tree[leftsib].child3 == 0 lc1 = t.tree[leftsib].child1 lc2 = t.tree[leftsib].child2 t.tree[p] = TreeNode{K}(lc1, lc2, t.deletionchild[1], pparent, t.tree[leftsib].splitkey1, lk) if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 1 t.deletionchild[1] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) if curdepth == t.depth replaceparent!(t.data, lc3, p) else replaceparent!(t.tree, lc3, p) end newchildcount = 2 t.deletionchild[1] = leftsib t.deletionchild[2] = p t.deletionleftkey[2] = sk2 end ## If pparent had a third child (besides p and leftsib) ## then we add this to t.deletionchild c3 = t.tree[pparent].child3 if c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.tree[pparent].splitkey2 end p = pparent deletionleftkey1_valid = false else ## Here p is child3 and leftsib is child2. ## If leftsib has 2 children, then p and ## leftsib are merged into a single node ## that has three children. ## If leftsib has 3 children, then p and ## leftsib are reformed so that each has ## two children. @invariant t.tree[pparent].child3 == p leftsib = t.tree[pparent].child2 lk = deletionleftkey1_valid ? 
t.deletionleftkey[1] : t.tree[pparent].splitkey2 if t.tree[leftsib].child3 == 0 lc1 = t.tree[leftsib].child1 lc2 = t.tree[leftsib].child2 t.tree[p] = TreeNode{K}(lc1, lc2, t.deletionchild[1], pparent, t.tree[leftsib].splitkey1, lk) if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 2 t.deletionchild[1] = t.tree[pparent].child1 t.deletionleftkey[2] = t.tree[pparent].splitkey1 t.deletionchild[2] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) if curdepth == t.depth replaceparent!(t.data, lc3, p) else replaceparent!(t.tree, lc3, p) end newchildcount = 3 t.deletionchild[1] = t.tree[pparent].child1 t.deletionchild[2] = leftsib t.deletionchild[3] = p t.deletionleftkey[2] = t.tree[pparent].splitkey1 t.deletionleftkey[3] = sk2 end p = pparent deletionleftkey1_valid = false end curdepth -= 1 end if mustdeleteroot @invariant !deletionleftkey1_valid @invariant p == t.rootloc t.rootloc = t.deletionchild[1] t.depth -= 1 push!(t.freetreeinds, p) end ## If deletionleftkey1_valid, this means that the new ## min key of the deleted node and its right neighbors ## has never been stored in the tree. It must be stored ## as splitkey1 or splitkey2 of some ancestor of the ## deleted node, so we continue ascending the tree ## until we find a node which has p (and therefore the ## deleted node) as its descendent through its second ## or third child. ## It cannot be the case that the deleted node is ## is a descendent of the root always through ## first children, since this would mean the deleted ## node is the leftmost placeholder, which ## cannot be deleted. if deletionleftkey1_valid while true pparentnode = t.tree[pparent] if pparentnode.child2 == p t.tree[pparent] = TreeNode{K}(pparentnode.child1, pparentnode.child2, pparentnode.child3, pparentnode.parent, t.deletionleftkey[1], pparentnode.splitkey2) break elseif pparentnode.child3 == p t.tree[pparent] = TreeNode{K}(pparentnode.child1, pparentnode.child2, pparentnode.child3, pparentnode.parent, pparentnode.splitkey1, t.deletionleftkey[1]) break else p = pparent pparent = pparentnode.parent curdepth -= 1 @invariant curdepth > 0 end end end return nothing end
[ 655, 984 ]
function Base.delete!(t::BalancedTree23{K,D,Ord}, it::Int) where {K,D,Ord<:Ordering} ## Put the cell indexed by 'it' into the deletion list. ## ## Create the following data items maintained in the ## upcoming loop. ## ## p is a tree-node ancestor of the deleted node ## The children of p are stored in ## t.deletionchild[..] ## The number of these children is newchildcount, which is 1, 2 or 3. ## The keys that lower bound the children ## are stored in t.deletionleftkey[..] ## There is a special case for t.deletionleftkey[1]; the ## flag deletionleftkey1_valid indicates that the left key ## for the immediate right neighbor of the ## deleted node has not yet been been stored in the tree. ## Once it is stored, t.deletionleftkey[1] is no longer needed ## or used. ## The flag mustdeleteroot means that the tree has contracted ## enough that it loses a level. p = t.data[it].parent newchildcount = 0 c1 = t.tree[p].child1 deletionleftkey1_valid = true if c1 != it deletionleftkey1_valid = false newchildcount += 1 t.deletionchild[newchildcount] = c1 t.deletionleftkey[newchildcount] = t.data[c1].k end c2 = t.tree[p].child2 if c2 != it newchildcount += 1 t.deletionchild[newchildcount] = c2 t.deletionleftkey[newchildcount] = t.data[c2].k end c3 = t.tree[p].child3 if c3 != it && c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.data[c3].k end @invariant newchildcount == 1 || newchildcount == 2 push!(t.freedatainds, it) pop!(t.useddatacells,it) defaultKey = t.tree[1].splitkey1 curdepth = t.depth mustdeleteroot = false pparent = -1 ## The following loop ascends the tree and contracts nodes (reduces their ## number of children) as ## needed. If newchildcount == 2 or 3, then the ascent is terminated ## and a node is created with 2 or 3 children. ## If newchildcount == 1, then the ascent must continue since a tree ## node cannot have one child. while true pparent = t.tree[p].parent ## Simple cases when the new child count is 2 or 3 if newchildcount == 2 t.tree[p] = TreeNode{K}(t.deletionchild[1], t.deletionchild[2], 0, pparent, t.deletionleftkey[2], defaultKey) break end if newchildcount == 3 t.tree[p] = TreeNode{K}(t.deletionchild[1], t.deletionchild[2], t.deletionchild[3], pparent, t.deletionleftkey[2], t.deletionleftkey[3]) break end @invariant newchildcount == 1 ## For the rest of this loop, we cover the case ## that p has one child. ## If newchildcount == 1 and curdepth==1, this means that ## the root of the tree has only one child. In this case, we can ## delete the root and make its one child the new root (see below). if curdepth == 1 mustdeleteroot = true break end ## We now branch on three cases depending on whether p is child1, ## child2 or child3 of its parent. if t.tree[pparent].child1 == p rightsib = t.tree[pparent].child2 ## Here p is child1 and rightsib is child2. ## If rightsib has 2 children, then p and ## rightsib are merged into a single node ## that has three children. ## If rightsib has 3 children, then p and ## rightsib are reformed so that each has ## two children. 
if t.tree[rightsib].child3 == 0 rc1 = t.tree[rightsib].child1 rc2 = t.tree[rightsib].child2 t.tree[p] = TreeNode{K}(t.deletionchild[1], rc1, rc2, pparent, t.tree[pparent].splitkey1, t.tree[rightsib].splitkey1) if curdepth == t.depth replaceparent!(t.data, rc1, p) replaceparent!(t.data, rc2, p) else replaceparent!(t.tree, rc1, p) replaceparent!(t.tree, rc2, p) end push!(t.freetreeinds, rightsib) newchildcount = 1 t.deletionchild[1] = p else rc1 = t.tree[rightsib].child1 t.tree[p] = TreeNode{K}(t.deletionchild[1], rc1, 0, pparent, t.tree[pparent].splitkey1, defaultKey) sk1 = t.tree[rightsib].splitkey1 t.tree[rightsib] = TreeNode{K}(t.tree[rightsib].child2, t.tree[rightsib].child3, 0, pparent, t.tree[rightsib].splitkey2, defaultKey) if curdepth == t.depth replaceparent!(t.data, rc1, p) else replaceparent!(t.tree, rc1, p) end newchildcount = 2 t.deletionchild[1] = p t.deletionchild[2] = rightsib t.deletionleftkey[2] = sk1 end ## If pparent had a third child (besides p and rightsib) ## then we add this to t.deletionchild c3 = t.tree[pparent].child3 if c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.tree[pparent].splitkey2 end p = pparent elseif t.tree[pparent].child2 == p ## Here p is child2 and leftsib is child1. ## If leftsib has 2 children, then p and ## leftsib are merged into a single node ## that has three children. ## If leftsib has 3 children, then p and ## leftsib are reformed so that each has ## two children. leftsib = t.tree[pparent].child1 lk = deletionleftkey1_valid ? t.deletionleftkey[1] : t.tree[pparent].splitkey1 if t.tree[leftsib].child3 == 0 lc1 = t.tree[leftsib].child1 lc2 = t.tree[leftsib].child2 t.tree[p] = TreeNode{K}(lc1, lc2, t.deletionchild[1], pparent, t.tree[leftsib].splitkey1, lk) if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 1 t.deletionchild[1] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) if curdepth == t.depth replaceparent!(t.data, lc3, p) else replaceparent!(t.tree, lc3, p) end newchildcount = 2 t.deletionchild[1] = leftsib t.deletionchild[2] = p t.deletionleftkey[2] = sk2 end ## If pparent had a third child (besides p and leftsib) ## then we add this to t.deletionchild c3 = t.tree[pparent].child3 if c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.tree[pparent].splitkey2 end p = pparent deletionleftkey1_valid = false else ## Here p is child3 and leftsib is child2. ## If leftsib has 2 children, then p and ## leftsib are merged into a single node ## that has three children. ## If leftsib has 3 children, then p and ## leftsib are reformed so that each has ## two children. @invariant t.tree[pparent].child3 == p leftsib = t.tree[pparent].child2 lk = deletionleftkey1_valid ? 
t.deletionleftkey[1] : t.tree[pparent].splitkey2 if t.tree[leftsib].child3 == 0 lc1 = t.tree[leftsib].child1 lc2 = t.tree[leftsib].child2 t.tree[p] = TreeNode{K}(lc1, lc2, t.deletionchild[1], pparent, t.tree[leftsib].splitkey1, lk) if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 2 t.deletionchild[1] = t.tree[pparent].child1 t.deletionleftkey[2] = t.tree[pparent].splitkey1 t.deletionchild[2] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) if curdepth == t.depth replaceparent!(t.data, lc3, p) else replaceparent!(t.tree, lc3, p) end newchildcount = 3 t.deletionchild[1] = t.tree[pparent].child1 t.deletionchild[2] = leftsib t.deletionchild[3] = p t.deletionleftkey[2] = t.tree[pparent].splitkey1 t.deletionleftkey[3] = sk2 end p = pparent deletionleftkey1_valid = false end curdepth -= 1 end if mustdeleteroot @invariant !deletionleftkey1_valid @invariant p == t.rootloc t.rootloc = t.deletionchild[1] t.depth -= 1 push!(t.freetreeinds, p) end ## If deletionleftkey1_valid, this means that the new ## min key of the deleted node and its right neighbors ## has never been stored in the tree. It must be stored ## as splitkey1 or splitkey2 of some ancestor of the ## deleted node, so we continue ascending the tree ## until we find a node which has p (and therefore the ## deleted node) as its descendent through its second ## or third child. ## It cannot be the case that the deleted node is ## is a descendent of the root always through ## first children, since this would mean the deleted ## node is the leftmost placeholder, which ## cannot be deleted. if deletionleftkey1_valid while true pparentnode = t.tree[pparent] if pparentnode.child2 == p t.tree[pparent] = TreeNode{K}(pparentnode.child1, pparentnode.child2, pparentnode.child3, pparentnode.parent, t.deletionleftkey[1], pparentnode.splitkey2) break elseif pparentnode.child3 == p t.tree[pparent] = TreeNode{K}(pparentnode.child1, pparentnode.child2, pparentnode.child3, pparentnode.parent, pparentnode.splitkey1, t.deletionleftkey[1]) break else p = pparent pparent = pparentnode.parent curdepth -= 1 @invariant curdepth > 0 end end end return nothing end
function Base.delete!(t::BalancedTree23{K,D,Ord}, it::Int) where {K,D,Ord<:Ordering} ## Put the cell indexed by 'it' into the deletion list. ## ## Create the following data items maintained in the ## upcoming loop. ## ## p is a tree-node ancestor of the deleted node ## The children of p are stored in ## t.deletionchild[..] ## The number of these children is newchildcount, which is 1, 2 or 3. ## The keys that lower bound the children ## are stored in t.deletionleftkey[..] ## There is a special case for t.deletionleftkey[1]; the ## flag deletionleftkey1_valid indicates that the left key ## for the immediate right neighbor of the ## deleted node has not yet been been stored in the tree. ## Once it is stored, t.deletionleftkey[1] is no longer needed ## or used. ## The flag mustdeleteroot means that the tree has contracted ## enough that it loses a level. p = t.data[it].parent newchildcount = 0 c1 = t.tree[p].child1 deletionleftkey1_valid = true if c1 != it deletionleftkey1_valid = false newchildcount += 1 t.deletionchild[newchildcount] = c1 t.deletionleftkey[newchildcount] = t.data[c1].k end c2 = t.tree[p].child2 if c2 != it newchildcount += 1 t.deletionchild[newchildcount] = c2 t.deletionleftkey[newchildcount] = t.data[c2].k end c3 = t.tree[p].child3 if c3 != it && c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.data[c3].k end @invariant newchildcount == 1 || newchildcount == 2 push!(t.freedatainds, it) pop!(t.useddatacells,it) defaultKey = t.tree[1].splitkey1 curdepth = t.depth mustdeleteroot = false pparent = -1 ## The following loop ascends the tree and contracts nodes (reduces their ## number of children) as ## needed. If newchildcount == 2 or 3, then the ascent is terminated ## and a node is created with 2 or 3 children. ## If newchildcount == 1, then the ascent must continue since a tree ## node cannot have one child. while true pparent = t.tree[p].parent ## Simple cases when the new child count is 2 or 3 if newchildcount == 2 t.tree[p] = TreeNode{K}(t.deletionchild[1], t.deletionchild[2], 0, pparent, t.deletionleftkey[2], defaultKey) break end if newchildcount == 3 t.tree[p] = TreeNode{K}(t.deletionchild[1], t.deletionchild[2], t.deletionchild[3], pparent, t.deletionleftkey[2], t.deletionleftkey[3]) break end @invariant newchildcount == 1 ## For the rest of this loop, we cover the case ## that p has one child. ## If newchildcount == 1 and curdepth==1, this means that ## the root of the tree has only one child. In this case, we can ## delete the root and make its one child the new root (see below). if curdepth == 1 mustdeleteroot = true break end ## We now branch on three cases depending on whether p is child1, ## child2 or child3 of its parent. if t.tree[pparent].child1 == p rightsib = t.tree[pparent].child2 ## Here p is child1 and rightsib is child2. ## If rightsib has 2 children, then p and ## rightsib are merged into a single node ## that has three children. ## If rightsib has 3 children, then p and ## rightsib are reformed so that each has ## two children. 
if t.tree[rightsib].child3 == 0 rc1 = t.tree[rightsib].child1 rc2 = t.tree[rightsib].child2 t.tree[p] = TreeNode{K}(t.deletionchild[1], rc1, rc2, pparent, t.tree[pparent].splitkey1, t.tree[rightsib].splitkey1) if curdepth == t.depth replaceparent!(t.data, rc1, p) replaceparent!(t.data, rc2, p) else replaceparent!(t.tree, rc1, p) replaceparent!(t.tree, rc2, p) end push!(t.freetreeinds, rightsib) newchildcount = 1 t.deletionchild[1] = p else rc1 = t.tree[rightsib].child1 t.tree[p] = TreeNode{K}(t.deletionchild[1], rc1, 0, pparent, t.tree[pparent].splitkey1, defaultKey) sk1 = t.tree[rightsib].splitkey1 t.tree[rightsib] = TreeNode{K}(t.tree[rightsib].child2, t.tree[rightsib].child3, 0, pparent, t.tree[rightsib].splitkey2, defaultKey) if curdepth == t.depth replaceparent!(t.data, rc1, p) else replaceparent!(t.tree, rc1, p) end newchildcount = 2 t.deletionchild[1] = p t.deletionchild[2] = rightsib t.deletionleftkey[2] = sk1 end ## If pparent had a third child (besides p and rightsib) ## then we add this to t.deletionchild c3 = t.tree[pparent].child3 if c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.tree[pparent].splitkey2 end p = pparent elseif t.tree[pparent].child2 == p ## Here p is child2 and leftsib is child1. ## If leftsib has 2 children, then p and ## leftsib are merged into a single node ## that has three children. ## If leftsib has 3 children, then p and ## leftsib are reformed so that each has ## two children. leftsib = t.tree[pparent].child1 lk = deletionleftkey1_valid ? t.deletionleftkey[1] : t.tree[pparent].splitkey1 if t.tree[leftsib].child3 == 0 lc1 = t.tree[leftsib].child1 lc2 = t.tree[leftsib].child2 t.tree[p] = TreeNode{K}(lc1, lc2, t.deletionchild[1], pparent, t.tree[leftsib].splitkey1, lk) if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 1 t.deletionchild[1] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) if curdepth == t.depth replaceparent!(t.data, lc3, p) else replaceparent!(t.tree, lc3, p) end newchildcount = 2 t.deletionchild[1] = leftsib t.deletionchild[2] = p t.deletionleftkey[2] = sk2 end ## If pparent had a third child (besides p and leftsib) ## then we add this to t.deletionchild c3 = t.tree[pparent].child3 if c3 > 0 newchildcount += 1 t.deletionchild[newchildcount] = c3 t.deletionleftkey[newchildcount] = t.tree[pparent].splitkey2 end p = pparent deletionleftkey1_valid = false else ## Here p is child3 and leftsib is child2. ## If leftsib has 2 children, then p and ## leftsib are merged into a single node ## that has three children. ## If leftsib has 3 children, then p and ## leftsib are reformed so that each has ## two children. @invariant t.tree[pparent].child3 == p leftsib = t.tree[pparent].child2 lk = deletionleftkey1_valid ? 
t.deletionleftkey[1] : t.tree[pparent].splitkey2 if t.tree[leftsib].child3 == 0 lc1 = t.tree[leftsib].child1 lc2 = t.tree[leftsib].child2 t.tree[p] = TreeNode{K}(lc1, lc2, t.deletionchild[1], pparent, t.tree[leftsib].splitkey1, lk) if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 2 t.deletionchild[1] = t.tree[pparent].child1 t.deletionleftkey[2] = t.tree[pparent].splitkey1 t.deletionchild[2] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) if curdepth == t.depth replaceparent!(t.data, lc3, p) else replaceparent!(t.tree, lc3, p) end newchildcount = 3 t.deletionchild[1] = t.tree[pparent].child1 t.deletionchild[2] = leftsib t.deletionchild[3] = p t.deletionleftkey[2] = t.tree[pparent].splitkey1 t.deletionleftkey[3] = sk2 end p = pparent deletionleftkey1_valid = false end curdepth -= 1 end if mustdeleteroot @invariant !deletionleftkey1_valid @invariant p == t.rootloc t.rootloc = t.deletionchild[1] t.depth -= 1 push!(t.freetreeinds, p) end ## If deletionleftkey1_valid, this means that the new ## min key of the deleted node and its right neighbors ## has never been stored in the tree. It must be stored ## as splitkey1 or splitkey2 of some ancestor of the ## deleted node, so we continue ascending the tree ## until we find a node which has p (and therefore the ## deleted node) as its descendent through its second ## or third child. ## It cannot be the case that the deleted node is ## is a descendent of the root always through ## first children, since this would mean the deleted ## node is the leftmost placeholder, which ## cannot be deleted. if deletionleftkey1_valid while true pparentnode = t.tree[pparent] if pparentnode.child2 == p t.tree[pparent] = TreeNode{K}(pparentnode.child1, pparentnode.child2, pparentnode.child3, pparentnode.parent, t.deletionleftkey[1], pparentnode.splitkey2) break elseif pparentnode.child3 == p t.tree[pparent] = TreeNode{K}(pparentnode.child1, pparentnode.child2, pparentnode.child3, pparentnode.parent, pparentnode.splitkey1, t.deletionleftkey[1]) break else p = pparent pparent = pparentnode.parent curdepth -= 1 @invariant curdepth > 0 end end end return nothing end
Base.delete!
655
984
src/balanced_tree.jl
#CURRENT FILE: DataStructures.jl/src/balanced_tree.jl ##CHUNK 1 end @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2le_leaf(t.ord, thisnode, k) : cmp3le_leaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? thisnode.child2 : thisnode.child3 return curnode end ## The following are helper routines for the insert! and delete! functions. ## They replace the 'parent' field of either an internal tree node or ## a data node at the bottom tree level. function replaceparent!(data::Vector{KDRec{K,D}}, whichind::Int, newparent::Int) where {K,D} data[whichind] = KDRec{K,D}(newparent, data[whichind].k, data[whichind].d) return nothing end ##CHUNK 2 leafind, exactfound = findkey(t, k) parent = t.data[leafind].parent ## The following code is necessary because in the case of a ## brand new tree, the initial tree and data entries were incompletely ## initialized by the constructor. In this case, the call to insert! ## underway carries ## valid K and D values, so these valid values may now be ## stored in the dummy placeholder nodes so that they no ## longer hold undefined references. if size(t.data,1) == 2 @invariant t.rootloc == 1 && t.depth == 1 t.tree[1] = TreeNode{K}(t.tree[1].child1, t.tree[1].child2, t.tree[1].child3, t.tree[1].parent, k, k) t.data[1] = KDRec{K,D}(t.data[1].parent, k, d) t.data[2] = KDRec{K,D}(t.data[2].parent, k, d) end ##CHUNK 3 if size(t.data,1) == 2 @invariant t.rootloc == 1 && t.depth == 1 t.tree[1] = TreeNode{K}(t.tree[1].child1, t.tree[1].child2, t.tree[1].child3, t.tree[1].parent, k, k) t.data[1] = KDRec{K,D}(t.data[1].parent, k, d) t.data[2] = KDRec{K,D}(t.data[2].parent, k, d) end ## If we have found exactly k in the tree, then we ## replace the data associated with k and return. if exactfound && !allowdups t.data[leafind] = KDRec{K,D}(parent, k,d) return false, leafind end # We get here if k was not already found in the tree or # if duplicates are allowed. ##CHUNK 4 break end end # If the root has been split, then we need to add a level # to the tree that is the parent of the old root and the new node. if splitroot @invariant existingchild == t.rootloc newroot = TreeNode{K}(existingchild, newchild, 0, 0, minkeynewchild, minkeynewchild) newrootloc = push_or_reuse!(t.tree, t.freetreeinds, newroot) replaceparent!(t.tree, existingchild, newrootloc) replaceparent!(t.tree, newchild, newrootloc) t.rootloc = newrootloc t.depth += 1 end return true, newind end ##CHUNK 5 function findkeyless(t::BalancedTree23, k) curnode = t.rootloc for depthcount = 1 : t.depth - 1 @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2le_nonleaf(t.ord, thisnode, k) : cmp3le_nonleaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? thisnode.child2 : thisnode.child3 end @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2le_leaf(t.ord, thisnode, k) : cmp3le_leaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? 
thisnode.child2 : thisnode.child3 return curnode end ##CHUNK 6 # Two cases: either we need to split the tree node # if newchild4>0 else we convert a 2-node to a 3-node # if newchild4==0 if newchild4 == 0 # Change the parent from a 2-node to a 3-node t.tree[p1] = TreeNode{K}(newchild1, newchild2, newchild3, p1parent, minkeychild2, minkeychild3) if curdepth == depth replaceparent!(t.data, newchild, p1) else replaceparent!(t.tree, newchild, p1) end break end # Split the parent t.tree[p1] = TreeNode{K}(newchild1, newchild2, 0, p1parent, minkeychild2, minkeychild2) newtreenode = TreeNode{K}(newchild3, newchild4, 0, ##CHUNK 7 # In this case we insert a new node. depth = t.depth ord = t.ord ## Store the new data item in the tree's data array. Later ## go back and fix the parent. newind = push_or_reuse!(t.data, t.freedatainds, KDRec{K,D}(0,k,d)) push!(t.useddatacells, newind) p1 = parent newchild = newind minkeynewchild = k splitroot = false curdepth = depth existingchild = leafind ## This loop ascends the tree (i.e., follows the path from a leaf to the root) ## starting from the parent p1 of ## where the new key k will go. ##CHUNK 8 replaceparent!(t.data, newchild, p1) else replaceparent!(t.tree, newchild, p1) end break end # Split the parent t.tree[p1] = TreeNode{K}(newchild1, newchild2, 0, p1parent, minkeychild2, minkeychild2) newtreenode = TreeNode{K}(newchild3, newchild4, 0, p1parent, minkeychild4, minkeychild2) newparentnum = push_or_reuse!(t.tree, t.freetreeinds, newtreenode) if curdepth == depth replaceparent!(t.data, newchild2, p1) replaceparent!(t.data, newchild3, newparentnum) replaceparent!(t.data, newchild4, newparentnum) else replaceparent!(t.tree, newchild2, p1) replaceparent!(t.tree, newchild3, newparentnum) replaceparent!(t.tree, newchild4, newparentnum) ##CHUNK 9 p1parent, minkeychild4, minkeychild2) newparentnum = push_or_reuse!(t.tree, t.freetreeinds, newtreenode) if curdepth == depth replaceparent!(t.data, newchild2, p1) replaceparent!(t.data, newchild3, newparentnum) replaceparent!(t.data, newchild4, newparentnum) else replaceparent!(t.tree, newchild2, p1) replaceparent!(t.tree, newchild3, newparentnum) replaceparent!(t.tree, newchild4, newparentnum) end # Update the loop variables for the next level of the # ascension existingchild = p1 newchild = newparentnum p1 = p1parent minkeynewchild = minkeychild3 curdepth -= 1 if curdepth == 0 splitroot = true ##CHUNK 10 ## Variables updated by the loop: ## p1: parent of where the new node goes ## newchild: index of the child to be inserted ## minkeynewchild: the minimum key in the subtree rooted at newchild ## existingchild: a child of p1; the newchild must ## be inserted in the slot to the right of existingchild ## curdepth: depth of newchild ## For each 3-node we encounter ## during the ascent, we add a new child, which requires splitting ## the 3-node into two 2-nodes. Then we keep going until we hit the root. ## If we encounter a 2-node, then the ascent can stop; we can ## change the 2-node to a 3-node with the new child. while true # Let newchild1,...newchild4 be the new children of # the parent node # Initially, take the three children of the existing parent # node and set newchild4 to 0.
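A minimal usage sketch for the deletion routine above: `BalancedTree23` is an internal type, so this example assumes the public `SortedDict` container is backed by this 2-3 tree and exercises `delete!` through it.

using DataStructures
sd = SortedDict(1 => "a", 2 => "b", 3 => "c")
delete!(sd, 2)                        # internally removes the leaf and contracts tree nodes as needed
@assert collect(keys(sd)) == [1, 3]
@assert !haskey(sd, 2)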
243
257
DataStructures.jl
12
function Base.resize!(cb::CircularBuffer, n::Integer) if n != capacity(cb) buf_new = Vector{eltype(cb)}(undef, n) len_new = min(length(cb), n) for i in 1:len_new @inbounds buf_new[i] = cb[i] end cb.capacity = n cb.first = 1 cb.length = len_new cb.buffer = buf_new end return cb end
function Base.resize!(cb::CircularBuffer, n::Integer) if n != capacity(cb) buf_new = Vector{eltype(cb)}(undef, n) len_new = min(length(cb), n) for i in 1:len_new @inbounds buf_new[i] = cb[i] end cb.capacity = n cb.first = 1 cb.length = len_new cb.buffer = buf_new end return cb end
[ 243, 257 ]
function Base.resize!(cb::CircularBuffer, n::Integer) if n != capacity(cb) buf_new = Vector{eltype(cb)}(undef, n) len_new = min(length(cb), n) for i in 1:len_new @inbounds buf_new[i] = cb[i] end cb.capacity = n cb.first = 1 cb.length = len_new cb.buffer = buf_new end return cb end
function Base.resize!(cb::CircularBuffer, n::Integer) if n != capacity(cb) buf_new = Vector{eltype(cb)}(undef, n) len_new = min(length(cb), n) for i in 1:len_new @inbounds buf_new[i] = cb[i] end cb.capacity = n cb.first = 1 cb.length = len_new cb.buffer = buf_new end return cb end
Base.resize!
243
257
src/circular_buffer.jl
#FILE: DataStructures.jl/src/circ_deque.jl ##CHUNK 1 Create a double-ended queue of maximum capacity `n`, implemented as a circular buffer. The element type is `T`. """ CircularDeque{T}(n::Int) where {T} = CircularDeque(Vector{T}(undef, n), n, 0, 1, n) CircularDeque{T}(n::Integer) where {T} = CircularDeque(Vector{T}(undef, Int(n)), Int(n), 0, 1, Int(n)) Base.length(D::CircularDeque) = D.n Base.eltype(::Type{CircularDeque{T}}) where {T} = T """ capacity(D::CircularDeque) Return the capacity of the circular deque """ capacity(D::CircularDeque) = D.capacity function Base.empty!(D::CircularDeque) D.n = 0 D.first = 1 D.last = D.capacity #CURRENT FILE: DataStructures.jl/src/circular_buffer.jl ##CHUNK 1 Get the first element of CircularBuffer. """ Base.@propagate_inbounds function Base.first(cb::CircularBuffer) @boundscheck (cb.length == 0) && throw(BoundsError(cb, 1)) return cb.buffer[cb.first] end """ last(cb::CircularBuffer) Get the last element of CircularBuffer. """ Base.@propagate_inbounds function Base.last(cb::CircularBuffer) @boundscheck (cb.length == 0) && throw(BoundsError(cb, 1)) return cb.buffer[_buffer_index(cb, cb.length)] end """ resize!(cb::CircularBuffer, n) Resize CircularBuffer to the maximum capacity of n elements. ##CHUNK 2 """ Base.@propagate_inbounds function Base.last(cb::CircularBuffer) @boundscheck (cb.length == 0) && throw(BoundsError(cb, 1)) return cb.buffer[_buffer_index(cb, cb.length)] end """ resize!(cb::CircularBuffer, n) Resize CircularBuffer to the maximum capacity of n elements. If n is smaller than the current buffer length, the first n elements will be retained. """ ##CHUNK 3 function Base.pushfirst!(cb::CircularBuffer, data) # if full, decrement and overwrite, otherwise pushfirst cb.first = (cb.first == 1 ? cb.capacity : cb.first - 1) if length(cb) < cb.capacity cb.length += 1 end @inbounds cb.buffer[cb.first] = data return cb end """ append!(cb::CircularBuffer, datavec::AbstractVector) Push at most last `capacity` items. """ function Base.append!(cb::CircularBuffer, datavec::AbstractVector) # push at most last `capacity` items n = length(datavec) for i in max(1, n-capacity(cb)+1):n push!(cb, datavec[i]) ##CHUNK 4 function Base.fill!(cb::CircularBuffer, data) for i in 1:capacity(cb)-length(cb) push!(cb, data) end return cb end """ length(cb::CircularBuffer) Return the number of elements currently in the buffer. """ Base.length(cb::CircularBuffer) = cb.length """ size(cb::CircularBuffer) Return a tuple with the size of the buffer. """ Base.size(cb::CircularBuffer) = (length(cb),) ##CHUNK 5 cb.length -= 1 return @inbounds cb.buffer[i] end """ pushfirst!(cb::CircularBuffer, data) Insert one or more items at the beginning of CircularBuffer and overwrite back if full. """ function Base.pushfirst!(cb::CircularBuffer, data) # if full, decrement and overwrite, otherwise pushfirst cb.first = (cb.first == 1 ? cb.capacity : cb.first - 1) if length(cb) < cb.capacity cb.length += 1 end @inbounds cb.buffer[cb.first] = data return cb end ##CHUNK 6 # if full, increment and overwrite, otherwise push if cb.length == cb.capacity cb.first = (cb.first == cb.capacity ? 1 : cb.first + 1) else cb.length += 1 end @inbounds cb.buffer[_buffer_index(cb, cb.length)] = data_converted return cb end """ popfirst!(cb::CircularBuffer) Remove the element from the front of the `CircularBuffer`. """ function Base.popfirst!(cb::CircularBuffer) @boundscheck (cb.length == 0) && throw(ArgumentError("array must be non-empty")) i = cb.first cb.first = (cb.first + 1 > cb.capacity ? 
1 : cb.first + 1) ##CHUNK 7 end CircularBuffer(iter) = CircularBuffer{eltype(iter)}(iter) """ empty!(cb::CircularBuffer) Reset the buffer. """ function Base.empty!(cb::CircularBuffer) cb.length = 0 return cb end Base.@propagate_inbounds function _buffer_index_checked(cb::CircularBuffer, i::Int) @boundscheck if i < 1 || i > cb.length throw(BoundsError(cb, i)) end _buffer_index(cb, i) end ##CHUNK 8 end return cb end """ fill!(cb::CircularBuffer, data) Grows the buffer up-to capacity, and fills it entirely. It doesn't overwrite existing elements. """ function Base.fill!(cb::CircularBuffer, data) for i in 1:capacity(cb)-length(cb) push!(cb, data) end return cb end """ length(cb::CircularBuffer) ##CHUNK 9 """ append!(cb::CircularBuffer, datavec::AbstractVector) Push at most last `capacity` items. """ function Base.append!(cb::CircularBuffer, datavec::AbstractVector) # push at most last `capacity` items n = length(datavec) for i in max(1, n-capacity(cb)+1):n push!(cb, datavec[i]) end return cb end """ fill!(cb::CircularBuffer, data) Grows the buffer up-to capacity, and fills it entirely. It doesn't overwrite existing elements. """
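A minimal sketch of the `resize!` behaviour documented above: when the new capacity is smaller than the current length, only the first `n` elements are retained.

using DataStructures
cb = CircularBuffer{Int}(5)
append!(cb, 1:5)                      # buffer holds 1, 2, 3, 4, 5
resize!(cb, 3)                        # shrink the capacity; the first 3 elements survive
@assert capacity(cb) == 3
@assert collect(cb) == [1, 2, 3]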
141
152
DataStructures.jl
13
function Base.iterate(di::DequeIterator{T}, (cb, i) = (di.d.head, di.d.head.front)) where T i > cb.back && return nothing x = cb.data[i] i += 1 if i > cb.back && !isrear(cb) cb = cb.next i = 1 end return (x, (cb, i)) end
function Base.iterate(di::DequeIterator{T}, (cb, i) = (di.d.head, di.d.head.front)) where T i > cb.back && return nothing x = cb.data[i] i += 1 if i > cb.back && !isrear(cb) cb = cb.next i = 1 end return (x, (cb, i)) end
[ 141, 152 ]
function Base.iterate(di::DequeIterator{T}, (cb, i) = (di.d.head, di.d.head.front)) where T i > cb.back && return nothing x = cb.data[i] i += 1 if i > cb.back && !isrear(cb) cb = cb.next i = 1 end return (x, (cb, i)) end
function Base.iterate(di::DequeIterator{T}, (cb, i) = (di.d.head, di.d.head.front)) where T i > cb.back && return nothing x = cb.data[i] i += 1 if i > cb.back && !isrear(cb) cb = cb.next i = 1 end return (x, (cb, i)) end
iterate
141
152
src/deque.jl
#FILE: DataStructures.jl/src/circ_deque.jl ##CHUNK 1 # getindex sans bounds checking @inline function _unsafe_getindex(D::CircularDeque, i::Integer) j = D.first + i - 1 if j > D.capacity j -= D.capacity end @inbounds ret = D.buffer[j] return ret end @inline function Base.getindex(D::CircularDeque, i::Integer) @boundscheck 1 <= i <= D.n || throw(BoundsError()) return _unsafe_getindex(D, i) end # Iteration via getindex @inline function Base.iterate(d::CircularDeque, i = 1) i == d.n + 1 ? nothing : (_unsafe_getindex(d, i), i+1) end #CURRENT FILE: DataStructures.jl/src/deque.jl ##CHUNK 1 # Backwards deque iteration function Base.iterate(di::Iterators.Reverse{<:Deque}, (cb, i) = (di.itr.rear, di.itr.rear.back)) i < cb.front && return nothing x = cb.data[i] i -= 1 # If we're past the beginning of a block, go to the previous one if i < cb.front && !ishead(cb) cb = cb.prev i = cb.back end return (x, (cb, i)) end Base.iterate(d::Deque{T}, s...) where {T} = iterate(DequeIterator{T}(d), s...) Base.length(di::DequeIterator{T}) where {T} = di.d.len ##CHUNK 2 # Iteration struct DequeIterator{T} d::Deque{T} end Base.last(di::DequeIterator) = last(di.d) # Backwards deque iteration function Base.iterate(di::Iterators.Reverse{<:Deque}, (cb, i) = (di.itr.rear, di.itr.rear.back)) i < cb.front && return nothing x = cb.data[i] i -= 1 # If we're past the beginning of a block, go to the previous one if i < cb.front && !ishead(cb) ##CHUNK 3 println(io, "Deque (length = $(d.len), nblocks = $(d.nblocks))") cb::DequeBlock = d.head i = 1 while true print(io, "block $i [$(cb.front):$(cb.back)] ==> ") for j = cb.front : cb.back print(io, cb.data[j]) print(io, ' ') end println(io) cb_next::DequeBlock = cb.next if cb !== cb_next cb = cb_next i += 1 else break end end end ##CHUNK 4 d.nblocks += 1 end d.len += 1 return d end """ pop!(d::Deque{T}) where T Remove the element at the back of deque `d`. """ function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front ##CHUNK 5 """ last(d::Deque) Returns the last element of the deque `d`. """ function Base.last(d::Deque) isempty(d) && throw(ArgumentError("Deque must be non-empty")) blk = d.rear return blk.data[blk.back] end # Iteration struct DequeIterator{T} d::Deque{T} end Base.last(di::DequeIterator) = last(di.d) ##CHUNK 6 # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end ##CHUNK 7 """ push!(d::Deque{T}, x) where T Add an element to the back of deque `d`. 
""" function Base.push!(d::Deque{T}, x) where T rear = d.rear if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 new_rear.data[1] = convert(T, x) new_rear.prev = rear ##CHUNK 8 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end """ pushfirst!(d::Deque{T}, x) where T ##CHUNK 9 # block at the head of the train, elements towards the back head_deque_block(ty::Type{T}, n::Integer) where {T} = DequeBlock{T}(n, n+1) capacity(blk::DequeBlock) = blk.capa Base.length(blk::DequeBlock) = blk.back - blk.front + 1 Base.isempty(blk::DequeBlock) = blk.back < blk.front ishead(blk::DequeBlock) = blk.prev === blk isrear(blk::DequeBlock) = blk.next === blk # reset the block to empty, and position function reset!(blk::DequeBlock{T}, front::Integer) where T empty!(blk.data) resize!(blk.data, blk.capa) blk.front = front blk.back = front - 1 blk.prev = blk blk.next = blk end
156
168
DataStructures.jl
14
function Base.iterate(di::Iterators.Reverse{<:Deque}, (cb, i) = (di.itr.rear, di.itr.rear.back)) i < cb.front && return nothing x = cb.data[i] i -= 1 # If we're past the beginning of a block, go to the previous one if i < cb.front && !ishead(cb) cb = cb.prev i = cb.back end return (x, (cb, i)) end
function Base.iterate(di::Iterators.Reverse{<:Deque}, (cb, i) = (di.itr.rear, di.itr.rear.back)) i < cb.front && return nothing x = cb.data[i] i -= 1 # If we're past the beginning of a block, go to the previous one if i < cb.front && !ishead(cb) cb = cb.prev i = cb.back end return (x, (cb, i)) end
[ 156, 168 ]
function Base.iterate(di::Iterators.Reverse{<:Deque}, (cb, i) = (di.itr.rear, di.itr.rear.back)) i < cb.front && return nothing x = cb.data[i] i -= 1 # If we're past the beginning of a block, go to the previous one if i < cb.front && !ishead(cb) cb = cb.prev i = cb.back end return (x, (cb, i)) end
function Base.iterate(di::Iterators.Reverse{<:Deque}, (cb, i) = (di.itr.rear, di.itr.rear.back)) i < cb.front && return nothing x = cb.data[i] i -= 1 # If we're past the beginning of a block, go to the previous one if i < cb.front && !ishead(cb) cb = cb.prev i = cb.back end return (x, (cb, i)) end
iterate
156
168
src/deque.jl
#CURRENT FILE: DataStructures.jl/src/deque.jl ##CHUNK 1 function Base.iterate(di::DequeIterator{T}, (cb, i) = (di.d.head, di.d.head.front)) where T i > cb.back && return nothing x = cb.data[i] i += 1 if i > cb.back && !isrear(cb) cb = cb.next i = 1 end return (x, (cb, i)) end # Backwards deque iteration Base.iterate(d::Deque{T}, s...) where {T} = iterate(DequeIterator{T}(d), s...) Base.length(di::DequeIterator{T}) where {T} = di.d.len ##CHUNK 2 # Iteration struct DequeIterator{T} d::Deque{T} end Base.last(di::DequeIterator) = last(di.d) function Base.iterate(di::DequeIterator{T}, (cb, i) = (di.d.head, di.d.head.front)) where T i > cb.back && return nothing x = cb.data[i] i += 1 if i > cb.back && !isrear(cb) cb = cb.next i = 1 end ##CHUNK 3 cb::DequeBlock = d.head i = 1 while true print(io, "block $i [$(cb.front):$(cb.back)] ==> ") for j = cb.front : cb.back print(io, cb.data[j]) print(io, ' ') end println(io) cb_next::DequeBlock = cb.next if cb !== cb_next cb = cb_next i += 1 else break end end end ##CHUNK 4 if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end """ ##CHUNK 5 Base.collect(d::Deque{T}) where {T} = T[x for x in d] # Showing function Base.show(io::IO, d::Deque) print(io, "Deque [$(collect(d))]") end function Base.dump(io::IO, d::Deque) println(io, "Deque (length = $(d.len), nblocks = $(d.nblocks))") cb::DequeBlock = d.head i = 1 while true print(io, "block $i [$(cb.front):$(cb.back)] ==> ") for j = cb.front : cb.back print(io, cb.data[j]) print(io, ' ') end println(io) ##CHUNK 6 # block at the head of the train, elements towards the back head_deque_block(ty::Type{T}, n::Integer) where {T} = DequeBlock{T}(n, n+1) capacity(blk::DequeBlock) = blk.capa Base.length(blk::DequeBlock) = blk.back - blk.front + 1 Base.isempty(blk::DequeBlock) = blk.back < blk.front ishead(blk::DequeBlock) = blk.prev === blk isrear(blk::DequeBlock) = blk.next === blk # reset the block to empty, and position function reset!(blk::DequeBlock{T}, front::Integer) where T empty!(blk.data) resize!(blk.data, blk.capa) blk.front = front blk.back = front - 1 blk.prev = blk blk.next = blk end ##CHUNK 7 # Manipulation """ empty!(d::Deque{T}) where T Reset the deque `d`. """ function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) ##CHUNK 8 """ last(d::Deque) Returns the last element of the deque `d`. 
""" function Base.last(d::Deque) isempty(d) && throw(ArgumentError("Deque must be non-empty")) blk = d.rear return blk.data[blk.back] end # Iteration struct DequeIterator{T} d::Deque{T} end Base.last(di::DequeIterator) = last(di.d) ##CHUNK 9 function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 return x end ##CHUNK 10 front::Int back::Int prev::DequeBlock{T} # ref to previous block next::DequeBlock{T} # ref to next block function DequeBlock{T}(capa::Int, front::Int) where T data = Vector{T}(undef, capa) blk = new{T}(data, capa, front, front-1) blk.prev = blk blk.next = blk return blk end # Convert any `Integer` to whatever `Int` is on the relevant machine DequeBlock{T}(capa::Integer, front::Integer) where T = DequeBlock{T}(Int(capa), Int(front)) end # block at the rear of the chain, elements towards the front rear_deque_block(ty::Type{T}, n::Integer) where {T} = DequeBlock{T}(n, 1)
212
230
DataStructures.jl
15
function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end
function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end
[ 212, 230 ]
function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end
function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end
Base.empty!
212
230
src/deque.jl
#CURRENT FILE: DataStructures.jl/src/deque.jl ##CHUNK 1 function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block empty!(head.data) d.head = head.next::DequeBlock{T} d.head.prev = d.head d.nblocks -= 1 end end d.len -= 1 return x end ##CHUNK 2 pop!(d::Deque{T}) where T Remove the element at the back of deque `d`. """ function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end ##CHUNK 3 Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 return x end """ popfirst!(d::Deque{T}) where T Remove the element at the front of deque `d`. """ ##CHUNK 4 # block at the head of the train, elements towards the back head_deque_block(ty::Type{T}, n::Integer) where {T} = DequeBlock{T}(n, n+1) capacity(blk::DequeBlock) = blk.capa Base.length(blk::DequeBlock) = blk.back - blk.front + 1 Base.isempty(blk::DequeBlock) = blk.back < blk.front ishead(blk::DequeBlock) = blk.prev === blk isrear(blk::DequeBlock) = blk.next === blk # reset the block to empty, and position function reset!(blk::DequeBlock{T}, front::Integer) where T empty!(blk.data) resize!(blk.data, blk.capa) blk.front = front blk.back = front - 1 blk.prev = blk blk.next = blk end ##CHUNK 5 # release and detach the head block empty!(head.data) d.head = head.next::DequeBlock{T} d.head.prev = d.head d.nblocks -= 1 end end d.len -= 1 return x end const _deque_hashseed = UInt === UInt64 ? 0x950aa17a3246be82 : 0x4f26f881 function Base.hash(x::Deque, h::UInt) h += _deque_hashseed for (i, x) in enumerate(x) h += i * hash(x) end return h end ##CHUNK 6 end d.len -= 1 return x end """ popfirst!(d::Deque{T}) where T Remove the element at the front of deque `d`. """ function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 ##CHUNK 7 function Base.dump(io::IO, d::Deque) println(io, "Deque (length = $(d.len), nblocks = $(d.nblocks))") cb::DequeBlock = d.head i = 1 while true print(io, "block $i [$(cb.front):$(cb.back)] ==> ") for j = cb.front : cb.back print(io, cb.data[j]) print(io, ' ') end println(io) cb_next::DequeBlock = cb.next if cb !== cb_next cb = cb_next i += 1 else break end ##CHUNK 8 """ """ push!(d::Deque{T}, x) where T Add an element to the back of deque `d`. 
""" function Base.push!(d::Deque{T}, x) where T rear = d.rear if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) ##CHUNK 9 if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end ##CHUNK 10 new_head.data[n] = convert(T, x) new_head.next = head d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end """ pop!(d::Deque{T}) where T Remove the element at the back of deque `d`. """ function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back]
238
258
DataStructures.jl
16
function Base.push!(d::Deque{T}, x) where T rear = d.rear if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end
function Base.push!(d::Deque{T}, x) where T rear = d.rear if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end
[ 238, 258 ]
function Base.push!(d::Deque{T}, x) where T rear = d.rear if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end
function Base.push!(d::Deque{T}, x) where T rear = d.rear if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end
Base.push!
238
258
src/deque.jl
#CURRENT FILE: DataStructures.jl/src/deque.jl ##CHUNK 1 Remove the element at the back of deque `d`. """ function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 ##CHUNK 2 d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end """ pop!(d::Deque{T}) where T Remove the element at the back of deque `d`. """ function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 ##CHUNK 3 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 return x end """ popfirst!(d::Deque{T}) where T Remove the element at the front of deque `d`. """ function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) ##CHUNK 4 end if head.front > 1 @inbounds head.data[head.front -= 1] = convert(T, x) else n::Int = d.blksize new_head = head_deque_block(T, n) new_head.front = n new_head.data[n] = convert(T, x) new_head.next = head d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end """ pop!(d::Deque{T}) where T ##CHUNK 5 """ function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end ##CHUNK 6 Add an element to the front of deque `d`. """ function Base.pushfirst!(d::Deque{T}, x) where T head = d.head if isempty(head) n = head.capa head.front = n + 1 head.back = n end if head.front > 1 @inbounds head.data[head.front -= 1] = convert(T, x) else n::Int = d.blksize new_head = head_deque_block(T, n) new_head.front = n new_head.data[n] = convert(T, x) new_head.next = head ##CHUNK 7 return x end """ popfirst!(d::Deque{T}) where T Remove the element at the front of deque `d`. """ function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block empty!(head.data) ##CHUNK 8 # block at the head of the train, elements towards the back head_deque_block(ty::Type{T}, n::Integer) where {T} = DequeBlock{T}(n, n+1) capacity(blk::DequeBlock) = blk.capa Base.length(blk::DequeBlock) = blk.back - blk.front + 1 Base.isempty(blk::DequeBlock) = blk.back < blk.front ishead(blk::DequeBlock) = blk.prev === blk isrear(blk::DequeBlock) = blk.next === blk # reset the block to empty, and position function reset!(blk::DequeBlock{T}, front::Integer) where T empty!(blk.data) resize!(blk.data, blk.capa) blk.front = front blk.back = front - 1 blk.prev = blk blk.next = blk end ##CHUNK 9 Parameters ---------- `T::Type` Deque element data type. 
`blksize::Int` Deque block size (in bytes). Default = 1024. """ mutable struct Deque{T} nblocks::Int blksize::Int len::Int head::DequeBlock{T} rear::DequeBlock{T} function Deque{T}(blksize::Integer) where T head = rear = rear_deque_block(T, blksize) new{T}(1, blksize, 0, head, rear) end ##CHUNK 10 blksize::Int len::Int head::DequeBlock{T} rear::DequeBlock{T} function Deque{T}(blksize::Integer) where T head = rear = rear_deque_block(T, blksize) new{T}(1, blksize, 0, head, rear) end Deque{T}() where {T} = Deque{T}(DEFAULT_DEQUEUE_BLOCKSIZE) end """ isempty(d::Deque) Verifies if deque `d` is empty. """ Base.isempty(d::Deque) = d.len == 0
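A minimal sketch of `push!`: elements go into the rear block, and a fresh rear block is linked in once the current one is full.

using DataStructures
d = Deque{Int}(2)                        # block capacity 2, so the third push allocates a new rear block
push!(d, 1); push!(d, 2); push!(d, 3)
@assert length(d) == 3
@assert last(d) == 3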
265
287
DataStructures.jl
17
function Base.pushfirst!(d::Deque{T}, x) where T head = d.head if isempty(head) n = head.capa head.front = n + 1 head.back = n end if head.front > 1 @inbounds head.data[head.front -= 1] = convert(T, x) else n::Int = d.blksize new_head = head_deque_block(T, n) new_head.front = n new_head.data[n] = convert(T, x) new_head.next = head d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end
function Base.pushfirst!(d::Deque{T}, x) where T head = d.head if isempty(head) n = head.capa head.front = n + 1 head.back = n end if head.front > 1 @inbounds head.data[head.front -= 1] = convert(T, x) else n::Int = d.blksize new_head = head_deque_block(T, n) new_head.front = n new_head.data[n] = convert(T, x) new_head.next = head d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end
[ 265, 287 ]
function Base.pushfirst!(d::Deque{T}, x) where T head = d.head if isempty(head) n = head.capa head.front = n + 1 head.back = n end if head.front > 1 @inbounds head.data[head.front -= 1] = convert(T, x) else n::Int = d.blksize new_head = head_deque_block(T, n) new_head.front = n new_head.data[n] = convert(T, x) new_head.next = head d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end
function Base.pushfirst!(d::Deque{T}, x) where T head = d.head if isempty(head) n = head.capa head.front = n + 1 head.back = n end if head.front > 1 @inbounds head.data[head.front -= 1] = convert(T, x) else n::Int = d.blksize new_head = head_deque_block(T, n) new_head.front = n new_head.data[n] = convert(T, x) new_head.next = head d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end
Base.pushfirst!
265
287
src/deque.jl
#FILE: DataStructures.jl/src/circ_deque.jl ##CHUNK 1 v end """ pushfirst!(D::CircularDeque, v) Add an element to the front. """ @inline function Base.pushfirst!(D::CircularDeque, v) @boundscheck D.n < D.capacity || throw(BoundsError()) D.n += 1 tmp = D.first - 1 D.first = ifelse(tmp < 1, D.capacity, tmp) @inbounds D.buffer[D.first] = v D end """ popfirst!(D::CircularDeque) #CURRENT FILE: DataStructures.jl/src/deque.jl ##CHUNK 1 """ popfirst!(d::Deque{T}) where T Remove the element at the front of deque `d`. """ function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block empty!(head.data) d.head = head.next::DequeBlock{T} d.head.prev = d.head ##CHUNK 2 pushfirst!(d::Deque{T}, x) where T Add an element to the front of deque `d`. """ """ pop!(d::Deque{T}) where T Remove the element at the back of deque `d`. """ function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 ##CHUNK 3 # block at the head of the train, elements towards the back head_deque_block(ty::Type{T}, n::Integer) where {T} = DequeBlock{T}(n, n+1) capacity(blk::DequeBlock) = blk.capa Base.length(blk::DequeBlock) = blk.back - blk.front + 1 Base.isempty(blk::DequeBlock) = blk.back < blk.front ishead(blk::DequeBlock) = blk.prev === blk isrear(blk::DequeBlock) = blk.next === blk # reset the block to empty, and position function reset!(blk::DequeBlock{T}, front::Integer) where T empty!(blk.data) resize!(blk.data, blk.capa) blk.front = front blk.back = front - 1 blk.prev = blk blk.next = blk end ##CHUNK 4 if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end """ ##CHUNK 5 """ function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end ##CHUNK 6 """ push!(d::Deque{T}, x) where T Add an element to the back of deque `d`. """ function Base.push!(d::Deque{T}, x) where T rear = d.rear if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 ##CHUNK 7 # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end """ push!(d::Deque{T}, x) where T Add an element to the back of deque `d`. """ function Base.push!(d::Deque{T}, x) where T rear = d.rear ##CHUNK 8 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end """ pushfirst!(d::Deque{T}, x) where T Add an element to the front of deque `d`. 
""" """ pop!(d::Deque{T}) where T Remove the element at the back of deque `d`. """ ##CHUNK 9 function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 return x end
294
313
DataStructures.jl
18
function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 return x end
function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 return x end
[ 294, 313 ]
function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 return x end
function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 return x end
Base.pop!
294
313
src/deque.jl
#CURRENT FILE: DataStructures.jl/src/deque.jl ##CHUNK 1 isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block empty!(head.data) d.head = head.next::DequeBlock{T} d.head.prev = d.head d.nblocks -= 1 end end d.len -= 1 return x end ##CHUNK 2 """ function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end ##CHUNK 3 if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end """ ##CHUNK 4 """ push!(d::Deque{T}, x) where T Add an element to the back of deque `d`. """ function Base.push!(d::Deque{T}, x) where T rear = d.rear if isempty(rear) rear.front = 1 rear.back = 0 end if rear.back < rear.capa @inbounds rear.data[rear.back += 1] = convert(T, x) else new_rear = rear_deque_block(T, d.blksize) new_rear.back = 1 ##CHUNK 5 # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end """ push!(d::Deque{T}, x) where T Add an element to the back of deque `d`. """ function Base.push!(d::Deque{T}, x) where T rear = d.rear ##CHUNK 6 end end # Manipulation """ empty!(d::Deque{T}) where T Reset the deque `d`. """ function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end ##CHUNK 7 Remove the element at the back of deque `d`. """ """ popfirst!(d::Deque{T}) where T Remove the element at the front of deque `d`. """ function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block ##CHUNK 8 new_rear.data[1] = convert(T, x) new_rear.prev = rear d.rear = rear.next = new_rear d.nblocks += 1 end d.len += 1 return d end """ pushfirst!(d::Deque{T}, x) where T Add an element to the front of deque `d`. 
""" function Base.pushfirst!(d::Deque{T}, x) where T head = d.head if isempty(head) n = head.capa head.front = n + 1 ##CHUNK 9 # block at the head of the train, elements towards the back head_deque_block(ty::Type{T}, n::Integer) where {T} = DequeBlock{T}(n, n+1) capacity(blk::DequeBlock) = blk.capa Base.length(blk::DequeBlock) = blk.back - blk.front + 1 Base.isempty(blk::DequeBlock) = blk.back < blk.front ishead(blk::DequeBlock) = blk.prev === blk isrear(blk::DequeBlock) = blk.next === blk # reset the block to empty, and position function reset!(blk::DequeBlock{T}, front::Integer) where T empty!(blk.data) resize!(blk.data, blk.capa) blk.front = front blk.back = front - 1 blk.prev = blk blk.next = blk end ##CHUNK 10 head.back = n end if head.front > 1 @inbounds head.data[head.front -= 1] = convert(T, x) else n::Int = d.blksize new_head = head_deque_block(T, n) new_head.front = n new_head.data[n] = convert(T, x) new_head.next = head d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end """ pop!(d::Deque{T}) where T
320
339
DataStructures.jl
19
function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block empty!(head.data) d.head = head.next::DequeBlock{T} d.head.prev = d.head d.nblocks -= 1 end end d.len -= 1 return x end
function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block empty!(head.data) d.head = head.next::DequeBlock{T} d.head.prev = d.head d.nblocks -= 1 end end d.len -= 1 return x end
[ 320, 339 ]
function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block empty!(head.data) d.head = head.next::DequeBlock{T} d.head.prev = d.head d.nblocks -= 1 end end d.len -= 1 return x end
function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block empty!(head.data) d.head = head.next::DequeBlock{T} d.head.prev = d.head d.nblocks -= 1 end end d.len -= 1 return x end
Base.popfirst!
320
339
src/deque.jl
#FILE: DataStructures.jl/src/circ_deque.jl ##CHUNK 1 D.n += 1 tmp = D.first - 1 D.first = ifelse(tmp < 1, D.capacity, tmp) @inbounds D.buffer[D.first] = v D end """ popfirst!(D::CircularDeque) Remove the element at the front. """ @inline Base.@propagate_inbounds function Base.popfirst!(D::CircularDeque) v = first(D) Base._unsetindex!(D.buffer, D.first) # see issue/884 D.n -= 1 tmp = D.first + 1 D.first = ifelse(tmp > D.capacity, 1, tmp) v end #CURRENT FILE: DataStructures.jl/src/deque.jl ##CHUNK 1 new_head.next = head d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end """ pop!(d::Deque{T}) where T Remove the element at the back of deque `d`. """ function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 ##CHUNK 2 """ function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end ##CHUNK 3 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end d.len -= 1 return x end """ popfirst!(d::Deque{T}) where T Remove the element at the front of deque `d`. """ ##CHUNK 4 Remove the element at the back of deque `d`. """ function Base.pop!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) rear = d.rear @assert rear.back >= rear.front @inbounds x = rear.data[rear.back] Base._unsetindex!(rear.data, rear.back) # see issue/884 rear.back -= 1 if rear.back < rear.front if d.nblocks > 1 # release and detach the rear block empty!(rear.data) d.rear = rear.prev::DequeBlock{T} d.rear.next = d.rear d.nblocks -= 1 end end ##CHUNK 5 head.back = n end if head.front > 1 @inbounds head.data[head.front -= 1] = convert(T, x) else n::Int = d.blksize new_head = head_deque_block(T, n) new_head.front = n new_head.data[n] = convert(T, x) new_head.next = head d.head = head.prev = new_head d.nblocks += 1 end d.len += 1 return d end """ pop!(d::Deque{T}) where T ##CHUNK 6 pushfirst!(d::Deque{T}, x) where T Add an element to the front of deque `d`. """ function Base.pushfirst!(d::Deque{T}, x) where T head = d.head if isempty(head) n = head.capa head.front = n + 1 head.back = n end if head.front > 1 @inbounds head.data[head.front -= 1] = convert(T, x) else n::Int = d.blksize new_head = head_deque_block(T, n) new_head.front = n new_head.data[n] = convert(T, x) ##CHUNK 7 end end # Manipulation """ empty!(d::Deque{T}) where T Reset the deque `d`. """ function Base.empty!(d::Deque{T}) where T # release all blocks except the head if d.nblocks > 1 cb::DequeBlock{T} = d.rear while cb != d.head empty!(cb.data) cb = cb.prev end end ##CHUNK 8 # clean the head block (but retain the block itself) reset!(d.head, 1) # reset queue fields d.nblocks = 1 d.len = 0 d.rear = d.head return d end """ push!(d::Deque{T}, x) where T Add an element to the back of deque `d`. 
""" function Base.push!(d::Deque{T}, x) where T rear = d.rear ##CHUNK 9 # block at the head of the train, elements towards the back head_deque_block(ty::Type{T}, n::Integer) where {T} = DequeBlock{T}(n, n+1) capacity(blk::DequeBlock) = blk.capa Base.length(blk::DequeBlock) = blk.back - blk.front + 1 Base.isempty(blk::DequeBlock) = blk.back < blk.front ishead(blk::DequeBlock) = blk.prev === blk isrear(blk::DequeBlock) = blk.next === blk # reset the block to empty, and position function reset!(blk::DequeBlock{T}, front::Integer) where T empty!(blk.data) resize!(blk.data, blk.capa) blk.front = front blk.back = front - 1 blk.prev = blk blk.next = blk end
15
27
DataStructures.jl
20
function DiBitVector(n::Integer, v::Integer) if Int(n) < 0 throw(ArgumentError("n ($n) must be greater than or equal to zero")) end if !(Int(v) in 0:3) throw(ArgumentError("v ($v) must be in 0:3")) end fv = (0x0000000000000000, 0x5555555555555555, 0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff)[v + 1] vec = Vector{UInt64}(undef, cld(n, 32)) fill!(vec, fv) return new(vec, n % UInt64) end
function DiBitVector(n::Integer, v::Integer) if Int(n) < 0 throw(ArgumentError("n ($n) must be greater than or equal to zero")) end if !(Int(v) in 0:3) throw(ArgumentError("v ($v) must be in 0:3")) end fv = (0x0000000000000000, 0x5555555555555555, 0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff)[v + 1] vec = Vector{UInt64}(undef, cld(n, 32)) fill!(vec, fv) return new(vec, n % UInt64) end
[ 15, 27 ]
function DiBitVector(n::Integer, v::Integer) if Int(n) < 0 throw(ArgumentError("n ($n) must be greater than or equal to zero")) end if !(Int(v) in 0:3) throw(ArgumentError("v ($v) must be in 0:3")) end fv = (0x0000000000000000, 0x5555555555555555, 0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff)[v + 1] vec = Vector{UInt64}(undef, cld(n, 32)) fill!(vec, fv) return new(vec, n % UInt64) end
function DiBitVector(n::Integer, v::Integer) if Int(n) < 0 throw(ArgumentError("n ($n) must be greater than or equal to zero")) end if !(Int(v) in 0:3) throw(ArgumentError("v ($v) must be in 0:3")) end fv = (0x0000000000000000, 0x5555555555555555, 0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff)[v + 1] vec = Vector{UInt64}(undef, cld(n, 32)) fill!(vec, fv) return new(vec, n % UInt64) end
DiBitVector
15
27
src/dibit_vector.jl
#FILE: DataStructures.jl/src/int_set.jl ##CHUNK 1 idx = n+1 if 1 <= idx <= length(s.bits) unsafe_getindex(s.bits, idx) != s.inverse else ifelse((idx <= 0) | (idx > typemax(Int)), false, s.inverse) end end function findnextidx(s::IntSet, i::Int, invert=false) if s.inverse ⊻ invert # i+1 could rollover causing a BoundsError in findnext/findnextnot nextidx = i == typemax(Int) ? 0 : something(findnextnot(s.bits, i+1), 0) # Extend indices beyond the length of the bits since it is inverted nextidx = nextidx == 0 ? max(i, length(s.bits))+1 : nextidx else nextidx = i == typemax(Int) ? 0 : something(findnext(s.bits, i+1), 0) end return nextidx end ##CHUNK 2 # An internal function for setting the inclusion bit for a given integer n >= 0 @inline function _setint!(s::IntSet, n::Integer, b::Bool) idx = n+1 if idx > length(s.bits) !b && return s # setting a bit to zero outside the set's bits is a no-op newlen = idx + idx>>1 # This operation may overflow; we want saturation _resize0!(s.bits, ifelse(newlen<0, typemax(Int), newlen)) end unsafe_setindex!(s.bits, b, idx) # Use @inbounds once available return s end # An internal function to resize a bitarray and ensure the newly allocated # elements are zeroed (will become unnecessary if this behavior changes) @inline function _resize0!(b::BitVector, newlen::Integer) len = length(b) resize!(b, newlen) len < newlen && @inbounds(b[len+1:newlen] .= false) # resize! gives dirty memory return b end ##CHUNK 3 # i+1 could rollover causing a BoundsError in findnext/findnextnot nextidx = i == typemax(Int) ? 0 : something(findnextnot(s.bits, i+1), 0) # Extend indices beyond the length of the bits since it is inverted nextidx = nextidx == 0 ? max(i, length(s.bits))+1 : nextidx else nextidx = i == typemax(Int) ? 0 : something(findnext(s.bits, i+1), 0) end return nextidx end Base.iterate(s::IntSet) = iterate(s, findnextidx(s, 0)) function Base.iterate(s::IntSet, i::Int, invert=false) i <= 0 && return nothing return (i-1, findnextidx(s, i, invert)) end # Nextnot iterates through elements *not* in the set nextnot(s::IntSet, i) = iterate(s, i, true) #FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 @inline function _hashtag(u::Unsigned) #extracts tag between 0x02 and 0xff from lower bits, rotates tag bits to front u = u % UInt tag = u % UInt8 if UInt === UInt64 hi = ((u>>8) | (u<<56)) % Int else hi = ((u>>8) | (u<<24)) % Int end tag = tag > 1 ? tag : tag+0x02 return (hi, tag) end Base.@propagate_inbounds function _slotget(slots::Vector{_u8x16}, i::Int) @boundscheck 0 < i <= length(slots)*16 || throw(BoundsError(slots, 1 + (i-1)>>4)) GC.@preserve slots begin return unsafe_load(convert(Ptr{UInt8}, pointer(slots)), i) end end #CURRENT FILE: DataStructures.jl/src/dibit_vector.jl ##CHUNK 1 """ DiBitVector(n::Integer, v::Integer) Create a `DiBitVector` with `n` elements preinitialized to a value `v` from `0` to `3`, inclusive. A `DiBitVector` is a vector whose elements are two bits wide, allowing storage of integer values between 0 and 3. This structure is optimized for performance and memory savings for large numbers of elements. """ mutable struct DiBitVector <: AbstractVector{UInt8} data::Vector{UInt64} len::UInt end @inline Base.checkbounds(D::DiBitVector, n::Integer) = 0 < n ≤ length(D.data) << 5 || throw(BoundsError(D, n)) """ DiBitVector(n::Integer) ##CHUNK 2 Create a [`DiBitVector`](@ref) with `n` elements set to `0`. 
""" DiBitVector(n::Integer) = DiBitVector(n, 0) DiBitVector() = DiBitVector(0, 0) @inline Base.length(x::DiBitVector) = x.len % Int @inline Base.size(x::DiBitVector) = (length(x),) @inline index(n::Integer) = ((n-1) >>> 5) + 1 @inline offset(n::Integer) = ((UInt64(n)-1) << 1) & 63 @inline function Base.getindex(x::DiBitVector, i::Int) @boundscheck checkbounds(x, i) return UInt8((@inbounds x.data[index(i)] >>> offset(i)) & 3) end @inline function Base.unsafe_setindex!(x::DiBitVector, v::UInt64, i::Int) bits = @inbounds x.data[index(i)] bits &= ~(UInt64(3) << offset(i)) ##CHUNK 3 @inline offset(n::Integer) = ((UInt64(n)-1) << 1) & 63 @inline function Base.getindex(x::DiBitVector, i::Int) @boundscheck checkbounds(x, i) return UInt8((@inbounds x.data[index(i)] >>> offset(i)) & 3) end @inline function Base.unsafe_setindex!(x::DiBitVector, v::UInt64, i::Int) bits = @inbounds x.data[index(i)] bits &= ~(UInt64(3) << offset(i)) bits |= convert(UInt64, v) << offset(i) @inbounds x.data[index(i)] = bits end @inline function Base.setindex!(x::DiBitVector, v::Integer, i::Int) v & 3 == v || throw(DomainError("Can only contain 0:3 (tried $v)")) @boundscheck checkbounds(x, i) unsafe_setindex!(x, convert(UInt64, v), i) end ##CHUNK 4 bits |= convert(UInt64, v) << offset(i) @inbounds x.data[index(i)] = bits end @inline function Base.setindex!(x::DiBitVector, v::Integer, i::Int) v & 3 == v || throw(DomainError("Can only contain 0:3 (tried $v)")) @boundscheck checkbounds(x, i) unsafe_setindex!(x, convert(UInt64, v), i) end @inline function Base.push!(x::DiBitVector, v::Integer) len = length(x) len == UInt64(length(x.data)) << 5 && push!(x.data, zero(UInt64)) x.len = (len + 1) % UInt64 x[len+1] = convert(UInt64, v) return x end @inline function Base.pop!(x::DiBitVector) x.len == 0 && throw(ArgumentError("array must be non-empty")) ##CHUNK 5 @inline function Base.push!(x::DiBitVector, v::Integer) len = length(x) len == UInt64(length(x.data)) << 5 && push!(x.data, zero(UInt64)) x.len = (len + 1) % UInt64 x[len+1] = convert(UInt64, v) return x end @inline function Base.pop!(x::DiBitVector) x.len == 0 && throw(ArgumentError("array must be non-empty")) v = x[end] x.len = (x.len - 1) % UInt64 x.len == UInt64((length(x.data) -1)) << 5 && pop!(x.data) return v end @inline Base.zero(x::DiBitVector) = DiBitVector(x.len, 0) ##CHUNK 6 mutable struct DiBitVector <: AbstractVector{UInt8} data::Vector{UInt64} len::UInt end @inline Base.checkbounds(D::DiBitVector, n::Integer) = 0 < n ≤ length(D.data) << 5 || throw(BoundsError(D, n)) """ DiBitVector(n::Integer) Create a [`DiBitVector`](@ref) with `n` elements set to `0`. """ DiBitVector(n::Integer) = DiBitVector(n, 0) DiBitVector() = DiBitVector(0, 0) @inline Base.length(x::DiBitVector) = x.len % Int @inline Base.size(x::DiBitVector) = (length(x),) @inline index(n::Integer) = ((n-1) >>> 5) + 1
3
16
DataStructures.jl
21
function not_iterator_of_pairs(kv::T) where T # if the object is not iterable, return true, else check the eltype of the iteration Base.isiterable(T) || return true # else, check if we can check `eltype`: if Base.IteratorEltype(kv) isa Base.HasEltype typ = eltype(kv) if !(typ == Any) return !(typ <: Union{<: Tuple, <: Pair}) end end # we can't check eltype, or eltype is not useful, # so brute force it. return any(x->!isa(x, Union{Tuple,Pair}), kv) end
function not_iterator_of_pairs(kv::T) where T # if the object is not iterable, return true, else check the eltype of the iteration Base.isiterable(T) || return true # else, check if we can check `eltype`: if Base.IteratorEltype(kv) isa Base.HasEltype typ = eltype(kv) if !(typ == Any) return !(typ <: Union{<: Tuple, <: Pair}) end end # we can't check eltype, or eltype is not useful, # so brute force it. return any(x->!isa(x, Union{Tuple,Pair}), kv) end
[ 3, 16 ]
function not_iterator_of_pairs(kv::T) where T # if the object is not iterable, return true, else check the eltype of the iteration Base.isiterable(T) || return true # else, check if we can check `eltype`: if Base.IteratorEltype(kv) isa Base.HasEltype typ = eltype(kv) if !(typ == Any) return !(typ <: Union{<: Tuple, <: Pair}) end end # we can't check eltype, or eltype is not useful, # so brute force it. return any(x->!isa(x, Union{Tuple,Pair}), kv) end
function not_iterator_of_pairs(kv::T) where T # if the object is not iterable, return true, else check the eltype of the iteration Base.isiterable(T) || return true # else, check if we can check `eltype`: if Base.IteratorEltype(kv) isa Base.HasEltype typ = eltype(kv) if !(typ == Any) return !(typ <: Union{<: Tuple, <: Pair}) end end # we can't check eltype, or eltype is not useful, # so brute force it. return any(x->!isa(x, Union{Tuple,Pair}), kv) end
not_iterator_of_pairs
3
16
src/dict_support.jl
#FILE: DataStructures.jl/test/test_priority_queue.jl ##CHUNK 1 x::T end Base.IteratorEltype(::EltypeUnknownIterator) = Base.EltypeUnknown() Base.iterate(i::EltypeUnknownIterator) = Base.iterate(i.x) Base.iterate(i::EltypeUnknownIterator, state) = Base.iterate(i.x, state) Base.IteratorSize(i::EltypeUnknownIterator) = Base.IteratorSize(i.x) Base.length(i::EltypeUnknownIterator) = Base.length(i.x) Base.size(i::EltypeUnknownIterator) = Base.size(i.x) @test_nowarn PriorityQueue(Dict(zip(1:5, 2:6))) @test_nowarn PriorityQueue(EltypeUnknownIterator(Dict(zip(1:5, 2:6)))) @test_throws ArgumentError PriorityQueue(EltypeUnknownIterator(['a'])) end @testset "Eltype any" begin struct EltypeAnyIterator{T} x::T end Base.IteratorEltype(::EltypeAnyIterator) = Base.HasEltype() Base.eltype(::EltypeAnyIterator) = Any ##CHUNK 2 @test_nowarn PriorityQueue(EltypeUnknownIterator(Dict(zip(1:5, 2:6)))) @test_throws ArgumentError PriorityQueue(EltypeUnknownIterator(['a'])) end @testset "Eltype any" begin struct EltypeAnyIterator{T} x::T end Base.IteratorEltype(::EltypeAnyIterator) = Base.HasEltype() Base.eltype(::EltypeAnyIterator) = Any Base.iterate(i::EltypeAnyIterator) = Base.iterate(i.x) Base.iterate(i::EltypeAnyIterator, state) = Base.iterate(i.x, state) Base.IteratorSize(i::EltypeAnyIterator) = Base.IteratorSize(i.x) Base.length(i::EltypeAnyIterator) = Base.length(i.x) Base.size(i::EltypeAnyIterator) = Base.size(i.x) @test_nowarn PriorityQueue(EltypeAnyIterator(Dict(zip(1:5, 2:6)))) @test_throws ArgumentError PriorityQueue(EltypeAnyIterator(['a'])) end end #FILE: DataStructures.jl/src/sorted_multi_dict.jl ##CHUNK 1 """ Base.eltype(sc) Returns the (key,value) type (a 2-entry pair, i.e., `Pair{K,V}`) for SortedDict and SortedMultiDict. Returns the key type for SortedSet. This function may also be applied to the type itself. 
Time: O(1) """ @inline Base.eltype(m::SortedMultiDict{K,D,Ord}) where {K,D,Ord <: Ordering} = Pair{K,D} @inline Base.eltype(::Type{SortedMultiDict{K,D,Ord}}) where {K,D,Ord <: Ordering} = Pair{K,D} function in_(k_, d_, m::SortedMultiDict) k = convert(keytype(m), k_) d = convert(valtype(m), d_) i1 = findkeyless(m.bt, k) i2,exactfound = findkey(m.bt,k) !exactfound && return false ord = m.bt.ord while true i1 = nextloc0(m.bt, i1) ##CHUNK 2 SortedMultiDict{K,D, typeof(o)}(o, kv) SortedMultiDict{K,D}(o::Ordering, kv) where {K,D} = SortedMultiDict{K,D, typeof(o)}(o, kv) # TODO: figure out how to infer type without three copies function SortedMultiDict(o::Ordering, kv) c = collect(kv) if eltype(c) <: Pair c2 = collect((t.first, t.second) for t in c) elseif eltype(c) <: Tuple c2 = collect((t[1], t[2]) for t in c) else throw(ArgumentError("In SortedMultiDict(o,kv), kv should contain either pairs or 2-tuples")) end SortedMultiDict{eltype(c2).parameters[1], eltype(c2).parameters[2], typeof(o)}(o, c2) end """ SortedMultiDict{K,V}(::Val{true}, iterable) where {K,V} #FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 function OrderedRobinDict(kv) try return dict_with_eltype((K, V) -> OrderedRobinDict{K, V}, kv, eltype(kv)) catch e if !isiterable(typeof(kv)) || !all(x -> isa(x, Union{Tuple,Pair}), kv) !all(x->isa(x,Union{Tuple,Pair}),kv) throw(ArgumentError("OrderedRobinDict(kv): kv needs to be an iterator of tuples or pairs")) else rethrow(e) end end end Base.empty(d::OrderedRobinDict{K,V}) where {K,V} = OrderedRobinDict{K,V}() Base.length(d::Union{RobinDict, OrderedRobinDict}) = d.count Base.isempty(d::Union{RobinDict, OrderedRobinDict}) = (length(d) == 0) """ empty!(collection) -> collection ##CHUNK 2 OrderedRobinDict() = OrderedRobinDict{Any,Any}() OrderedRobinDict(kv::Tuple{}) = OrderedRobinDict() Base.copy(d::OrderedRobinDict) = OrderedRobinDict(d) Base.empty(d::OrderedRobinDict, ::Type{K}, ::Type{V}) where {K, V} = OrderedRobinDict{K, V}() OrderedRobinDict(ps::Pair{K,V}...) where {K,V} = OrderedRobinDict{K,V}(ps) OrderedRobinDict(ps::Pair...) = OrderedRobinDict(ps) OrderedRobinDict(d::AbstractDict{K, V}) where {K, V} = OrderedRobinDict{K, V}(d) function OrderedRobinDict(kv) try return dict_with_eltype((K, V) -> OrderedRobinDict{K, V}, kv, eltype(kv)) catch e if !isiterable(typeof(kv)) || !all(x -> isa(x, Union{Tuple,Pair}), kv) !all(x->isa(x,Union{Tuple,Pair}),kv) throw(ArgumentError("OrderedRobinDict(kv): kv needs to be an iterator of tuples or pairs")) else rethrow(e) end #FILE: DataStructures.jl/src/sorted_set.jl ##CHUNK 1 """ Base.in(k,m::SortedSet) Return `true` iff element `k` is in sorted set `m` is a sorted set. Unlike the `in` function for `Set`, this routine will thrown an error if `k` is not convertible to `eltype(m)`. 
Time: O(*c* log *n*) """ @inline function Base.in(k_, m::SortedSet) i, exactfound = findkey(m.bt, convert(keytype(m),k_)) return exactfound end @inline Base.keytype(m::SortedSet{K,Ord}) where {K,Ord <: Ordering} = K @inline Base.keytype(::Type{SortedSet{K,Ord}}) where {K,Ord <: Ordering} = K #FILE: DataStructures.jl/src/priorityqueue.jl ##CHUNK 1 PriorityQueue(o1::Ordering, o2::Ordering) = throw(ArgumentError("PriorityQueue with two parameters must be called with an Ordering and an iterable of pairs")) PriorityQueue(kv, o::Ordering=Forward) = PriorityQueue(o, kv) function PriorityQueue(o::Ordering, kv) try _priority_queue_with_eltype(o, kv, eltype(kv)) catch e if not_iterator_of_pairs(kv) throw(ArgumentError("PriorityQueue(kv): kv needs to be an iterator of tuples or pairs")) else rethrow(e) end end end _priority_queue_with_eltype(o::Ord, ps, ::Type{Pair{K,V}} ) where {K,V,Ord} = PriorityQueue{ K, V,Ord}(o, ps) _priority_queue_with_eltype(o::Ord, kv, ::Type{Tuple{K,V}}) where {K,V,Ord} = PriorityQueue{ K, V,Ord}(o, kv) _priority_queue_with_eltype(o::Ord, ps, ::Type{Pair{K}} ) where {K, Ord} = PriorityQueue{ K,Any,Ord}(o, ps) _priority_queue_with_eltype(o::Ord, kv, ::Type ) where { Ord} = PriorityQueue{Any,Any,Ord}(o, kv) ## TODO: It seems impossible (or at least very challenging) to create the eltype below. #FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 SwissDict(ps::Pair...) = SwissDict(ps) function SwissDict(kv) try dict_with_eltype((K, V) -> SwissDict{K, V}, kv, eltype(kv)) catch e if !isiterable(typeof(kv)) || !all(x->isa(x,Union{Tuple,Pair}),kv) throw(ArgumentError("SwissDict(kv): kv needs to be an iterator of tuples or pairs")) else rethrow(e) end end end # SIMD utilities @inline _expand16(u::UInt8) = ntuple(i->VecElement(u), Val(16)) _blsr(i::UInt32)= i & (i-Int32(1)) @inline _vcmp_eq(u::_u8x16, v::_u8x16) = Core.Intrinsics.llvmcall((""" %cmp = icmp eq <16 x i8> %0, %1 #FILE: DataStructures.jl/src/sorted_container_iteration.jl ##CHUNK 1 Base.eltype(::Type{IterableObject{C, R, KeysIter, NoTokens, D}}) where {C, R, D} = keytype(C) Base.eltype(::Type{IterableObject{C, R, ValsIter, NoTokens, D}}) where {C, R, D} = valtype(C) Base.eltype(::Type{IterableObject{C, R, KeysValsIter, NoTokens, D}}) where {C, R, D} = eltype(C) Base.eltype(::ItObj) where {ItObj <: IterableObject} = eltype(ItObj) get_item(m::SortedContainer, state::SAIterationState) = get_item(base_iterable_object(m), state) function next(ito::IterableObject{C, R, KV, T, ForwardIter}, state::SAIterationState) where {C, R, KV, T} sn = state.next (sn < 3 || !(sn in ito.m.bt.useddatacells)) && throw(BoundsError()) SAIterationState(nextloc0(ito.m.bt, sn), state.final) end #CURRENT FILE: DataStructures.jl/src/dict_support.jl
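The focal helper above is an internal, unexported utility that several constructors use to decide whether to raise the clearer "needs an iterator of tuples or pairs" error. A small sketch of its intent, accessed through the qualified name (an assumption about module-internal access only):

```julia
using DataStructures

f = DataStructures.not_iterator_of_pairs

@assert f(['a', 'b'])             # a Vector{Char} cannot be read as pairs
@assert !f(Dict(1 => 2))          # a Dict iterates Pair{Int,Int}
@assert !f([(1, 2), (3, 4)])      # 2-tuples are accepted
@assert !f(["x" => 1, "y" => 2])  # so are explicit Pairs
```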
103
117
DataStructures.jl
22
function root_union!(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer} parents = s.parents rks = s.ranks @inbounds xrank = rks[x] @inbounds yrank = rks[y] if xrank < yrank x, y = y, x elseif xrank == yrank rks[x] += one(T) end @inbounds parents[y] = x s.ngroups -= one(T) return x end
function root_union!(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer} parents = s.parents rks = s.ranks @inbounds xrank = rks[x] @inbounds yrank = rks[y] if xrank < yrank x, y = y, x elseif xrank == yrank rks[x] += one(T) end @inbounds parents[y] = x s.ngroups -= one(T) return x end
[ 103, 117 ]
function root_union!(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer} parents = s.parents rks = s.ranks @inbounds xrank = rks[x] @inbounds yrank = rks[y] if xrank < yrank x, y = y, x elseif xrank == yrank rks[x] += one(T) end @inbounds parents[y] = x s.ngroups -= one(T) return x end
function root_union!(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer} parents = s.parents rks = s.ranks @inbounds xrank = rks[x] @inbounds yrank = rks[y] if xrank < yrank x, y = y, x elseif xrank == yrank rks[x] += one(T) end @inbounds parents[y] = x s.ngroups -= one(T) return x end
root_union!
103
117
src/disjoint_set.jl
#CURRENT FILE: DataStructures.jl/src/disjoint_set.jl ##CHUNK 1 Assume `x ≠ y` (unsafe). """ """ push!(s::IntDisjointSet{T}) Make a new subset with an automatically chosen new element `x`. Returns the new element. Throw an `ArgumentError` if the capacity of the set would be exceeded. """ function Base.push!(s::IntDisjointSet{T}) where {T<:Integer} l = length(s) l < typemax(T) || throw(ArgumentError(_intdisjointset_bounds_err_msg(T))) x = l + one(T) push!(s.parents, x) push!(s.ranks, zero(T)) s.ngroups += one(T) return x end ##CHUNK 2 function Base.push!(s::IntDisjointSet{T}) where {T<:Integer} l = length(s) l < typemax(T) || throw(ArgumentError(_intdisjointset_bounds_err_msg(T))) x = l + one(T) push!(s.parents, x) push!(s.ranks, zero(T)) s.ngroups += one(T) return x end """ DisjointSet{T}(xs) A forest of disjoint sets of arbitrary value type `T`. It is a wrapper of `IntDisjointSet{Int}`, which uses a dictionary to map the input value to an internal index. """ mutable struct DisjointSet{T} <: AbstractSet{T} intmap::Dict{T,Int} ##CHUNK 3 @inbounds p = parents[x] @inbounds if parents[p] != p parents[x] = p = _find_root_impl!(parents, p) end return p end """ find_root!(s::IntDisjointSet{T}, x::T) Find the root element of the subset that contains an member `x`. Path compression happens here. """ find_root!(s::IntDisjointSet{T}, x::T) where {T<:Integer} = find_root_impl!(s.parents, x) """ in_same_set(s::IntDisjointSet{T}, x::T, y::T) Returns `true` if `x` and `y` belong to the same subset in `s`, and `false` otherwise. """ ##CHUNK 4 """ num_groups(s::IntDisjointSet) Get a number of groups. """ num_groups(s::IntDisjointSet) = s.ngroups Base.eltype(::Type{IntDisjointSet{T}}) where {T<:Integer} = T # find the root element of the subset that contains x # path compression is implemented here function find_root_impl!(parents::Vector{T}, x::Integer) where {T<:Integer} p = parents[x] @inbounds if parents[p] != p parents[x] = p = _find_root_impl!(parents, p) end return p end # unsafe version of the above function _find_root_impl!(parents::Vector{T}, x::Integer) where {T<:Integer} ##CHUNK 5 in_same_set(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer} = find_root!(s, x) == find_root!(s, y) """ union!(s::IntDisjointSet{T}, x::T, y::T) Merge the subset containing `x` and that containing `y` into one and return the root of the new set. """ function Base.union!(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer} parents = s.parents xroot = find_root_impl!(parents, x) yroot = find_root_impl!(parents, y) return xroot != yroot ? root_union!(s, xroot, yroot) : xroot end """ root_union!(s::IntDisjointSet{T}, x::T, y::T) Form a new set that is the union of the two sets whose root elements are `x` and `y` and return the root of the new set. ##CHUNK 6 function find_root_impl!(parents::Vector{T}, x::Integer) where {T<:Integer} p = parents[x] @inbounds if parents[p] != p parents[x] = p = _find_root_impl!(parents, p) end return p end # unsafe version of the above function _find_root_impl!(parents::Vector{T}, x::Integer) where {T<:Integer} @inbounds p = parents[x] @inbounds if parents[p] != p parents[x] = p = _find_root_impl!(parents, p) end return p end """ find_root!(s::IntDisjointSet{T}, x::T) ##CHUNK 7 """ root_union!(s::DisjointSet{T}, x::T, y::T) Form a new set that is the union of the two sets whose root elements are `x` and `y` and return the root of the new set. Assume `x ≠ y` (unsafe). 
""" root_union!(s::DisjointSet{T}, x::T, y::T) where {T} = s.revmap[root_union!(s.internal, s.intmap[x], s.intmap[y])] """ push!(s::DisjointSet{T}, x::T) Make a new subset containing `x` if any existing subset of `s` does not contain `x`. """ function Base.push!(s::DisjointSet{T}, x::T) where T haskey(s.intmap, x) && return x id = push!(s.internal) s.intmap[x] = id push!(s.revmap, x) # Note, this assumes invariant: length(s.revmap) == id return x ##CHUNK 8 A forest of disjoint sets of integers, which is a data structure (also called a union–find data structure or merge–find set) that tracks a set of elements partitioned into a number of disjoint (non-overlapping) subsets. """ mutable struct IntDisjointSet{T<:Integer} parents::Vector{T} ranks::Vector{T} ngroups::T end IntDisjointSet(n::T) where {T<:Integer} = IntDisjointSet{T}(collect(Base.OneTo(n)), zeros(T, n), n) IntDisjointSet{T}(n::Integer) where {T<:Integer} = IntDisjointSet{T}(collect(Base.OneTo(T(n))), zeros(T, T(n)), T(n)) Base.length(s::IntDisjointSet) = length(s.parents) function Base.sizehint!(s::IntDisjointSet, n::Integer) sizehint!(s.parents, n) sizehint!(s.ranks, n) return s end ##CHUNK 9 Find the root element of the subset that contains an member `x`. Path compression happens here. """ find_root!(s::IntDisjointSet{T}, x::T) where {T<:Integer} = find_root_impl!(s.parents, x) """ in_same_set(s::IntDisjointSet{T}, x::T, y::T) Returns `true` if `x` and `y` belong to the same subset in `s`, and `false` otherwise. """ in_same_set(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer} = find_root!(s, x) == find_root!(s, y) """ union!(s::IntDisjointSet{T}, x::T, y::T) Merge the subset containing `x` and that containing `y` into one and return the root of the new set. """ function Base.union!(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer} parents = s.parents ##CHUNK 10 xroot = find_root_impl!(parents, x) yroot = find_root_impl!(parents, y) return xroot != yroot ? root_union!(s, xroot, yroot) : xroot end """ root_union!(s::IntDisjointSet{T}, x::T, y::T) Form a new set that is the union of the two sets whose root elements are `x` and `y` and return the root of the new set. Assume `x ≠ y` (unsafe). """ """ push!(s::IntDisjointSet{T}) Make a new subset with an automatically chosen new element `x`. Returns the new element. Throw an `ArgumentError` if the capacity of the set would be exceeded. """
150
162
DataStructures.jl
23
function DisjointSet{T}(xs) where T # xs must be iterable imap = Dict{T,Int}() rmap = Vector{T}() n = length(xs)::Int sizehint!(imap, n) sizehint!(rmap, n) id = 0 for x in xs imap[x] = (id += 1) push!(rmap,x) end return new{T}(imap, rmap, IntDisjointSet(n)) end
function DisjointSet{T}(xs) where T # xs must be iterable imap = Dict{T,Int}() rmap = Vector{T}() n = length(xs)::Int sizehint!(imap, n) sizehint!(rmap, n) id = 0 for x in xs imap[x] = (id += 1) push!(rmap,x) end return new{T}(imap, rmap, IntDisjointSet(n)) end
[ 150, 162 ]
function DisjointSet{T}(xs) where T # xs must be iterable imap = Dict{T,Int}() rmap = Vector{T}() n = length(xs)::Int sizehint!(imap, n) sizehint!(rmap, n) id = 0 for x in xs imap[x] = (id += 1) push!(rmap,x) end return new{T}(imap, rmap, IntDisjointSet(n)) end
function DisjointSet{T}(xs) where T # xs must be iterable imap = Dict{T,Int}() rmap = Vector{T}() n = length(xs)::Int sizehint!(imap, n) sizehint!(rmap, n) id = 0 for x in xs imap[x] = (id += 1) push!(rmap,x) end return new{T}(imap, rmap, IntDisjointSet(n)) end
DisjointSet{T}
150
162
src/disjoint_set.jl
#CURRENT FILE: DataStructures.jl/src/disjoint_set.jl ##CHUNK 1 push!(s.ranks, zero(T)) s.ngroups += one(T) return x end """ DisjointSet{T}(xs) A forest of disjoint sets of arbitrary value type `T`. It is a wrapper of `IntDisjointSet{Int}`, which uses a dictionary to map the input value to an internal index. """ mutable struct DisjointSet{T} <: AbstractSet{T} intmap::Dict{T,Int} revmap::Vector{T} internal::IntDisjointSet{Int} DisjointSet{T}() where T = new{T}(Dict{T,Int}(), Vector{T}(), IntDisjointSet(0)) end ##CHUNK 2 """ push!(s::DisjointSet{T}, x::T) Make a new subset containing `x` if any existing subset of `s` does not contain `x`. """ function Base.push!(s::DisjointSet{T}, x::T) where T haskey(s.intmap, x) && return x id = push!(s.internal) s.intmap[x] = id push!(s.revmap, x) # Note, this assumes invariant: length(s.revmap) == id return x end ##CHUNK 3 It is a wrapper of `IntDisjointSet{Int}`, which uses a dictionary to map the input value to an internal index. """ mutable struct DisjointSet{T} <: AbstractSet{T} intmap::Dict{T,Int} revmap::Vector{T} internal::IntDisjointSet{Int} DisjointSet{T}() where T = new{T}(Dict{T,Int}(), Vector{T}(), IntDisjointSet(0)) end DisjointSet() = DisjointSet{Any}() DisjointSet(xs) = _DisjointSet(xs, Base.IteratorEltype(xs)) _DisjointSet(xs, ::Base.HasEltype) = DisjointSet{eltype(xs)}(xs) function _DisjointSet(xs, ::Base.EltypeUnknown) T = Base.@default_eltype(xs) (isconcretetype(T) || T === Union{}) || return Base.grow_to!(DisjointSet{T}(), xs) return DisjointSet{T}(xs) end ##CHUNK 4 # Disjoint-Set ############################################################ # # A forest of disjoint sets of integers # # Since each element is an integer, we can use arrays # instead of dictionary (for efficiency) # # Disjoint sets over other key types can be implemented # based on an IntDisjointSet through a map from the key # to an integer index # ############################################################ _intdisjointset_bounds_err_msg(T) = "the maximum number of elements in IntDisjointSet{$T} is $(typemax(T))" """ IntDisjointSet{T<:Integer}(n::Integer) ##CHUNK 5 elseif xrank == yrank rks[x] += one(T) end @inbounds parents[y] = x s.ngroups -= one(T) return x end """ push!(s::IntDisjointSet{T}) Make a new subset with an automatically chosen new element `x`. Returns the new element. Throw an `ArgumentError` if the capacity of the set would be exceeded. """ function Base.push!(s::IntDisjointSet{T}) where {T<:Integer} l = length(s) l < typemax(T) || throw(ArgumentError(_intdisjointset_bounds_err_msg(T))) x = l + one(T) push!(s.parents, x) ##CHUNK 6 # based on an IntDisjointSet through a map from the key # to an integer index # ############################################################ _intdisjointset_bounds_err_msg(T) = "the maximum number of elements in IntDisjointSet{$T} is $(typemax(T))" """ IntDisjointSet{T<:Integer}(n::Integer) A forest of disjoint sets of integers, which is a data structure (also called a union–find data structure or merge–find set) that tracks a set of elements partitioned into a number of disjoint (non-overlapping) subsets. 
""" mutable struct IntDisjointSet{T<:Integer} parents::Vector{T} ranks::Vector{T} ngroups::T end ##CHUNK 7 DisjointSet() = DisjointSet{Any}() DisjointSet(xs) = _DisjointSet(xs, Base.IteratorEltype(xs)) _DisjointSet(xs, ::Base.HasEltype) = DisjointSet{eltype(xs)}(xs) function _DisjointSet(xs, ::Base.EltypeUnknown) T = Base.@default_eltype(xs) (isconcretetype(T) || T === Union{}) || return Base.grow_to!(DisjointSet{T}(), xs) return DisjointSet{T}(xs) end Base.iterate(s::DisjointSet) = iterate(s.revmap) Base.iterate(s::DisjointSet, i) = iterate(s.revmap, i) Base.length(s::DisjointSet) = length(s.internal) """ num_groups(s::DisjointSet) Get a number of groups. """ ##CHUNK 8 Base.iterate(s::DisjointSet) = iterate(s.revmap) Base.iterate(s::DisjointSet, i) = iterate(s.revmap, i) Base.length(s::DisjointSet) = length(s.internal) """ num_groups(s::DisjointSet) Get a number of groups. """ num_groups(s::DisjointSet) = num_groups(s.internal) Base.eltype(::Type{DisjointSet{T}}) where T = T Base.empty(s::DisjointSet{T}, ::Type{U}=T) where {T,U} = DisjointSet{U}() function Base.sizehint!(s::DisjointSet, n::Integer) sizehint!(s.intmap, n) sizehint!(s.revmap, n) sizehint!(s.internal, n) return s end ##CHUNK 9 Make a new subset with an automatically chosen new element `x`. Returns the new element. Throw an `ArgumentError` if the capacity of the set would be exceeded. """ function Base.push!(s::IntDisjointSet{T}) where {T<:Integer} l = length(s) l < typemax(T) || throw(ArgumentError(_intdisjointset_bounds_err_msg(T))) x = l + one(T) push!(s.parents, x) push!(s.ranks, zero(T)) s.ngroups += one(T) return x end """ DisjointSet{T}(xs) A forest of disjoint sets of arbitrary value type `T`. ##CHUNK 10 A forest of disjoint sets of integers, which is a data structure (also called a union–find data structure or merge–find set) that tracks a set of elements partitioned into a number of disjoint (non-overlapping) subsets. """ mutable struct IntDisjointSet{T<:Integer} parents::Vector{T} ranks::Vector{T} ngroups::T end IntDisjointSet(n::T) where {T<:Integer} = IntDisjointSet{T}(collect(Base.OneTo(n)), zeros(T, n), n) IntDisjointSet{T}(n::Integer) where {T<:Integer} = IntDisjointSet{T}(collect(Base.OneTo(T(n))), zeros(T, T(n)), T(n)) Base.length(s::IntDisjointSet) = length(s.parents) function Base.sizehint!(s::IntDisjointSet, n::Integer) sizehint!(s.parents, n) sizehint!(s.ranks, n) return s end
111
131
DataStructures.jl
24
function nextreme(ord::Base.Ordering, n::Int, arr::AbstractVector{T}) where T if n <= 0 return T[] # sort(arr)[1:n] returns [] for n <= 0 elseif n >= length(arr) return sort(arr, order = ord) end rev = Base.ReverseOrdering(ord) buffer = heapify(arr[1:n], rev) for i = n + 1 : length(arr) @inbounds xi = arr[i] if Base.lt(rev, buffer[1], xi) buffer[1] = xi percolate_down!(buffer, 1, rev) end end return sort!(buffer, order = ord) end
function nextreme(ord::Base.Ordering, n::Int, arr::AbstractVector{T}) where T if n <= 0 return T[] # sort(arr)[1:n] returns [] for n <= 0 elseif n >= length(arr) return sort(arr, order = ord) end rev = Base.ReverseOrdering(ord) buffer = heapify(arr[1:n], rev) for i = n + 1 : length(arr) @inbounds xi = arr[i] if Base.lt(rev, buffer[1], xi) buffer[1] = xi percolate_down!(buffer, 1, rev) end end return sort!(buffer, order = ord) end
[ 111, 131 ]
function nextreme(ord::Base.Ordering, n::Int, arr::AbstractVector{T}) where T if n <= 0 return T[] # sort(arr)[1:n] returns [] for n <= 0 elseif n >= length(arr) return sort(arr, order = ord) end rev = Base.ReverseOrdering(ord) buffer = heapify(arr[1:n], rev) for i = n + 1 : length(arr) @inbounds xi = arr[i] if Base.lt(rev, buffer[1], xi) buffer[1] = xi percolate_down!(buffer, 1, rev) end end return sort!(buffer, order = ord) end
function nextreme(ord::Base.Ordering, n::Int, arr::AbstractVector{T}) where T if n <= 0 return T[] # sort(arr)[1:n] returns [] for n <= 0 elseif n >= length(arr) return sort(arr, order = ord) end rev = Base.ReverseOrdering(ord) buffer = heapify(arr[1:n], rev) for i = n + 1 : length(arr) @inbounds xi = arr[i] if Base.lt(rev, buffer[1], xi) buffer[1] = xi percolate_down!(buffer, 1, rev) end end return sort!(buffer, order = ord) end
nextreme
111
131
src/heaps.jl
#FILE: DataStructures.jl/src/heaps/arrays_as_heaps.jl ##CHUNK 1 j = r > len || lt(o, xs[l], xs[r]) ? l : r lt(o, xs[j], x) || break xs[i] = xs[j] i = j end xs[i] = x end percolate_down!(xs::AbstractArray, i::Integer, o::Ordering, len::Integer=length(xs)) = percolate_down!(xs, i, xs[i], o, len) # Binary min-heap percolate up. function percolate_up!(xs::AbstractArray, i::Integer, x=xs[i], o::Ordering=Forward) @inbounds while (j = heapparent(i)) >= 1 lt(o, x, xs[j]) || break xs[i] = xs[j] i = j end xs[i] = x end ##CHUNK 2 # Binary heap indexing heapleft(i::Integer) = 2i heapright(i::Integer) = 2i + 1 heapparent(i::Integer) = div(i, 2) # Binary min-heap percolate down. function percolate_down!(xs::AbstractArray, i::Integer, x=xs[i], o::Ordering=Forward, len::Integer=length(xs)) @inbounds while (l = heapleft(i)) <= len r = heapright(i) j = r > len || lt(o, xs[l], xs[r]) ? l : r lt(o, xs[j], x) || break xs[i] = xs[j] i = j end xs[i] = x end percolate_down!(xs::AbstractArray, i::Integer, o::Ordering, len::Integer=length(xs)) = percolate_down!(xs, i, xs[i], o, len) ##CHUNK 3 # Binary min-heap percolate up. function percolate_up!(xs::AbstractArray, i::Integer, x=xs[i], o::Ordering=Forward) @inbounds while (j = heapparent(i)) >= 1 lt(o, x, xs[j]) || break xs[i] = xs[j] i = j end xs[i] = x end @inline percolate_up!(xs::AbstractArray, i::Integer, o::Ordering) = percolate_up!(xs, i, xs[i], o) """ heappop!(v, [ord]) Given a binary heap-ordered array, remove and return the lowest ordered element. For efficiency, this function does not check that the array is indeed heap-ordered. """ function heappop!(xs::AbstractArray, o::Ordering=Forward) ##CHUNK 4 @inline percolate_up!(xs::AbstractArray, i::Integer, o::Ordering) = percolate_up!(xs, i, xs[i], o) """ heappop!(v, [ord]) Given a binary heap-ordered array, remove and return the lowest ordered element. For efficiency, this function does not check that the array is indeed heap-ordered. """ function heappop!(xs::AbstractArray, o::Ordering=Forward) x = xs[1] y = pop!(xs) if !isempty(xs) percolate_down!(xs, 1, y, o) end return x end """ heappush!(v, x, [ord]) ##CHUNK 5 # This contains code that was formerly a part of Julia. License is MIT: http://julialang.org/license using Base.Order: Forward, Ordering, lt const DefaultReverseOrdering = Base.ReverseOrdering{Base.ForwardOrdering} # Heap operations on flat arrays # ------------------------------ # Binary heap indexing heapleft(i::Integer) = 2i heapright(i::Integer) = 2i + 1 heapparent(i::Integer) = div(i, 2) # Binary min-heap percolate down. function percolate_down!(xs::AbstractArray, i::Integer, x=xs[i], o::Ordering=Forward, len::Integer=length(xs)) @inbounds while (l = heapleft(i)) <= len r = heapright(i) #CURRENT FILE: DataStructures.jl/src/heaps.jl ##CHUNK 1 Note that for simple heaps (not mutable or minmax) sorting the internal array of elements in-place is faster. """ function extract_all_rev!(h::AbstractHeap{VT}) where VT n = length(h) r = Vector{VT}(undef, n) for i in 1 : n r[n + 1 - i] = pop!(h) end return r end # Array functions using heaps """ nextreme(ord, n, arr) return an array of the first `n` values of `arr` sorted by `ord`. """ ##CHUNK 2 """ nlargest(n, arr; kw...) Return the `n` largest elements of the array `arr`. 
Equivalent to: sort(arr, kw..., rev=true)[1:min(n, end)] Note that if `arr` contains floats and is free of NaN values, then the following alternative may be used to achieve 2x performance: DataStructures.nextreme(DataStructures.FasterReverse(), n, arr) This faster version is equivalent to: sort(arr, lt = >)[1:min(n, end)] """ function nlargest(n::Int, arr::AbstractVector; lt=isless, by=identity) order = Base.ReverseOrdering(Base.ord(lt, by, nothing)) ##CHUNK 3 for i in 1 : n r[i] = pop!(h) end return r end """ extract_all_rev!(h) Return an array of heap elements in reverse sorted order (heap head at last index). Note that for simple heaps (not mutable or minmax) sorting the internal array of elements in-place is faster. """ function extract_all_rev!(h::AbstractHeap{VT}) where VT n = length(h) r = Vector{VT}(undef, n) for i in 1 : n r[n + 1 - i] = pop!(h) end ##CHUNK 4 then the following alternative may be used to achieve 2x performance: DataStructures.nextreme(DataStructures.FasterReverse(), n, arr) This faster version is equivalent to: sort(arr, lt = >)[1:min(n, end)] """ function nlargest(n::Int, arr::AbstractVector; lt=isless, by=identity) order = Base.ReverseOrdering(Base.ord(lt, by, nothing)) return nextreme(order, n, arr) end """ nsmallest(n, arr; kw...) Return the `n` smallest elements of the array `arr`. Equivalent to: sort(arr; kw...)[1:min(n, end)] ##CHUNK 5 Note that if `arr` contains floats and is free of NaN values, then the following alternative may be used to achieve 2x performance: DataStructures.nextreme(DataStructures.FasterForward(), n, arr) This faster version is equivalent to: sort(arr, lt = <)[1:min(n, end)] """ function nsmallest(n::Int, arr::AbstractVector; lt=isless, by=identity) order = Base.ord(lt, by, nothing) return nextreme(order, n, arr) end
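`nextreme` above is the kernel behind `nlargest`/`nsmallest`: it keeps a reverse-ordered heap of the best `n` elements seen so far and sorts it at the end. A sketch through the public wrappers, plus the direct call the docstrings above suggest for NaN-free float data:

```julia
using DataStructures

arr = [4, 1, 9, 7, 3, 8]
@assert nlargest(3, arr)  == [9, 8, 7]   # same as sort(arr, rev=true)[1:3]
@assert nsmallest(2, arr) == [1, 3]      # same as sort(arr)[1:2]

# Direct use of the kernel with the faster ordering mentioned above:
@assert DataStructures.nextreme(DataStructures.FasterForward(), 2, [2.0, 5.0, 1.0]) == [1.0, 2.0]
```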
159
169
DataStructures.jl
25
function findnextidx(s::IntSet, i::Int, invert=false) if s.inverse ⊻ invert # i+1 could rollover causing a BoundsError in findnext/findnextnot nextidx = i == typemax(Int) ? 0 : something(findnextnot(s.bits, i+1), 0) # Extend indices beyond the length of the bits since it is inverted nextidx = nextidx == 0 ? max(i, length(s.bits))+1 : nextidx else nextidx = i == typemax(Int) ? 0 : something(findnext(s.bits, i+1), 0) end return nextidx end
function findnextidx(s::IntSet, i::Int, invert=false) if s.inverse ⊻ invert # i+1 could rollover causing a BoundsError in findnext/findnextnot nextidx = i == typemax(Int) ? 0 : something(findnextnot(s.bits, i+1), 0) # Extend indices beyond the length of the bits since it is inverted nextidx = nextidx == 0 ? max(i, length(s.bits))+1 : nextidx else nextidx = i == typemax(Int) ? 0 : something(findnext(s.bits, i+1), 0) end return nextidx end
[ 159, 169 ]
function findnextidx(s::IntSet, i::Int, invert=false) if s.inverse ⊻ invert # i+1 could rollover causing a BoundsError in findnext/findnextnot nextidx = i == typemax(Int) ? 0 : something(findnextnot(s.bits, i+1), 0) # Extend indices beyond the length of the bits since it is inverted nextidx = nextidx == 0 ? max(i, length(s.bits))+1 : nextidx else nextidx = i == typemax(Int) ? 0 : something(findnext(s.bits, i+1), 0) end return nextidx end
function findnextidx(s::IntSet, i::Int, invert=false) if s.inverse ⊻ invert # i+1 could rollover causing a BoundsError in findnext/findnextnot nextidx = i == typemax(Int) ? 0 : something(findnextnot(s.bits, i+1), 0) # Extend indices beyond the length of the bits since it is inverted nextidx = nextidx == 0 ? max(i, length(s.bits))+1 : nextidx else nextidx = i == typemax(Int) ? 0 : something(findnext(s.bits, i+1), 0) end return nextidx end
findnextidx
159
169
src/int_set.jl
#FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 # this assumes that there is a key/value present in the dictionary at index index0 = index sz = length(h.keys) @inbounds while true index0 = (index0 & (sz - 1)) + 1 if isslotempty(h, index0) || calculate_distance(h, index0) == 0 break end end #index0 represents the position before which we have to shift backwards # the backwards shifting algorithm curr = index next = (index & (sz - 1)) + 1 @inbounds while next != index0 h.vals[curr] = h.vals[next] h.keys[curr] = h.keys[next] h.hashes[curr] = h.hashes[next] ##CHUNK 2 #index0 represents the position before which we have to shift backwards # the backwards shifting algorithm curr = index next = (index & (sz - 1)) + 1 @inbounds while next != index0 h.vals[curr] = h.vals[next] h.keys[curr] = h.keys[next] h.hashes[curr] = h.hashes[next] curr = next next = (next & (sz-1)) + 1 end #curr is at the last position, reset back to normal isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, curr-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, curr-1) @inbounds h.hashes[curr] = 0x0 h.count -= 1 #CURRENT FILE: DataStructures.jl/src/int_set.jl ##CHUNK 1 function Base.iterate(s::IntSet, i::Int, invert=false) i <= 0 && return nothing return (i-1, findnextidx(s, i, invert)) end # Nextnot iterates through elements *not* in the set nextnot(s::IntSet, i) = iterate(s, i, true) function Base.last(s::IntSet) l = length(s.bits) if s.inverse idx = l < typemax(Int) ? typemax(Int) : something(findprevnot(s.bits, l), 0) else idx = something(findprev(s.bits, l), 0) end idx == 0 ? throw(ArgumentError("collection must be non-empty")) : idx - 1 end Base.length(s::IntSet) = (n = sum(s.bits); ifelse(s.inverse, typemax(Int) - n, n)) ##CHUNK 2 idx = n+1 if 1 <= idx <= length(s.bits) unsafe_getindex(s.bits, idx) != s.inverse else ifelse((idx <= 0) | (idx > typemax(Int)), false, s.inverse) end end Base.iterate(s::IntSet) = iterate(s, findnextidx(s, 0)) function Base.iterate(s::IntSet, i::Int, invert=false) i <= 0 && return nothing return (i-1, findnextidx(s, i, invert)) end # Nextnot iterates through elements *not* in the set nextnot(s::IntSet, i) = iterate(s, i, true) function Base.last(s::IntSet) ##CHUNK 3 end function Base.symdiff!(s1::IntSet, s2::IntSet) e = _matchlength!(s1.bits, length(s2.bits)) map!(⊻, s1.bits, s1.bits, s2.bits) s2.inverse && (s1.inverse = !s1.inverse) append!(s1.bits, e) return s1 end function Base.in(n::Integer, s::IntSet) idx = n+1 if 1 <= idx <= length(s.bits) unsafe_getindex(s.bits, idx) != s.inverse else ifelse((idx <= 0) | (idx > typemax(Int)), false, s.inverse) end end Base.iterate(s::IntSet) = iterate(s, findnextidx(s, 0)) ##CHUNK 4 l = length(s.bits) if s.inverse idx = l < typemax(Int) ? typemax(Int) : something(findprevnot(s.bits, l), 0) else idx = something(findprev(s.bits, l), 0) end idx == 0 ? 
throw(ArgumentError("collection must be non-empty")) : idx - 1 end Base.length(s::IntSet) = (n = sum(s.bits); ifelse(s.inverse, typemax(Int) - n, n)) complement(s::IntSet) = complement!(copy(s)) complement!(s::IntSet) = (s.inverse = !s.inverse; s) function Base.show(io::IO, s::IntSet) print(io, "IntSet([") first = true for n in s if s.inverse && n > 2 state = nextnot(s, n - 3) ##CHUNK 5 end function Base.:(==)(s1::IntSet, s2::IntSet) l1 = length(s1.bits) l2 = length(s2.bits) l1 < l2 && return ==(s2, s1) # Swap so s1 is always equal-length or longer # Try to do this without allocating memory or checking bit-by-bit if s1.inverse == s2.inverse # If the lengths are the same, simply punt to bitarray comparison l1 == l2 && return s1.bits == s2.bits # Otherwise check the last bit. If equal, we only need to check up to l2 return findprev(s1.bits, l1) == findprev(s2.bits, l2) && unsafe_getindex(s1.bits, 1:l2) == s2.bits else # one complement, one not. Could feasibly be true on 32 bit machines # Only if all non-overlapping bits are set and overlaps are inverted return l1 == typemax(Int) && map!(!, unsafe_getindex(s1.bits, 1:l2)) == s2.bits && (l1 == l2 || all(unsafe_getindex(s1.bits, l2+1:l1))) ##CHUNK 6 l1 == l2 && return s1.bits == s2.bits # Otherwise check the last bit. If equal, we only need to check up to l2 return findprev(s1.bits, l1) == findprev(s2.bits, l2) && unsafe_getindex(s1.bits, 1:l2) == s2.bits else # one complement, one not. Could feasibly be true on 32 bit machines # Only if all non-overlapping bits are set and overlaps are inverted return l1 == typemax(Int) && map!(!, unsafe_getindex(s1.bits, 1:l2)) == s2.bits && (l1 == l2 || all(unsafe_getindex(s1.bits, l2+1:l1))) end end const hashis_seed = UInt === UInt64 ? 0x88989f1fc7dea67d : 0xc7dea67d function Base.hash(s::IntSet, h::UInt) # Only hash the bits array up to the last-set bit to prevent extra empty # bits from changing the hash result l = findprev(s.bits, length(s.bits)) return hash(unsafe_getindex(s.bits, 1:l), h) ⊻ hash(s.inverse) ⊻ hashis_seed end ##CHUNK 7 Base.intersect(s1::IntSet) = copy(s1) Base.intersect(s1::IntSet, ss...) = intersect(s1, intersect(ss...)) function Base.intersect(s1::IntSet, ns) s = IntSet() for n in ns n in s1 && push!(s, n) end return s end Base.intersect(s1::IntSet, s2::IntSet) = intersect!(copy(s1), s2) function Base.intersect!(s1::IntSet, s2::IntSet) l = length(s2.bits) if !s1.inverse & !s2.inverse; _resize0!(s1.bits, l); map!(&, s1.bits, s1.bits, s2.bits) elseif s1.inverse & !s2.inverse; _resize0!(s1.bits, l); map!(<, s1.bits, s1.bits, s2.bits); s1.inverse = false elseif !s1.inverse & s2.inverse; e = _matchlength!(s1.bits, l); map!(>, s1.bits, s1.bits, s2.bits); append!(s1.bits, e) else #= s1.inverse & s2.inverse=# e = _matchlength!(s1.bits, l); map!(|, s1.bits, s1.bits, s2.bits); append!(s1.bits, e) end return s1 end ##CHUNK 8 complement(s::IntSet) = complement!(copy(s)) complement!(s::IntSet) = (s.inverse = !s.inverse; s) function Base.show(io::IO, s::IntSet) print(io, "IntSet([") first = true for n in s if s.inverse && n > 2 state = nextnot(s, n - 3) if state !== nothing && state[2] <= 0 print(io, ", ..., ", typemax(Int)-1) break end end !first && print(io, ", ") print(io, n) first = false end print(io, "])")
64
76
DataStructures.jl
26
function Base.pop!(d::MultiDict, key, default) vs = get(d, key, Base.secret_table_token) if vs === Base.secret_table_token if default !== Base.secret_table_token return default else throw(KeyError(key)) end end v = pop!(vs) (length(vs) == 0) && delete!(d, key) return v end
function Base.pop!(d::MultiDict, key, default) vs = get(d, key, Base.secret_table_token) if vs === Base.secret_table_token if default !== Base.secret_table_token return default else throw(KeyError(key)) end end v = pop!(vs) (length(vs) == 0) && delete!(d, key) return v end
[ 64, 76 ]
function Base.pop!(d::MultiDict, key, default) vs = get(d, key, Base.secret_table_token) if vs === Base.secret_table_token if default !== Base.secret_table_token return default else throw(KeyError(key)) end end v = pop!(vs) (length(vs) == 0) && delete!(d, key) return v end
function Base.pop!(d::MultiDict, key, default) vs = get(d, key, Base.secret_table_token) if vs === Base.secret_table_token if default !== Base.secret_table_token return default else throw(KeyError(key)) end end v = pop!(vs) (length(vs) == 0) && delete!(d, key) return v end
Base.pop!
64
76
src/multi_dict.jl
#FILE: DataStructures.jl/src/sorted_dict.jl ##CHUNK 1 Returns `sc`. This is a no-op if `k` is not present in `sd`. Time: O(*c* log *n*) """ @inline function Base.delete!(m::SortedDict, k_) i, exactfound = findkey(m.bt, convert(keytype(m), k_)) if exactfound delete!(m.bt, i) end m end """ Base.pop!(sd::SortedDict, k) Base.pop!(sd::SortedDict, k, default) Delete the item with key `k` in `sd` and return the value that was associated with `k`. If `k` is not in `sd` return `default`, or throw a `KeyError` if `default` is not specified. Time: O(*c* log *n*) ##CHUNK 2 """ Base.pop!(sd::SortedDict, k) Base.pop!(sd::SortedDict, k, default) Delete the item with key `k` in `sd` and return the value that was associated with `k`. If `k` is not in `sd` return `default`, or throw a `KeyError` if `default` is not specified. Time: O(*c* log *n*) """ @inline function Base.pop!(m::SortedDict, k_) i, exactfound = findkey(m.bt, convert(keytype(m), k_)) !exactfound && throw(KeyError(k_)) @inbounds d = m.bt.data[i].d delete!(m.bt, i) return d end @inline function Base.pop!(m::SortedDict, k_, default) #FILE: DataStructures.jl/src/sorted_set.jl ##CHUNK 1 i, exactfound = findkey(m.bt, k) !exactfound && return default delete!(m.bt, i) return k end """ Base.popfirst!(ss::SortedSet) Delete the item with first key in SortedSet `ss` and returns the key. This function was named `pop!` in a previous version of the package. A `BoundsError` results if `ss` is empty. Time: O(log *n*) """ @inline function Base.popfirst!(m::SortedSet) i = beginloc(m.bt) i == 2 && throw(BoundsError()) k = m.bt.data[i].k delete!(m.bt, i) return k end ##CHUNK 2 k = convert(keytype(m),k_) i, exactfound = findkey(m.bt, k) !exactfound && throw(KeyError(k_)) k2 = m.bt.data[i].k delete!(m.bt, i) return k2 end @inline function Base.pop!(m::SortedSet, k_, default) k = convert(keytype(m),k_) i, exactfound = findkey(m.bt, k) !exactfound && return default delete!(m.bt, i) return k end """ Base.popfirst!(ss::SortedSet) Delete the item with first key in SortedSet `ss` and returns the ##CHUNK 3 Base.pop!(m::SortedSet) = error("pop!(::SortedSet) is disabled in this version; refer to popfirst! and `poplast! in the docs") """ poplast!(ss::SortedSet) Delete the item with last key in SortedSet `ss` and returns the key. A `BoundsError` results if `ss` is empty. This function will be renamed `Base.pop!` in a future version of the package. Time: O(log *n*) """ @inline function poplast!(m::SortedSet) i = endloc(m.bt) i == 2 && throw(BoundsError()) k = m.bt.data[i].k delete!(m.bt, i) return k end ##CHUNK 4 Base.pop!(ss::SortedSet, k, default) Delete the item with key `k` in `ss` and return the item that compares equal to `k` according to the sort order (which is not necessarily `k`, since equality in the sort-order does not necessarily imply hash-equality). If `k` is not found, return `default`, or throw a `KeyError` if `default` is not specified. Time: O(*c* log *n*) """ @inline function Base.pop!(m::SortedSet, k_) k = convert(keytype(m),k_) i, exactfound = findkey(m.bt, k) !exactfound && throw(KeyError(k_)) k2 = m.bt.data[i].k delete!(m.bt, i) return k2 end @inline function Base.pop!(m::SortedSet, k_, default) k = convert(keytype(m),k_) #FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 1 julia> pop!(d, "d") ERROR: KeyError: key "d" not found [...] julia> pop!(d, "e", 4) 4 ``` """ function Base.pop!(h::SwissDict, key) index = ht_keyindex(h, key) return index > 0 ? 
_pop!(h, index) : throw(KeyError(key)) end function Base.pop!(h::SwissDict, key, default) index = ht_keyindex(h, key) return index > 0 ? _pop!(h, index) : default end ##CHUNK 2 end function _pop!(h::SwissDict, index) @inbounds val = h.vals[index] _delete!(h, index) maybe_rehash_shrink!(h) return val end """ pop!(collection, key[, default]) Delete and return the mapping for `key` if it exists in `collection`, otherwise return `default`, or throw an error if `default` is not specified. # Examples ```jldoctest julia> d = SwissDict("a"=>1, "b"=>2, "c"=>3); julia> pop!(d, "a") #FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 function Base.pop!(h::RobinDict{K, V}, key0) where {K, V} key = convert(K, key0) index = rh_search(h, key) return index > 0 ? _pop!(h, index) : throw(KeyError(key)) end """ pop!(collection, key[, default]) Delete and return the mapping for `key` if it exists in `collection`, otherwise return `default`, or throw an error if `default` is not specified. # Examples ```jldoctest julia> d = RobinDict("a"=>1, "b"=>2, "c"=>3); julia> pop!(d, "a") 1 #CURRENT FILE: DataStructures.jl/src/multi_dict.jl ##CHUNK 1 if !haskey(d.d, k) d.d[k] = V[] end push!(d.d[k], v) return d end function Base.in(pr::(Tuple{Any,Any}), d::MultiDict{K,V}) where {K,V} k = convert(K, pr[1]) v = get(d,k,Base.secret_table_token) (v !== Base.secret_table_token) && (pr[2] in v) end Base.pop!(d::MultiDict, key) = pop!(d, key, Base.secret_table_token) Base.push!(d::MultiDict, kv::Pair) = insert!(d, kv[1], kv[2]) #Base.push!(d::MultiDict, kv::Pair, kv2::Pair) = (push!(d.d, kv, kv2); d) #Base.push!(d::MultiDict, kv::Pair, kv2::Pair, kv3::Pair...) = (push!(d.d, kv, kv2, kv3...); d) Base.push!(d::MultiDict, kv) = insert!(d, kv[1], kv[2])
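The focal `pop!` above removes the most recently inserted value for `key` and deletes the key once its value vector empties; the `default` argument suppresses the `KeyError`. A behavior sketch using only methods shown in this record's context:

```julia
using DataStructures

d = MultiDict{String,Int}()
push!(d, "a" => 1)
push!(d, "a" => 2)

@assert pop!(d, "a") == 2             # last value pushed for "a" comes off first
@assert pop!(d, "a") == 1             # vector now empty, so the key is removed
@assert pop!(d, "a", 0) == 0          # further pops fall back to the default
@assert pop!(d, "missing", -1) == -1  # missing keys also return the default
```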
98
112
DataStructures.jl
27
function Base.iterate(e::EnumerateAll) V = eltype(eltype(values(e.d))) vs = V[] dstate = iterate(e.d.d) vstate = iterate(vs) dstate === nothing || vstate === nothing && return nothing k = nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end
function Base.iterate(e::EnumerateAll) V = eltype(eltype(values(e.d))) vs = V[] dstate = iterate(e.d.d) vstate = iterate(vs) dstate === nothing || vstate === nothing && return nothing k = nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end
[ 98, 112 ]
function Base.iterate(e::EnumerateAll) V = eltype(eltype(values(e.d))) vs = V[] dstate = iterate(e.d.d) vstate = iterate(vs) dstate === nothing || vstate === nothing && return nothing k = nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end
function Base.iterate(e::EnumerateAll) V = eltype(eltype(values(e.d))) vs = V[] dstate = iterate(e.d.d) vstate = iterate(vs) dstate === nothing || vstate === nothing && return nothing k = nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end
Base.iterate
98
112
src/multi_dict.jl
#FILE: DataStructures.jl/src/sorted_dict.jl ##CHUNK 1 end end foundsemitoken = state[foundi] for i = firsti : N @inbounds if state[i] != pastendsemitoken(sds.vec[i]) && eq(ord, deref_key((sds.vec[i], state[i])), firstk) state[i] = advance((sds.vec[i], state[i])) end end @inbounds return (deref((sds.vec[foundi], foundsemitoken)), state) end """ Base.merge!(sd::SortedDict{K,V,Ord}, d1::AbstractDict{K,V}...) where {K,V,Ord<:Ordering} Merge one or more dicts `d1`, etc. into `sd`. These must all must have the same key-value types. In the case of keys duplicated among the arguments, the rightmost argument that owns the key gets its value stored. Time: O(*cN* log *N*), where *N* #FILE: DataStructures.jl/src/sorted_container_iteration.jl ##CHUNK 1 end iteration_init(m::SortedContainer) = iteration_init(base_iterable_object(m)) @inline function get_item0(ito::IterableObject{C, R, KeysIter, T, D}, state::SAIterationState) where {C, R, T, D} @inbounds k = ito.m.bt.data[state.next].k return k end @inline function get_item0(ito::IterableObject{C, R, ValsIter, T, D}, state::SAIterationState) where {C, R, T, D} @inbounds v = ito.m.bt.data[state.next].d return v end @inline function get_item0(ito::IterableObject{C, R, KeysValsIter, T, D}, state::SAIterationState) where {C, R, T, D} ##CHUNK 2 Base.eltype(::Type{IterableObject{C, R, KeysIter, NoTokens, D}}) where {C, R, D} = keytype(C) Base.eltype(::Type{IterableObject{C, R, ValsIter, NoTokens, D}}) where {C, R, D} = valtype(C) Base.eltype(::Type{IterableObject{C, R, KeysValsIter, NoTokens, D}}) where {C, R, D} = eltype(C) Base.eltype(::ItObj) where {ItObj <: IterableObject} = eltype(ItObj) get_item(m::SortedContainer, state::SAIterationState) = get_item(base_iterable_object(m), state) function next(ito::IterableObject{C, R, KV, T, ForwardIter}, state::SAIterationState) where {C, R, KV, T} sn = state.next (sn < 3 || !(sn in ito.m.bt.useddatacells)) && throw(BoundsError()) SAIterationState(nextloc0(ito.m.bt, sn), state.final) end ##CHUNK 3 get_item(m::SortedContainer, state::SAIterationState) = get_item(base_iterable_object(m), state) function next(ito::IterableObject{C, R, KV, T, ForwardIter}, state::SAIterationState) where {C, R, KV, T} sn = state.next (sn < 3 || !(sn in ito.m.bt.useddatacells)) && throw(BoundsError()) SAIterationState(nextloc0(ito.m.bt, sn), state.final) end function next(ito::IterableObject{C, R, KV, T, ReverseIter}, state::SAIterationState) where {C, R, KV, T} sn = state.next (sn < 3 || !(sn in ito.m.bt.useddatacells)) && throw(BoundsError()) SAIterationState(prevloc0(ito.m.bt, sn), state.final) end next(m::SortedContainer, state::SAIterationState) = next(base_iterable_object(m), state) ##CHUNK 4 @inline function get_item0(ito::IterableObject{C, R, ValsIter, T, D}, state::SAIterationState) where {C, R, T, D} @inbounds v = ito.m.bt.data[state.next].d return v end @inline function get_item0(ito::IterableObject{C, R, KeysValsIter, T, D}, state::SAIterationState) where {C, R, T, D} @inbounds dt = ito.m.bt.data[state.next] return (dt.k => dt.d) end get_item(ito::IterableObject{C, R, KeysIter, TokenIter, D}, state::SAIterationState) where {C, R, D} = ((ito.m, IntSemiToken(state.next)), get_item0(ito, state)) Base.eltype(::Type{IterableObject{C, R, KeysIter, TokenIter, D}}) where {C, R, D} = Tuple{Tuple{C,IntSemiToken}, keytype(C)} #FILE: DataStructures.jl/src/priorityqueue.jl ##CHUNK 1 function Base.merge!(d::AbstractDict, other::PriorityQueue) next = iterate(other, false) while next !== nothing (k, v), state = next d[k] = v next = 
iterate(other, state) end return d end function Base.merge!(combine::Function, d::AbstractDict, other::PriorityQueue) next = iterate(other, false) while next !== nothing (k, v), state = next d[k] = haskey(d, k) ? combine(d[k], v) : v next = iterate(other, state) end return d end #FILE: DataStructures.jl/test/test_multi_dict.jl ##CHUNK 1 end @testset "special functions: count, enumerateall" begin #not appending arrays to one array, using array of arrays d = MultiDict{Char,Array{Int,1}}() @test count(d) == 0 for i in 1:15 insert!(d, rand('a':'f'), rand()>0.5 ? [rand(1:10)] : rand(1:10, rand(1:3))) end @test 15 <= count(d) <=45 @test size(d) == (length(d), count(d)) #= --- broken phlavenk ---- allvals = [kv for kv in enumerateall(d)] @test length(allvals) == count(d) @test all(kv->in(kv,d), enumerateall(d)) # @test length(d) == 15 # @test length(values(d)) == 15 # @test length(keys(d)) <= 6 #FILE: DataStructures.jl/src/sorted_multi_dict.jl ##CHUNK 1 return p2 == pastendsemitoken(m2) end if p2 == pastendsemitoken(m2) return false end @inbounds k1,d1 = deref((m1,p1)) @inbounds k2,d2 = deref((m2,p2)) (!eq(ord,k1,k2) || !isequal(d1,d2)) && return false @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end end function mergetwo!(m::SortedMultiDict{K,D,Ord}, iterable) where {K,D,Ord <: Ordering} for (k,v) in iterable insert!(m.bt, convert(K,k), convert(D,v), true) end end #CURRENT FILE: DataStructures.jl/src/multi_dict.jl ##CHUNK 1 struct EnumerateAll d::MultiDict end enumerateall(d::MultiDict) = EnumerateAll(d) Base.length(e::EnumerateAll) = count(e.d) function Base.iterate(e::EnumerateAll, s) dstate, k, vs, vstate = s dstate === nothing || vstate === nothing && return nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end ##CHUNK 2 #Base.push!(d::MultiDict, kv::Pair, kv2::Pair, kv3::Pair...) = (push!(d.d, kv, kv2, kv3...); d) Base.push!(d::MultiDict, kv) = insert!(d, kv[1], kv[2]) #Base.push!(d::MultiDict, kv, kv2...) = (push!(d.d, kv, kv2...); d) Base.count(d::MultiDict) = length(keys(d)) == 0 ? 0 : mapreduce(k -> length(d[k]), +, keys(d)) Base.size(d::MultiDict) = (length(keys(d)), count(d::MultiDict)) # enumerate struct EnumerateAll d::MultiDict end enumerateall(d::MultiDict) = EnumerateAll(d) Base.length(e::EnumerateAll) = count(e.d) function Base.iterate(e::EnumerateAll, s) dstate, k, vs, vstate = s
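The record above covers the start-of-iteration method for MultiDict's internal `EnumerateAll` wrapper, which walks every (key, value) pair across each key's value vector. Below is a hedged sketch of the data it operates on, assuming the parameterless `MultiDict{K,V}()` constructor and the `push!`/`count` methods shown in the surrounding chunks; note that the package's own `enumerateall` iteration checks appear commented out in the test file, so only the safe setup calls are exercised here.

```julia
using DataStructures

d = MultiDict{Symbol,Int}()
push!(d, :a => 1)                     # push!(d, kv::Pair) forwards to insert!(d, k, v)
push!(d, :a => 2)
push!(d, :b => 3)

count(d)                              # 3 -- total number of stored values across all keys
ea = DataStructures.enumerateall(d)   # wrapper whose iterate method is shown above
length(ea)                            # 3 -- length(::EnumerateAll) delegates to count
```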
114
124
DataStructures.jl
28
function Base.iterate(e::EnumerateAll, s) dstate, k, vs, vstate = s dstate === nothing || vstate === nothing && return nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end
function Base.iterate(e::EnumerateAll, s) dstate, k, vs, vstate = s dstate === nothing || vstate === nothing && return nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end
[ 114, 124 ]
function Base.iterate(e::EnumerateAll, s) dstate, k, vs, vstate = s dstate === nothing || vstate === nothing && return nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end
function Base.iterate(e::EnumerateAll, s) dstate, k, vs, vstate = s dstate === nothing || vstate === nothing && return nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end
Base.iterate
114
124
src/multi_dict.jl
#FILE: DataStructures.jl/src/sorted_dict.jl ##CHUNK 1 end end foundsemitoken = state[foundi] for i = firsti : N @inbounds if state[i] != pastendsemitoken(sds.vec[i]) && eq(ord, deref_key((sds.vec[i], state[i])), firstk) state[i] = advance((sds.vec[i], state[i])) end end @inbounds return (deref((sds.vec[foundi], foundsemitoken)), state) end """ Base.merge!(sd::SortedDict{K,V,Ord}, d1::AbstractDict{K,V}...) where {K,V,Ord<:Ordering} Merge one or more dicts `d1`, etc. into `sd`. These must all must have the same key-value types. In the case of keys duplicated among the arguments, the rightmost argument that owns the key gets its value stored. Time: O(*cN* log *N*), where *N* #FILE: DataStructures.jl/src/sorted_container_iteration.jl ##CHUNK 1 end iteration_init(m::SortedContainer) = iteration_init(base_iterable_object(m)) @inline function get_item0(ito::IterableObject{C, R, KeysIter, T, D}, state::SAIterationState) where {C, R, T, D} @inbounds k = ito.m.bt.data[state.next].k return k end @inline function get_item0(ito::IterableObject{C, R, ValsIter, T, D}, state::SAIterationState) where {C, R, T, D} @inbounds v = ito.m.bt.data[state.next].d return v end @inline function get_item0(ito::IterableObject{C, R, KeysValsIter, T, D}, state::SAIterationState) where {C, R, T, D} ##CHUNK 2 get_item(m::SortedContainer, state::SAIterationState) = get_item(base_iterable_object(m), state) function next(ito::IterableObject{C, R, KV, T, ForwardIter}, state::SAIterationState) where {C, R, KV, T} sn = state.next (sn < 3 || !(sn in ito.m.bt.useddatacells)) && throw(BoundsError()) SAIterationState(nextloc0(ito.m.bt, sn), state.final) end function next(ito::IterableObject{C, R, KV, T, ReverseIter}, state::SAIterationState) where {C, R, KV, T} sn = state.next (sn < 3 || !(sn in ito.m.bt.useddatacells)) && throw(BoundsError()) SAIterationState(prevloc0(ito.m.bt, sn), state.final) end next(m::SortedContainer, state::SAIterationState) = next(base_iterable_object(m), state) ##CHUNK 3 struct SAIterationState next::Int final::Int end # The iterate function is decomposed into three pieces: # The iteration_init function initializes the iteration state and # also stores the final state. It # does different things depending on the parameter R (range) and D (direction). # The get_item function retrieves the requested data from the # the container and depends on the KV and D parameters. # The next function updates the iteration state to the next item # and depends on the D (direction) parameter. 
iteration_init(ito::IterableObject{C, EntireContainer, KV, T, ForwardIter}) where {C, KV, T} = SAIterationState(beginloc(ito.m.bt), 2) iteration_init(ito::IterableObject{C, EntireContainer, KV, T, ReverseIter}) where ##CHUNK 4 function next(ito::IterableObject{C, R, KV, T, ReverseIter}, state::SAIterationState) where {C, R, KV, T} sn = state.next (sn < 3 || !(sn in ito.m.bt.useddatacells)) && throw(BoundsError()) SAIterationState(prevloc0(ito.m.bt, sn), state.final) end next(m::SortedContainer, state::SAIterationState) = next(base_iterable_object(m), state) """ Base.iterate(iter::SortedContainerIterable) with the following helper functions to construct a `SortedContainerIterable`: inclusive(m::SortedContainer, st1, st2) inclusive(m::SortedContainer, (st1, st2)) inclusive_key(m::SortedContainer, key1, key2) inclusive_key(m::SortedContainer, (key1, key2)) #FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 is === nothing && return nothing i, s = is @inbounds p = h.keys[i] => h.vals[i] return (p, s) end Base.@propagate_inbounds function Base.iterate(v::Union{KeySet{<:Any, <:SwissDict}, Base.ValueIterator{<:SwissDict}}, state=v.dict.idxfloor) is = _iterslots(v.dict, state) is === nothing && return nothing i, s = is return (v isa KeySet ? v.dict.keys[i] : v.dict.vals[i], s) end #FILE: DataStructures.jl/src/priorityqueue.jl ##CHUNK 1 function Base.merge!(d::AbstractDict, other::PriorityQueue) next = iterate(other, false) while next !== nothing (k, v), state = next d[k] = v next = iterate(other, state) end return d end function Base.merge!(combine::Function, d::AbstractDict, other::PriorityQueue) next = iterate(other, false) while next !== nothing (k, v), state = next d[k] = haskey(d, k) ? combine(d[k], v) : v next = iterate(other, state) end return d end #FILE: DataStructures.jl/src/sorted_multi_dict.jl ##CHUNK 1 firsti == 0 && return nothing foundi = firsti @inbounds firstk = deref_key((sds.vec[firsti], state[firsti])) for i = firsti + 1 : N if state[i] != pastendsemitoken(sds.vec[i]) @inbounds k2 = deref_key((sds.vec[i], state[i])) if lt(ord, k2, firstk) foundi = i firstk = k2 end end end foundsemitoken = state[foundi] @inbounds state[foundi] = advance((sds.vec[foundi], foundsemitoken)) @inbounds return (deref((sds.vec[foundi], foundsemitoken)), state) end """ Base.merge!(smd::SortedMultiDict, iter...) #CURRENT FILE: DataStructures.jl/src/multi_dict.jl ##CHUNK 1 dstate = iterate(e.d.d) vstate = iterate(vs) dstate === nothing || vstate === nothing && return nothing k = nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate return ((k, v), (dstate, k, vs, vstate)) end ##CHUNK 2 struct EnumerateAll d::MultiDict end enumerateall(d::MultiDict) = EnumerateAll(d) Base.length(e::EnumerateAll) = count(e.d) function Base.iterate(e::EnumerateAll) V = eltype(eltype(values(e.d))) vs = V[] dstate = iterate(e.d.d) vstate = iterate(vs) dstate === nothing || vstate === nothing && return nothing k = nothing while vstate === nothing ((k, vs), dst) = dstate dstate = iterate(e.d.d, dst) vstate = iterate(vs) end v, vst = vstate
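Together with the previous record, this gives both halves of the iteration protocol: the zero-state method primes the walk and the stateful method resumes it. Below is a minimal, self-contained analogue (a hypothetical `FlatPairs` type, not part of DataStructures) that flattens a `Dict` of `Vector`s using the same state layout: outer dict state, current key, current vector, inner vector state.

```julia
# Hypothetical stand-alone analogue of EnumerateAll, for illustration only.
struct FlatPairs
    d::Dict{String,Vector{Int}}
end

function Base.iterate(fp::FlatPairs)
    dstate = iterate(fp.d)
    while dstate !== nothing
        (k, vs), dst = dstate
        vstate = iterate(vs)
        vstate !== nothing && return ((k, vstate[1]), (dst, k, vs, vstate[2]))
        dstate = iterate(fp.d, dst)      # skip keys whose vector is empty
    end
    return nothing
end

function Base.iterate(fp::FlatPairs, s)
    dst, k, vs, vst = s
    vstate = iterate(vs, vst)
    while vstate === nothing             # current vector exhausted: move to the next key
        dstate = iterate(fp.d, dst)
        dstate === nothing && return nothing
        (k, vs), dst = dstate
        vstate = iterate(vs)
    end
    return ((k, vstate[1]), (dst, k, vs, vstate[2]))
end

fp = FlatPairs(Dict("a" => [1, 2], "b" => Int[], "c" => [3]))
for (k, v) in fp
    println(k, " => ", v)                # prints three pairs; the empty "b" vector is skipped
end
```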
127
141
DataStructures.jl
29
function Base.getindex(l::MutableLinkedList{T}, r::UnitRange) where T @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) l2 = MutableLinkedList{T}() node = l.node for i in 1:first(r) node = node.next end len = length(r) for j in 1:len push!(l2, node.data) node = node.next end l2.len = len return l2 end
function Base.getindex(l::MutableLinkedList{T}, r::UnitRange) where T @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) l2 = MutableLinkedList{T}() node = l.node for i in 1:first(r) node = node.next end len = length(r) for j in 1:len push!(l2, node.data) node = node.next end l2.len = len return l2 end
[ 127, 141 ]
function Base.getindex(l::MutableLinkedList{T}, r::UnitRange) where T @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) l2 = MutableLinkedList{T}() node = l.node for i in 1:first(r) node = node.next end len = length(r) for j in 1:len push!(l2, node.data) node = node.next end l2.len = len return l2 end
function Base.getindex(l::MutableLinkedList{T}, r::UnitRange) where T @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) l2 = MutableLinkedList{T}() node = l.node for i in 1:first(r) node = node.next end len = length(r) for j in 1:len push!(l2, node.data) node = node.next end l2.len = len return l2 end
Base.getindex
127
141
src/mutable_list.jl
#CURRENT FILE: DataStructures.jl/src/mutable_list.jl ##CHUNK 1 l2 = MutableLinkedList{T}() for h in l push!(l2, h) end return l2 end function Base.getindex(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i in 1:idx node = node.next end return node.data end function Base.setindex!(l::MutableLinkedList{T}, data, idx::Int) where T @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node ##CHUNK 2 return l end function Base.delete!(l::MutableLinkedList, r::UnitRange) @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) node = l.node for i in 1:first(r) node = node.next end prev = node.prev len = length(r) for j in 1:len node = node.next end next = node prev.next = next next.prev = prev l.len -= len return l end ##CHUNK 3 @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i = 1:idx node = node.next end prev = node.prev next = node.next prev.next = next next.prev = prev l.len -= 1 return l end function Base.delete!(l::MutableLinkedList, r::UnitRange) @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) node = l.node for i in 1:first(r) node = node.next end prev = node.prev ##CHUNK 4 for i in 1:idx node = node.next end return node.data end function Base.setindex!(l::MutableLinkedList{T}, data, idx::Int) where T @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i in 1:idx node = node.next end node.data = convert(T, data) return l end function Base.append!(l1::MutableLinkedList{T}, l2::MutableLinkedList{T}) where T l1.node.prev.next = l2.node.next # l1's last's next is now l2's first l2.node.prev.next = l1.node # l2's last's next is now l1.node ##CHUNK 5 len = length(r) for j in 1:len node = node.next end next = node prev.next = next next.prev = prev l.len -= len return l end function Base.push!(l::MutableLinkedList{T}, data) where T oldlast = l.node.prev node = ListNode{T}(data) node.next = l.node node.prev = oldlast l.node.prev = node oldlast.next = node l.len += 1 return l ##CHUNK 6 function Base.reverse(l::MutableLinkedList{T}) where T l2 = MutableLinkedList{T}() for h in l pushfirst!(l2, h) end return l2 end function Base.copy(l::MutableLinkedList{T}) where T l2 = MutableLinkedList{T}() for h in l push!(l2, h) end return l2 end function Base.getindex(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node ##CHUNK 7 function Base.append!(l::MutableLinkedList, elts...) for elt in elts for v in elt push!(l, v) end end return l end function Base.delete!(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i = 1:idx node = node.next end prev = node.prev next = node.next prev.next = next next.prev = prev l.len -= 1 ##CHUNK 8 end function Base.push!(l::MutableLinkedList{T}, data1, data...) where T push!(l, data1) for v in data push!(l, v) end return l end function Base.pushfirst!(l::MutableLinkedList{T}, data) where T oldfirst = l.node.next node = ListNode{T}(data) node.prev = l.node node.next = oldfirst l.node.next = node oldfirst.prev = node l.len += 1 return l end ##CHUNK 9 l2.node.next.prev = l1.node.prev # l2's first's prev is now l1's last l1.node.prev = l2.node.prev # l1's first's prev is now l2's last l1.len += length(l2) # make l2 empty l2.node.prev = l2.node l2.node.next = l2.node l2.len = 0 return l1 end function Base.append!(l::MutableLinkedList, elts...) 
for elt in elts for v in elt push!(l, v) end end return l end function Base.delete!(l::MutableLinkedList, idx::Int) ##CHUNK 10 function Base.push!(l::MutableLinkedList{T}, data) where T oldlast = l.node.prev node = ListNode{T}(data) node.next = l.node node.prev = oldlast l.node.prev = node oldlast.next = node l.len += 1 return l end function Base.push!(l::MutableLinkedList{T}, data1, data...) where T push!(l, data1) for v in data push!(l, v) end return l end
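A hedged usage sketch of the range `getindex` above. Note that, as written, the `@boundscheck` requires `0 < first(r) < last(r) <= length(l)`, so the range must span at least two elements.

```julia
using DataStructures

l = MutableLinkedList{Int}(1, 2, 3, 4, 5)
sub = l[2:4]          # walks to node 2, then copies three nodes into a fresh list
collect(sub)          # [2, 3, 4]
collect(l)            # [1, 2, 3, 4, 5] -- the original list is untouched
```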
153
164
DataStructures.jl
30
function Base.append!(l1::MutableLinkedList{T}, l2::MutableLinkedList{T}) where T l1.node.prev.next = l2.node.next # l1's last's next is now l2's first l2.node.prev.next = l1.node # l2's last's next is now l1.node l2.node.next.prev = l1.node.prev # l2's first's prev is now l1's last l1.node.prev = l2.node.prev # l1's first's prev is now l2's last l1.len += length(l2) # make l2 empty l2.node.prev = l2.node l2.node.next = l2.node l2.len = 0 return l1 end
function Base.append!(l1::MutableLinkedList{T}, l2::MutableLinkedList{T}) where T l1.node.prev.next = l2.node.next # l1's last's next is now l2's first l2.node.prev.next = l1.node # l2's last's next is now l1.node l2.node.next.prev = l1.node.prev # l2's first's prev is now l1's last l1.node.prev = l2.node.prev # l1's first's prev is now l2's last l1.len += length(l2) # make l2 empty l2.node.prev = l2.node l2.node.next = l2.node l2.len = 0 return l1 end
[ 153, 164 ]
function Base.append!(l1::MutableLinkedList{T}, l2::MutableLinkedList{T}) where T l1.node.prev.next = l2.node.next # l1's last's next is now l2's first l2.node.prev.next = l1.node # l2's last's next is now l1.node l2.node.next.prev = l1.node.prev # l2's first's prev is now l1's last l1.node.prev = l2.node.prev # l1's first's prev is now l2's last l1.len += length(l2) # make l2 empty l2.node.prev = l2.node l2.node.next = l2.node l2.len = 0 return l1 end
function Base.append!(l1::MutableLinkedList{T}, l2::MutableLinkedList{T}) where T l1.node.prev.next = l2.node.next # l1's last's next is now l2's first l2.node.prev.next = l1.node # l2's last's next is now l1.node l2.node.next.prev = l1.node.prev # l2's first's prev is now l1's last l1.node.prev = l2.node.prev # l1's first's prev is now l2's last l1.len += length(l2) # make l2 empty l2.node.prev = l2.node l2.node.next = l2.node l2.len = 0 return l1 end
Base.append!
153
164
src/mutable_list.jl
#FILE: DataStructures.jl/test/test_mutable_list.jl ##CHUNK 1 l = MutableLinkedList{Int}() @testset "push back" begin for i = 1:n push!(l, i) @test last(l) == i if i > 4 @test getindex(l, i) == i @test getindex(l, 1:floor(Int, i/2)) == MutableLinkedList{Int}(1:floor(Int, i/2)...) @test l[1:floor(Int, i/2)] == MutableLinkedList{Int}(1:floor(Int, i/2)...) setindex!(l, 0, i - 2) @test l == MutableLinkedList{Int}(1:i-3..., 0, i-1:i...) setindex!(l, i - 2, i - 2) end @test lastindex(l) == i @test length(l) == i @test isempty(l) == false for (j, k) in enumerate(l) @test j == k end #CURRENT FILE: DataStructures.jl/src/mutable_list.jl ##CHUNK 1 l.len -= len return l end function Base.push!(l::MutableLinkedList{T}, data) where T oldlast = l.node.prev node = ListNode{T}(data) node.next = l.node node.prev = oldlast l.node.prev = node oldlast.next = node l.len += 1 return l end function Base.push!(l::MutableLinkedList{T}, data1, data...) where T push!(l, data1) for v in data push!(l, v) end ##CHUNK 2 l = new{T}() l.len = 0 l.node = ListNode{T}() l.node.next = l.node l.node.prev = l.node return l end end MutableLinkedList() = MutableLinkedList{Any}() function MutableLinkedList{T}(elts...) where T l = MutableLinkedList{T}() for elt in elts push!(l, elt) end return l end Base.iterate(l::MutableLinkedList) = l.len == 0 ? nothing : (l.node.next.data, l.node.next.next) ##CHUNK 3 return l end function Base.pushfirst!(l::MutableLinkedList{T}, data) where T oldfirst = l.node.next node = ListNode{T}(data) node.prev = l.node node.next = oldfirst l.node.next = node oldfirst.prev = node l.len += 1 return l end function Base.pop!(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List must be non-empty")) last = l.node.prev.prev data = l.node.prev.data last.next = l.node l.node.prev = last ##CHUNK 4 oldlast.next = node l.len += 1 return l end function Base.push!(l::MutableLinkedList{T}, data1, data...) where T push!(l, data1) for v in data push!(l, v) end return l end function Base.pushfirst!(l::MutableLinkedList{T}, data) where T oldfirst = l.node.next node = ListNode{T}(data) node.prev = l.node node.next = oldfirst l.node.next = node oldfirst.prev = node ##CHUNK 5 Base.iterate(l::MutableLinkedList, n::ListNode) = n === l.node ? nothing : (n.data, n.next) Base.isempty(l::MutableLinkedList) = l.len == 0 Base.length(l::MutableLinkedList) = l.len Base.collect(l::MutableLinkedList{T}) where T = T[x for x in l] Base.eltype(::Type{<:MutableLinkedList{T}}) where T = T Base.lastindex(l::MutableLinkedList) = l.len function Base.first(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List is empty")) return l.node.next.data end function Base.last(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List is empty")) return l.node.prev.data end Base.:(==)(l1::MutableLinkedList{T}, l2::MutableLinkedList{S}) where {T,S} = false ##CHUNK 6 l.len += 1 return l end function Base.pop!(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List must be non-empty")) last = l.node.prev.prev data = l.node.prev.data last.next = l.node l.node.prev = last l.len -= 1 return data end function Base.popfirst!(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List must be non-empty")) first = l.node.next.next data = l.node.next.data first.prev = l.node l.node.next = first ##CHUNK 7 function MutableLinkedList{T}(elts...) where T l = MutableLinkedList{T}() for elt in elts push!(l, elt) end return l end Base.iterate(l::MutableLinkedList) = l.len == 0 ? 
nothing : (l.node.next.data, l.node.next.next) Base.iterate(l::MutableLinkedList, n::ListNode) = n === l.node ? nothing : (n.data, n.next) Base.isempty(l::MutableLinkedList) = l.len == 0 Base.length(l::MutableLinkedList) = l.len Base.collect(l::MutableLinkedList{T}) where T = T[x for x in l] Base.eltype(::Type{<:MutableLinkedList{T}}) where T = T Base.lastindex(l::MutableLinkedList) = l.len function Base.first(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List is empty")) ##CHUNK 8 l.len -= 1 return data end function Base.popfirst!(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List must be non-empty")) first = l.node.next.next data = l.node.next.data first.prev = l.node l.node.next = first l.len -= 1 return data end function Base.show(io::IO, node::ListNode) x = node.data print(io, "$(typeof(node))($x)") end function Base.show(io::IO, l::MutableLinkedList) ##CHUNK 9 end function Base.delete!(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i = 1:idx node = node.next end prev = node.prev next = node.next prev.next = next next.prev = prev l.len -= 1 return l end function Base.delete!(l::MutableLinkedList, r::UnitRange) @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) node = l.node for i in 1:first(r)
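The `append!` method above splices the second list's nodes directly onto the end of the first, so it runs in O(1) and leaves the second list empty. A short usage sketch:

```julia
using DataStructures

l1 = MutableLinkedList{Int}(1, 2)
l2 = MutableLinkedList{Int}(3, 4)
append!(l1, l2)
collect(l1)           # [1, 2, 3, 4]
isempty(l2)           # true -- l2's nodes now belong to l1
```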
175
187
DataStructures.jl
31
function Base.delete!(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i = 1:idx node = node.next end prev = node.prev next = node.next prev.next = next next.prev = prev l.len -= 1 return l end
function Base.delete!(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i = 1:idx node = node.next end prev = node.prev next = node.next prev.next = next next.prev = prev l.len -= 1 return l end
[ 175, 187 ]
function Base.delete!(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i = 1:idx node = node.next end prev = node.prev next = node.next prev.next = next next.prev = prev l.len -= 1 return l end
function Base.delete!(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i = 1:idx node = node.next end prev = node.prev next = node.next prev.next = next next.prev = prev l.len -= 1 return l end
Base.delete!
175
187
src/mutable_list.jl
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 node = node.leftChild end return node end """ delete!(tree::RBTree, key) Deletes `key` from `tree`, if present, else returns the unmodified tree. """ function Base.delete!(tree::RBTree{K}, d::K) where K z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data ##CHUNK 2 function Base.delete!(tree::RBTree{K}, d::K) where K z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data node = node.leftChild else node = node.rightChild end end (z === tree.nil) && return tree y = z y_original_color = y.color #CURRENT FILE: DataStructures.jl/src/mutable_list.jl ##CHUNK 1 end return l end function Base.delete!(l::MutableLinkedList, r::UnitRange) @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) node = l.node for i in 1:first(r) node = node.next end prev = node.prev len = length(r) for j in 1:len node = node.next end next = node prev.next = next next.prev = prev l.len -= len ##CHUNK 2 l2.node.next = l2.node l2.len = 0 return l1 end function Base.append!(l::MutableLinkedList, elts...) for elt in elts for v in elt push!(l, v) end end return l end function Base.delete!(l::MutableLinkedList, r::UnitRange) @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) node = l.node for i in 1:first(r) node = node.next ##CHUNK 3 return l end function Base.pop!(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List must be non-empty")) last = l.node.prev.prev data = l.node.prev.data last.next = l.node l.node.prev = last l.len -= 1 return data end function Base.popfirst!(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List must be non-empty")) first = l.node.next.next data = l.node.next.data first.prev = l.node l.node.next = first l.len -= 1 ##CHUNK 4 end function Base.setindex!(l::MutableLinkedList{T}, data, idx::Int) where T @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i in 1:idx node = node.next end node.data = convert(T, data) return l end function Base.append!(l1::MutableLinkedList{T}, l2::MutableLinkedList{T}) where T l1.node.prev.next = l2.node.next # l1's last's next is now l2's first l2.node.prev.next = l1.node # l2's last's next is now l1.node l2.node.next.prev = l1.node.prev # l2's first's prev is now l1's last l1.node.prev = l2.node.prev # l1's first's prev is now l2's last l1.len += length(l2) # make l2 empty l2.node.prev = l2.node ##CHUNK 5 end function Base.pushfirst!(l::MutableLinkedList{T}, data) where T oldfirst = l.node.next node = ListNode{T}(data) node.prev = l.node node.next = oldfirst l.node.next = node oldfirst.prev = node l.len += 1 return l end function Base.pop!(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List must be non-empty")) last = l.node.prev.prev data = l.node.prev.data last.next = l.node l.node.prev = last l.len -= 1 ##CHUNK 6 end prev = node.prev len = length(r) for j in 1:len node = node.next end next = node prev.next = next next.prev = prev l.len -= len return l end function Base.push!(l::MutableLinkedList{T}, data) where T oldlast = l.node.prev node = ListNode{T}(data) node.next = l.node node.prev = oldlast l.node.prev = node oldlast.next = node ##CHUNK 7 for i in 1:idx node = node.next end return node.data end function Base.getindex(l::MutableLinkedList{T}, r::UnitRange) where T @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) l2 = MutableLinkedList{T}() node = l.node for i in 1:first(r) node = node.next end 
len = length(r) for j in 1:len push!(l2, node.data) node = node.next end l2.len = len return l2 ##CHUNK 8 l2 = MutableLinkedList{T}() for h in l push!(l2, h) end return l2 end function Base.getindex(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i in 1:idx node = node.next end return node.data end function Base.getindex(l::MutableLinkedList{T}, r::UnitRange) where T @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) l2 = MutableLinkedList{T}() node = l.node
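Usage sketch for the single-index `delete!` above: it walks to the `idx`-th node, relinks that node's neighbours, and decrements the stored length.

```julia
using DataStructures

l = MutableLinkedList{Int}(10, 20, 30)
delete!(l, 2)
collect(l)            # [10, 30]
length(l)             # 2
```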
189
205
DataStructures.jl
32
function Base.delete!(l::MutableLinkedList, r::UnitRange) @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) node = l.node for i in 1:first(r) node = node.next end prev = node.prev len = length(r) for j in 1:len node = node.next end next = node prev.next = next next.prev = prev l.len -= len return l end
function Base.delete!(l::MutableLinkedList, r::UnitRange) @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) node = l.node for i in 1:first(r) node = node.next end prev = node.prev len = length(r) for j in 1:len node = node.next end next = node prev.next = next next.prev = prev l.len -= len return l end
[ 189, 205 ]
function Base.delete!(l::MutableLinkedList, r::UnitRange) @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) node = l.node for i in 1:first(r) node = node.next end prev = node.prev len = length(r) for j in 1:len node = node.next end next = node prev.next = next next.prev = prev l.len -= len return l end
function Base.delete!(l::MutableLinkedList, r::UnitRange) @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) node = l.node for i in 1:first(r) node = node.next end prev = node.prev len = length(r) for j in 1:len node = node.next end next = node prev.next = next next.prev = prev l.len -= len return l end
Base.delete!
189
205
src/mutable_list.jl
#CURRENT FILE: DataStructures.jl/src/mutable_list.jl ##CHUNK 1 end return l end function Base.delete!(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i = 1:idx node = node.next end prev = node.prev next = node.next prev.next = next next.prev = prev l.len -= 1 return l end function Base.push!(l::MutableLinkedList{T}, data) where T ##CHUNK 2 l2.node.next = l2.node l2.len = 0 return l1 end function Base.append!(l::MutableLinkedList, elts...) for elt in elts for v in elt push!(l, v) end end return l end function Base.delete!(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i = 1:idx node = node.next end ##CHUNK 3 node = ListNode{T}(data) node.prev = l.node node.next = oldfirst l.node.next = node oldfirst.prev = node l.len += 1 return l end function Base.pop!(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List must be non-empty")) last = l.node.prev.prev data = l.node.prev.data last.next = l.node l.node.prev = last l.len -= 1 return data end function Base.popfirst!(l::MutableLinkedList) ##CHUNK 4 for i in 1:idx node = node.next end return node.data end function Base.getindex(l::MutableLinkedList{T}, r::UnitRange) where T @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) l2 = MutableLinkedList{T}() node = l.node for i in 1:first(r) node = node.next end len = length(r) for j in 1:len push!(l2, node.data) node = node.next end l2.len = len return l2 ##CHUNK 5 end function Base.setindex!(l::MutableLinkedList{T}, data, idx::Int) where T @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i in 1:idx node = node.next end node.data = convert(T, data) return l end function Base.append!(l1::MutableLinkedList{T}, l2::MutableLinkedList{T}) where T l1.node.prev.next = l2.node.next # l1's last's next is now l2's first l2.node.prev.next = l1.node # l2's last's next is now l1.node l2.node.next.prev = l1.node.prev # l2's first's prev is now l1's last l1.node.prev = l2.node.prev # l1's first's prev is now l2's last l1.len += length(l2) # make l2 empty l2.node.prev = l2.node ##CHUNK 6 prev = node.prev next = node.next prev.next = next next.prev = prev l.len -= 1 return l end function Base.push!(l::MutableLinkedList{T}, data) where T oldlast = l.node.prev node = ListNode{T}(data) node.next = l.node node.prev = oldlast l.node.prev = node oldlast.next = node l.len += 1 return l end ##CHUNK 7 isempty(l) && throw(ArgumentError("List must be non-empty")) last = l.node.prev.prev data = l.node.prev.data last.next = l.node l.node.prev = last l.len -= 1 return data end function Base.popfirst!(l::MutableLinkedList) isempty(l) && throw(ArgumentError("List must be non-empty")) first = l.node.next.next data = l.node.next.data first.prev = l.node l.node.next = first l.len -= 1 return data end function Base.show(io::IO, node::ListNode) ##CHUNK 8 l2 = MutableLinkedList{T}() for h in l push!(l2, h) end return l2 end function Base.getindex(l::MutableLinkedList, idx::Int) @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i in 1:idx node = node.next end return node.data end function Base.getindex(l::MutableLinkedList{T}, r::UnitRange) where T @boundscheck 0 < first(r) < last(r) <= l.len || throw(BoundsError(l, r)) l2 = MutableLinkedList{T}() node = l.node ##CHUNK 9 for i in 1:first(r) node = node.next end len = length(r) for j in 1:len push!(l2, node.data) node = node.next end l2.len = len return l2 end function 
Base.setindex!(l::MutableLinkedList{T}, data, idx::Int) where T @boundscheck 0 < idx <= l.len || throw(BoundsError(l, idx)) node = l.node for i in 1:idx node = node.next end node.data = convert(T, data) return l ##CHUNK 10 function Base.push!(l::MutableLinkedList{T}, data1, data...) where T push!(l, data1) for v in data push!(l, v) end return l end function Base.pushfirst!(l::MutableLinkedList{T}, data) where T oldfirst = l.node.next node = ListNode{T}(data) node.prev = l.node node.next = oldfirst l.node.next = node oldfirst.prev = node l.len += 1 return l end function Base.pop!(l::MutableLinkedList)
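Usage sketch for the range `delete!` above. Like the range `getindex`, the bounds check requires `first(r) < last(r)`, so the range must cover at least two positions.

```julia
using DataStructures

l = MutableLinkedList{Int}(1, 2, 3, 4, 5)
delete!(l, 2:4)       # a single splice unlinks positions 2 through 4
collect(l)            # [1, 5]
```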
125
141
DataStructures.jl
33
function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 _setindex!(h, v0, key0) else @assert haskey(h, key0) @inbounds orig_v = h.vals[index] !isequal(orig_v, v0) && (@inbounds h.vals[index] = v0) end check_for_rehash(h) && rehash!(h) return h end
function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 _setindex!(h, v0, key0) else @assert haskey(h, key0) @inbounds orig_v = h.vals[index] !isequal(orig_v, v0) && (@inbounds h.vals[index] = v0) end check_for_rehash(h) && rehash!(h) return h end
[ 125, 141 ]
function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 _setindex!(h, v0, key0) else @assert haskey(h, key0) @inbounds orig_v = h.vals[index] !isequal(orig_v, v0) && (@inbounds h.vals[index] = v0) end check_for_rehash(h) && rehash!(h) return h end
function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 _setindex!(h, v0, key0) else @assert haskey(h, key0) @inbounds orig_v = h.vals[index] !isequal(orig_v, v0) && (@inbounds h.vals[index] = v0) end check_for_rehash(h) && rehash!(h) return h end
Base.setindex!
125
141
src/ordered_robin_dict.jl
#FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 return h end function Base.setindex!(h::SwissDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) _setindex!(h, v0, key) end function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) index, tag = ht_keyindex2!(h, key) if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end ##CHUNK 2 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return v end function Base.getindex(h::SwissDict{K,V}, key) where {K, V} index = ht_keyindex(h, key) @inbounds return (index < 0) ? throw(KeyError(key)) : h.vals[index]::V end """ get(collection, key, default) Return the value stored for the given key, or the given default value if no mapping for the key is present. # Examples #FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) function Base.setindex!(h::RobinDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) isequal(key, key0) || throw(ArgumentError("$key0 is not a valid key for type $K")) _setindex!(h, key, v0) end function _setindex!(h::RobinDict{K,V}, key::K, v0) where {K, V} v = convert(V, v0) index = rh_insert!(h, key, v) ##CHUNK 2 function Base.setindex!(h::RobinDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) isequal(key, key0) || throw(ArgumentError("$key0 is not a valid key for type $K")) _setindex!(h, key, v0) end function _setindex!(h::RobinDict{K,V}, key::K, v0) where {K, V} v = convert(V, v0) index = rh_insert!(h, key, v) @assert index > 0 return h end """ empty!(collection) -> collection Remove all elements from a `collection`. # Examples ##CHUNK 3 function Base.delete!(h::RobinDict{K, V}, key0) where {K, V} key = convert(K, key0) index = rh_search(h, key) if index > 0 rh_delete!(h, index) end return h end function get_next_filled(h::RobinDict, i) L = length(h.keys) (1 <= i <= L) || return 0 for j = i:L @inbounds if isslotfilled(h, j) return j end end return 0 end ##CHUNK 4 return _get!(default, h, key) end function _get!(default::Callable, h::RobinDict{K,V}, key::K) where V where K index = rh_search(h, key) index > 0 && return h.vals[index] v = convert(V, default()) rh_insert!(h, key, v) return v end function Base.getindex(h::RobinDict{K, V}, key) where {K, V} index = rh_search(h, key) @inbounds return (index < 0) ? throw(KeyError(key)) : h.vals[index] end """ get(collection, key, default) ##CHUNK 5 v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) ##CHUNK 6 ``` """ function Base.getkey(h::RobinDict{K,V}, key, default) where {K, V} index = rh_search(h, key) @inbounds return (index < 0) ? 
default : h.keys[index]::K end # backward shift deletion by not keeping any tombstones function rh_delete!(h::RobinDict{K, V}, index) where {K, V} @assert index > 0 # this assumes that there is a key/value present in the dictionary at index index0 = index sz = length(h.keys) @inbounds while true index0 = (index0 & (sz - 1)) + 1 if isslotempty(h, index0) || calculate_distance(h, index0) == 0 break end end #CURRENT FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 return (keysl > (1 + ALLOWABLE_USELESS_GROWTH)*dictl) end function rehash!(h::OrderedRobinDict{K, V}) where {K, V} keys = h.keys vals = h.vals hk = Vector{K}() hv = Vector{V}() for (idx, (k, v)) in enumerate(zip(keys, vals)) if get(h.dict, k, -1) == idx push!(hk, k) push!(hv, v) end end h.keys = hk h.vals = hv for (idx, k) in enumerate(h.keys) ##CHUNK 2 time() end ``` """ function Base.get!(default::Base.Callable, h::OrderedRobinDict{K,V}, key0) where {K,V} index = get(h.dict, key0, -2) index > 0 && return @inbounds h.vals[index] v = convert(V, default()) setindex!(h, v, key0) return v end function Base.getindex(h::OrderedRobinDict{K,V}, key) where {K,V} index = get(h.dict, key, -1) return (index < 0) ? throw(KeyError(key)) : @inbounds h.vals[index]::V end """ get(collection, key, default)
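A hedged usage sketch of `setindex!` for `OrderedRobinDict`: a new key is appended (so insertion order is remembered), while an existing key has its stored value overwritten in place.

```julia
using DataStructures

d = OrderedRobinDict{String,Int}()
d["a"] = 1
d["b"] = 2
d["a"] = 10           # existing key: value overwritten, position unchanged
collect(keys(d))      # ["a", "b"] -- insertion order preserved
d["a"]                # 10
```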
151
171
DataStructures.jl
34
function rehash!(h::OrderedRobinDict{K, V}) where {K, V} keys = h.keys vals = h.vals hk = Vector{K}() hv = Vector{V}() for (idx, (k, v)) in enumerate(zip(keys, vals)) if get(h.dict, k, -1) == idx push!(hk, k) push!(hv, v) end end h.keys = hk h.vals = hv for (idx, k) in enumerate(h.keys) h.dict[k] = idx end return h end
function rehash!(h::OrderedRobinDict{K, V}) where {K, V} keys = h.keys vals = h.vals hk = Vector{K}() hv = Vector{V}() for (idx, (k, v)) in enumerate(zip(keys, vals)) if get(h.dict, k, -1) == idx push!(hk, k) push!(hv, v) end end h.keys = hk h.vals = hv for (idx, k) in enumerate(h.keys) h.dict[k] = idx end return h end
[ 151, 171 ]
function rehash!(h::OrderedRobinDict{K, V}) where {K, V} keys = h.keys vals = h.vals hk = Vector{K}() hv = Vector{V}() for (idx, (k, v)) in enumerate(zip(keys, vals)) if get(h.dict, k, -1) == idx push!(hk, k) push!(hv, v) end end h.keys = hk h.vals = hv for (idx, k) in enumerate(h.keys) h.dict[k] = idx end return h end
function rehash!(h::OrderedRobinDict{K, V}) where {K, V} keys = h.keys vals = h.vals hk = Vector{K}() hv = Vector{V}() for (idx, (k, v)) in enumerate(zip(keys, vals)) if get(h.dict, k, -1) == idx push!(hk, k) push!(hv, v) end end h.keys = hk h.vals = hv for (idx, k) in enumerate(h.keys) h.dict[k] = idx end return h end
rehash!
151
171
src/ordered_robin_dict.jl
#FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 RobinDict{String, Int64}() ``` """ function Base.empty!(h::RobinDict{K,V}) where {K, V} sz = length(h.keys) empty!(h.hashes) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) resize!(h.hashes, sz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end function rh_search(h::RobinDict{K, V}, key) where {K, V} sz = length(h.keys) chash = hash_key(key) ##CHUNK 2 function RobinDict{K, V}(d::RobinDict{K, V}) where {K, V} RobinDict{K, V}(copy(d.hashes), copy(d.keys), copy(d.vals), d.count, d.idxfloor) end function RobinDict{K,V}(kv) where V where K h = RobinDict{K,V}() for (k,v) in kv h[k] = v end return h end RobinDict{K,V}(p::Pair) where {K,V} = setindex!(RobinDict{K,V}(), p.second, p.first) function RobinDict{K,V}(ps::Pair...) where V where K h = RobinDict{K,V}() sizehint!(h, length(ps)) for p in ps h[p.first] = p.second end return h ##CHUNK 3 keys::Array{K,1} vals::Array{V,1} count::Int idxfloor::Int end function RobinDict{K, V}() where {K, V} n = 16 RobinDict{K, V}(zeros(UInt32, n), Vector{K}(undef, n), Vector{V}(undef, n), 0, 0) end function RobinDict{K, V}(d::RobinDict{K, V}) where {K, V} RobinDict{K, V}(copy(d.hashes), copy(d.keys), copy(d.vals), d.count, d.idxfloor) end function RobinDict{K,V}(kv) where V where K h = RobinDict{K,V}() for (k,v) in kv h[k] = v end ##CHUNK 4 h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) ##CHUNK 5 end return index_curr end #rehash! 
algorithm function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end #CURRENT FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 empty!(h.vals) h.count = 0 return h end function _setindex!(h::OrderedRobinDict, v, key) hk, hv = h.keys, h.vals push!(hk, key) push!(hv, v) nk = length(hk) @inbounds h.dict[key] = Int32(nk) h.count += 1 end function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 ##CHUNK 2 julia> empty!(A); julia> A OrderedRobinDict{String, Int64}() ``` """ function Base.empty!(h::OrderedRobinDict{K,V}) where {K, V} empty!(h.dict) empty!(h.keys) empty!(h.vals) h.count = 0 return h end function _setindex!(h::OrderedRobinDict, v, key) hk, hv = h.keys, h.vals push!(hk, key) push!(hv, v) nk = length(hk) ##CHUNK 3 @inbounds h.dict[key] = Int32(nk) h.count += 1 end function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 _setindex!(h, v0, key0) else @assert haskey(h, key0) @inbounds orig_v = h.vals[index] !isequal(orig_v, v0) && (@inbounds h.vals[index] = v0) end check_for_rehash(h) && rehash!(h) return h ##CHUNK 4 end function get_next_filled_index(h::OrderedRobinDict, index) # get the next filled slot, including index and beyond while (index <= length(h.keys)) isslotfilled(h, index) && return index index += 1 end return -1 end Base.@propagate_inbounds function Base.iterate(h::OrderedRobinDict) isempty(h) && return nothing check_for_rehash(h) && rehash!(h) index = get_first_filled_index(h) return (Pair(h.keys[index], h.vals[index]), index+1) end Base.@propagate_inbounds function Base.iterate(h::OrderedRobinDict, i) length(h.keys) < i && return nothing ##CHUNK 5 "a" => 1 ``` """ function Base.delete!(h::OrderedRobinDict, key) pop!(h, key) return h end function _delete!(h::OrderedRobinDict, index) @inbounds h.dict[h.keys[index]] = -1 h.count -= 1 check_for_rehash(h) ? rehash!(h) : h end function get_first_filled_index(h::OrderedRobinDict) index = 1 while (true) isslotfilled(h, index) && return index index += 1 end
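`rehash!` above is an internal compaction pass: it keeps only the entries whose recorded index in `h.dict` still points at their slot, rebuilds `keys`/`vals`, and renumbers the index. A minimal sketch, assuming direct access to the non-exported helper and peeking at the internal `keys` field purely for illustration:

```julia
using DataStructures

d = OrderedRobinDict{Int,Int}()
for i in 1:100
    d[i] = i
end
for i in 1:90
    delete!(d, i)                     # deletions mark slots; stale entries may linger
end
DataStructures.rehash!(d)             # compacts keys/vals down to the live entries
length(d.keys) == length(d)           # true after compaction (internal field, illustration only)
```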
173
183
DataStructures.jl
35
function Base.sizehint!(d::OrderedRobinDict, newsz) oldsz = length(d) # grow at least 25% if newsz < (oldsz*5)>>2 return d end sizehint!(d.keys, newsz) sizehint!(d.vals, newsz) sizehint!(d.dict, newsz) return d end
function Base.sizehint!(d::OrderedRobinDict, newsz) oldsz = length(d) # grow at least 25% if newsz < (oldsz*5)>>2 return d end sizehint!(d.keys, newsz) sizehint!(d.vals, newsz) sizehint!(d.dict, newsz) return d end
[ 173, 183 ]
function Base.sizehint!(d::OrderedRobinDict, newsz) oldsz = length(d) # grow at least 25% if newsz < (oldsz*5)>>2 return d end sizehint!(d.keys, newsz) sizehint!(d.vals, newsz) sizehint!(d.dict, newsz) return d end
function Base.sizehint!(d::OrderedRobinDict, newsz) oldsz = length(d) # grow at least 25% if newsz < (oldsz*5)>>2 return d end sizehint!(d.keys, newsz) sizehint!(d.vals, newsz) sizehint!(d.dict, newsz) return d end
Base.sizehint!
173
183
src/ordered_robin_dict.jl
#FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 sz = length(h.keys) if h.count*4 < sz && sz > 16 rehash!(h, sz>>1) end end function Base.sizehint!(d::SwissDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end function rehash!(h::SwissDict{K,V}, newsz = length(h.keys)) where {K, V} olds = h.slots oldk = h.keys oldv = h.vals ##CHUNK 2 # worst-case hysteresis: shrink at 25% vs grow at 30% if all hashes collide. # expected hysteresis is 25% to 42.5%. function maybe_rehash_grow!(h::SwissDict) sz = length(h.keys) if h.count > sz * SWISS_DICT_LOAD_FACTOR || (h.nbfull-1) * 10 > sz * 6 rehash!(h, sz<<2) end end function maybe_rehash_shrink!(h::SwissDict) sz = length(h.keys) if h.count*4 < sz && sz > 16 rehash!(h, sz>>1) end end function Base.sizehint!(d::SwissDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% ##CHUNK 3 if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end function rehash!(h::SwissDict{K,V}, newsz = length(h.keys)) where {K, V} olds = h.slots oldk = h.keys oldv = h.vals sz = length(oldk) newsz = _tablesz(newsz) (newsz*SWISS_DICT_LOAD_FACTOR) > h.count || (newsz <<= 1) h.age += 1 h.idxfloor = 1 if h.count == 0 resize!(h.slots, newsz>>4) fill!(h.slots, _expand16(0x00)) resize!(h.keys, newsz) resize!(h.vals, newsz) ##CHUNK 4 sz = length(oldk) newsz = _tablesz(newsz) (newsz*SWISS_DICT_LOAD_FACTOR) > h.count || (newsz <<= 1) h.age += 1 h.idxfloor = 1 if h.count == 0 resize!(h.slots, newsz>>4) fill!(h.slots, _expand16(0x00)) resize!(h.keys, newsz) resize!(h.vals, newsz) h.nbfull = 0 return h end nssz = newsz>>4 slots = fill(_expand16(0x00), nssz) keys = Vector{K}(undef, newsz) vals = Vector{V}(undef, newsz) age0 = h.age nbfull = 0 is = _iterslots(h, 1) #FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 end return index_curr end #rehash! algorithm function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end ##CHUNK 2 h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) ##CHUNK 3 v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) ##CHUNK 4 @inbounds h_new.vals[index_curr] = cval @inbounds h_new.keys[index_curr] = ckey @inbounds h_new.hashes[index_curr] = chash @assert probe_current >= 0 if h_new.idxfloor == 0 h_new.idxfloor = index_curr else h_new.idxfloor = min(h_new.idxfloor, index_curr) end return index_curr end #rehash! 
algorithm function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) ##CHUNK 5 RobinDict{String, Int64}() ``` """ function Base.empty!(h::RobinDict{K,V}) where {K, V} sz = length(h.keys) empty!(h.hashes) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) resize!(h.hashes, sz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end function rh_search(h::RobinDict{K, V}, key) where {K, V} sz = length(h.keys) chash = hash_key(key) ##CHUNK 6 newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] #CURRENT FILE: DataStructures.jl/src/ordered_robin_dict.jl
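`sizehint!` above forwards the hint to the backing `keys`/`vals` vectors and the index dict, but only when the request is at least roughly 25% larger than the current number of entries; otherwise it is a no-op. A short sketch:

```julia
using DataStructures

d = OrderedRobinDict{Int,Int}()
sizehint!(d, 10_000)                  # reserve capacity up front before a bulk insert
for i in 1:10_000
    d[i] = i
end
```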
343
353
DataStructures.jl
36
function Base.pop!(h::OrderedRobinDict) check_for_rehash(h) && rehash!(h) index = length(h.keys) while (index > 0) isslotfilled(h, index) && break index -= 1 end index == 0 && rehash!(h) @inbounds key = h.keys[index] return key => _pop!(h, index) end
function Base.pop!(h::OrderedRobinDict) check_for_rehash(h) && rehash!(h) index = length(h.keys) while (index > 0) isslotfilled(h, index) && break index -= 1 end index == 0 && rehash!(h) @inbounds key = h.keys[index] return key => _pop!(h, index) end
[ 343, 353 ]
function Base.pop!(h::OrderedRobinDict) check_for_rehash(h) && rehash!(h) index = length(h.keys) while (index > 0) isslotfilled(h, index) && break index -= 1 end index == 0 && rehash!(h) @inbounds key = h.keys[index] return key => _pop!(h, index) end
function Base.pop!(h::OrderedRobinDict) check_for_rehash(h) && rehash!(h) index = length(h.keys) while (index > 0) isslotfilled(h, index) && break index -= 1 end index == 0 && rehash!(h) @inbounds key = h.keys[index] return key => _pop!(h, index) end
Base.pop!
343
353
src/ordered_robin_dict.jl
#FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 ``` """ function Base.getkey(h::RobinDict{K,V}, key, default) where {K, V} index = rh_search(h, key) @inbounds return (index < 0) ? default : h.keys[index]::K end # backward shift deletion by not keeping any tombstones function rh_delete!(h::RobinDict{K, V}, index) where {K, V} @assert index > 0 # this assumes that there is a key/value present in the dictionary at index index0 = index sz = length(h.keys) @inbounds while true index0 = (index0 & (sz - 1)) + 1 if isslotempty(h, index0) || calculate_distance(h, index0) == 0 break end end ##CHUNK 2 function Base.delete!(h::RobinDict{K, V}, key0) where {K, V} key = convert(K, key0) index = rh_search(h, key) if index > 0 rh_delete!(h, index) end return h end function get_next_filled(h::RobinDict, i) L = length(h.keys) (1 <= i <= L) || return 0 for j = i:L @inbounds if isslotfilled(h, j) return j end end return 0 end ##CHUNK 3 h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) #FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 function Base.pop!(h::SwissDict, key) index = ht_keyindex(h, key) return index > 0 ? _pop!(h, index) : throw(KeyError(key)) end function Base.pop!(h::SwissDict, key, default) index = ht_keyindex(h, key) return index > 0 ? _pop!(h, index) : default end function Base.pop!(h::SwissDict) isempty(h) && throw(ArgumentError("SwissDict must be non-empty")) is = _iterslots(h, h.idxfloor) @assert is !== nothing idx, s = is @inbounds key = h.keys[idx] @inbounds val = h.vals[idx] _delete!(h, idx) h.idxfloor = idx return key => val #FILE: DataStructures.jl/src/priorityqueue.jl ##CHUNK 1 "b" => 3 ``` """ function Base.popfirst!(pq::PriorityQueue) x = pq.xs[1] y = pop!(pq.xs) if !isempty(pq) @inbounds pq.xs[1] = y pq.index[y.first] = 1 percolate_down!(pq, 1) end delete!(pq.index, x.first) return x end function Base.popat!(pq::PriorityQueue, key) idx = pq.index[key] force_up!(pq, idx) popfirst!(pq) end #CURRENT FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 "a" => 1 ``` """ function Base.delete!(h::OrderedRobinDict, key) pop!(h, key) return h end function _delete!(h::OrderedRobinDict, index) @inbounds h.dict[h.keys[index]] = -1 h.count -= 1 check_for_rehash(h) ? rehash!(h) : h end function get_first_filled_index(h::OrderedRobinDict) index = 1 while (true) isslotfilled(h, index) && return index index += 1 end ##CHUNK 2 end function get_next_filled_index(h::OrderedRobinDict, index) # get the next filled slot, including index and beyond while (index <= length(h.keys)) isslotfilled(h, index) && return index index += 1 end return -1 end Base.@propagate_inbounds function Base.iterate(h::OrderedRobinDict) isempty(h) && return nothing check_for_rehash(h) && rehash!(h) index = get_first_filled_index(h) return (Pair(h.keys[index], h.vals[index]), index+1) end Base.@propagate_inbounds function Base.iterate(h::OrderedRobinDict, i) length(h.keys) < i && return nothing ##CHUNK 3 h.count -= 1 check_for_rehash(h) ? 
rehash!(h) : h end function get_first_filled_index(h::OrderedRobinDict) index = 1 while (true) isslotfilled(h, index) && return index index += 1 end end function get_next_filled_index(h::OrderedRobinDict, index) # get the next filled slot, including index and beyond while (index <= length(h.keys)) isslotfilled(h, index) && return index index += 1 end return -1 end ##CHUNK 4 julia> empty!(A); julia> A OrderedRobinDict{String, Int64}() ``` """ function Base.empty!(h::OrderedRobinDict{K,V}) where {K, V} empty!(h.dict) empty!(h.keys) empty!(h.vals) h.count = 0 return h end function _setindex!(h::OrderedRobinDict, v, key) hk, hv = h.keys, h.vals push!(hk, key) push!(hv, v) nk = length(hk) ##CHUNK 5 empty!(h.vals) h.count = 0 return h end function _setindex!(h::OrderedRobinDict, v, key) hk, hv = h.keys, h.vals push!(hk, key) push!(hv, v) nk = length(hk) @inbounds h.dict[key] = Int32(nk) h.count += 1 end function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0
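A minimal usage sketch for the `Base.pop!(h::OrderedRobinDict)` method in this record, assuming DataStructures.jl is loaded and that `OrderedRobinDict` accepts `Pair` arguments in its constructor; the values in the comments are expectations implied by the backward scan for the last filled slot, not captured output.

using DataStructures

d = OrderedRobinDict("a" => 1, "b" => 2, "c" => 3)
kv = pop!(d)    # scans backward for the last filled slot, so expect "c" => 3
length(d)       # expect 2; "a" and "b" remain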
49
67
DataStructures.jl
37
function PriorityQueue{K,V,O}(o::O, itr) where {K,V,O<:Ordering} xs = Vector{Pair{K,V}}(undef, length(itr)) index = Dict{K, Int}() for (i, (k, v)) in enumerate(itr) xs[i] = Pair{K,V}(k, v) if haskey(index, k) throw(ArgumentError("PriorityQueue keys must be unique")) end index[k] = i end pq = new{K,V,O}(xs, o, index) # heapify for i in heapparent(length(pq.xs)):-1:1 percolate_down!(pq, i) end return pq end
function PriorityQueue{K,V,O}(o::O, itr) where {K,V,O<:Ordering} xs = Vector{Pair{K,V}}(undef, length(itr)) index = Dict{K, Int}() for (i, (k, v)) in enumerate(itr) xs[i] = Pair{K,V}(k, v) if haskey(index, k) throw(ArgumentError("PriorityQueue keys must be unique")) end index[k] = i end pq = new{K,V,O}(xs, o, index) # heapify for i in heapparent(length(pq.xs)):-1:1 percolate_down!(pq, i) end return pq end
[ 49, 67 ]
function PriorityQueue{K,V,O}(o::O, itr) where {K,V,O<:Ordering} xs = Vector{Pair{K,V}}(undef, length(itr)) index = Dict{K, Int}() for (i, (k, v)) in enumerate(itr) xs[i] = Pair{K,V}(k, v) if haskey(index, k) throw(ArgumentError("PriorityQueue keys must be unique")) end index[k] = i end pq = new{K,V,O}(xs, o, index) # heapify for i in heapparent(length(pq.xs)):-1:1 percolate_down!(pq, i) end return pq end
function PriorityQueue{K,V,O}(o::O, itr) where {K,V,O<:Ordering} xs = Vector{Pair{K,V}}(undef, length(itr)) index = Dict{K, Int}() for (i, (k, v)) in enumerate(itr) xs[i] = Pair{K,V}(k, v) if haskey(index, k) throw(ArgumentError("PriorityQueue keys must be unique")) end index[k] = i end pq = new{K,V,O}(xs, o, index) # heapify for i in heapparent(length(pq.xs)):-1:1 percolate_down!(pq, i) end return pq end
PriorityQueue{K,V,O}
49
67
src/priorityqueue.jl
#CURRENT FILE: DataStructures.jl/src/priorityqueue.jl ##CHUNK 1 return default else return pq.xs[i].second end end # Change the priority of an existing element, or enqueue it if it isn't present. function Base.setindex!(pq::PriorityQueue{K, V}, value, key) where {K,V} i = get(pq.index, key, 0) if i != 0 @inbounds oldvalue = pq.xs[i].second pq.xs[i] = Pair{K,V}(key, value) if lt(pq.o, oldvalue, value) percolate_down!(pq, i) else percolate_up!(pq, i) end else push!(pq, key=>value) end ##CHUNK 2 "a" => 2 "b" => 3 ``` """ struct PriorityQueue{K,V,O<:Ordering} <: AbstractDict{K,V} # Binary heap of (element, priority) pairs. xs::Vector{Pair{K,V}} o::O # Map elements to their index in xs index::Dict{K, Int} function PriorityQueue{K,V,O}(o::O) where {K,V,O<:Ordering} new{K,V,O}(Vector{Pair{K,V}}(), o, Dict{K, Int}()) end PriorityQueue{K, V, O}(xs::Vector{Pair{K,V}}, o::O, index::Dict{K, Int}) where {K,V,O<:Ordering} = new(xs, o, index) end ##CHUNK 3 @inbounds oldvalue = pq.xs[i].second pq.xs[i] = Pair{K,V}(key, value) if lt(pq.o, oldvalue, value) percolate_down!(pq, i) else percolate_up!(pq, i) end else push!(pq, key=>value) end return value end """ push!(pq::PriorityQueue{K,V}, pair::Pair{K,V}) where {K,V} Insert the a key `k` into a priority queue `pq` with priority `v`. # Examples ##CHUNK 4 "a" => 1 "b" => 2 "c" => 3 "d" => 4 "e" => 5 ``` """ function Base.push!(pq::PriorityQueue{K,V}, pair::Pair{K,V}) where {K,V} key = pair.first if haskey(pq, key) throw(ArgumentError("PriorityQueue keys must be unique")) end push!(pq.xs, pair) pq.index[key] = length(pq) percolate_up!(pq, length(pq)) return pq end Base.push!(pq::PriorityQueue{K,V}, kv::Pair) where {K,V} = push!(pq, Pair{K,V}(kv.first, kv.second)) ##CHUNK 5 PriorityQueue{K,V}(ps::Pair...) where {K,V} = PriorityQueue{K,V,ForwardOrdering}(Forward, ps) PriorityQueue{K,V}(o::Ord, ps::Pair...) where {K,V,Ord<:Ordering} = PriorityQueue{K,V,Ord}(o, ps) # Construction specifying Key/Value types # e.g., PriorityQueue{Int,Float64}([1=>1, 2=>2.0]) PriorityQueue{K,V}(kv) where {K,V} = PriorityQueue{K,V}(Forward, kv) function PriorityQueue{K,V}(o::Ord, kv) where {K,V,Ord<:Ordering} try PriorityQueue{K,V,Ord}(o, kv) catch e if not_iterator_of_pairs(kv) throw(ArgumentError("PriorityQueue(kv): kv needs to be an iterator of tuples or pairs")) else rethrow(e) end end end # Construction inferring Key/Value types from input # e.g. PriorityQueue{} ##CHUNK 6 function Base.merge!(combine::Function, d::AbstractDict, other::PriorityQueue) next = iterate(other, false) while next !== nothing (k, v), state = next d[k] = haskey(d, k) ? combine(d[k], v) : v next = iterate(other, state) end return d end # Opaque not to be exported. mutable struct _PQIteratorState{K, V, O <: Ordering} pq::PriorityQueue{K, V, O} _PQIteratorState{K, V, O}(pq::PriorityQueue{K, V, O}) where {K, V, O <: Ordering} = new(pq) end _PQIteratorState(pq::PriorityQueue{K, V, O}) where {K, V, O <: Ordering} = _PQIteratorState{K, V, O}(pq) # Unordered iteration through key value pairs in a PriorityQueue ##CHUNK 7 end end pq.index[x.first] = i pq.xs[i] = x end # Equivalent to percolate_up! with an element having lower priority than any other function force_up!(pq::PriorityQueue, i::Integer) x = pq.xs[i] @inbounds while i > 1 j = heapparent(i) pq.index[pq.xs[j].first] = i pq.xs[i] = pq.xs[j] i = j end pq.index[x.first] = i pq.xs[i] = x end Base.getindex(pq::PriorityQueue, key) = pq.xs[pq.index[key]].second ##CHUNK 8 # O(n) iteration. 
function _iterate(pq::PriorityQueue, state) (k, idx), i = state return (pq.xs[idx], i) end _iterate(pq::PriorityQueue, ::Nothing) = nothing Base.iterate(pq::PriorityQueue, ::Nothing) = nothing function Base.iterate(pq::PriorityQueue, ordered::Bool=true) if ordered isempty(pq) && return nothing state = _PQIteratorState(PriorityQueue(copy(pq.xs), pq.o, copy(pq.index))) return popfirst!(state.pq), state else _iterate(pq, iterate(pq.index)) end end function Base.iterate(pq::PriorityQueue, state::_PQIteratorState) ##CHUNK 9 # Opaque not to be exported. mutable struct _PQIteratorState{K, V, O <: Ordering} pq::PriorityQueue{K, V, O} _PQIteratorState{K, V, O}(pq::PriorityQueue{K, V, O}) where {K, V, O <: Ordering} = new(pq) end _PQIteratorState(pq::PriorityQueue{K, V, O}) where {K, V, O <: Ordering} = _PQIteratorState{K, V, O}(pq) # Unordered iteration through key value pairs in a PriorityQueue # O(n) iteration. function _iterate(pq::PriorityQueue, state) (k, idx), i = state return (pq.xs[idx], i) end _iterate(pq::PriorityQueue, ::Nothing) = nothing Base.iterate(pq::PriorityQueue, ::Nothing) = nothing function Base.iterate(pq::PriorityQueue, ordered::Bool=true) ##CHUNK 10 # A copy constructor PriorityQueue(xs::Vector{Pair{K,V}}, o::O, index::Dict{K, Int}) where {K,V,O<:Ordering} = PriorityQueue{K,V,O}(xs, o, index) # Any-Any constructors PriorityQueue(o::Ordering=Forward) = PriorityQueue{Any,Any,typeof(o)}(o) # Construction from Pairs PriorityQueue(ps::Pair...) = PriorityQueue(Forward, ps) PriorityQueue(o::Ordering, ps::Pair...) = PriorityQueue(o, ps) PriorityQueue{K,V}(ps::Pair...) where {K,V} = PriorityQueue{K,V,ForwardOrdering}(Forward, ps) PriorityQueue{K,V}(o::Ord, ps::Pair...) where {K,V,Ord<:Ordering} = PriorityQueue{K,V,Ord}(o, ps) # Construction specifying Key/Value types # e.g., PriorityQueue{Int,Float64}([1=>1, 2=>2.0]) PriorityQueue{K,V}(kv) where {K,V} = PriorityQueue{K,V}(Forward, kv) function PriorityQueue{K,V}(o::Ord, kv) where {K,V,Ord<:Ordering} try PriorityQueue{K,V,Ord}(o, kv) catch e
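A short sketch of how this inner constructor is reached through the public `PriorityQueue` constructors shown in the surrounding context, assuming DataStructures.jl; the commented values are expectations rather than verified output.

using DataStructures

# A vector of pairs funnels into the heapifying constructor above.
pq = PriorityQueue{String,Int}(["a" => 3, "b" => 1, "c" => 2])
first(pq)    # expect "b" => 1, the lowest priority after heapify

# Duplicate keys are rejected while the index is built:
# PriorityQueue{Int,Int}([1 => 1, 1 => 2])    # would throw ArgumentError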
237
251
DataStructures.jl
38
function Base.setindex!(pq::PriorityQueue{K, V}, value, key) where {K,V} i = get(pq.index, key, 0) if i != 0 @inbounds oldvalue = pq.xs[i].second pq.xs[i] = Pair{K,V}(key, value) if lt(pq.o, oldvalue, value) percolate_down!(pq, i) else percolate_up!(pq, i) end else push!(pq, key=>value) end return value end
function Base.setindex!(pq::PriorityQueue{K, V}, value, key) where {K,V} i = get(pq.index, key, 0) if i != 0 @inbounds oldvalue = pq.xs[i].second pq.xs[i] = Pair{K,V}(key, value) if lt(pq.o, oldvalue, value) percolate_down!(pq, i) else percolate_up!(pq, i) end else push!(pq, key=>value) end return value end
[ 237, 251 ]
function Base.setindex!(pq::PriorityQueue{K, V}, value, key) where {K,V} i = get(pq.index, key, 0) if i != 0 @inbounds oldvalue = pq.xs[i].second pq.xs[i] = Pair{K,V}(key, value) if lt(pq.o, oldvalue, value) percolate_down!(pq, i) else percolate_up!(pq, i) end else push!(pq, key=>value) end return value end
function Base.setindex!(pq::PriorityQueue{K, V}, value, key) where {K,V} i = get(pq.index, key, 0) if i != 0 @inbounds oldvalue = pq.xs[i].second pq.xs[i] = Pair{K,V}(key, value) if lt(pq.o, oldvalue, value) percolate_down!(pq, i) else percolate_up!(pq, i) end else push!(pq, key=>value) end return value end
Base.setindex!
237
251
src/priorityqueue.jl
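A brief sketch of the `setindex!` behaviour recorded here (update an existing key's priority, or fall through to `push!` for a new key), assuming DataStructures.jl; commented values are expectations.

using DataStructures

pq = PriorityQueue{String,Int}(["a" => 2, "b" => 3])
pq["a"] = 10    # existing key: larger priority, so the element percolates down
pq["c"] = 1     # missing key: handled by push!
first(pq)       # expect "c" => 1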
#CURRENT FILE: DataStructures.jl/src/priorityqueue.jl ##CHUNK 1 break end end pq.index[x.first] = i pq.xs[i] = x end function percolate_up!(pq::PriorityQueue, i::Integer) x = pq.xs[i] @inbounds while i > 1 j = heapparent(i) xj = pq.xs[j] if lt(pq.o, x.second, xj.second) pq.index[xj.first] = i pq.xs[i] = xj i = j else break end ##CHUNK 2 x = pq.xs[i] @inbounds while (l = heapleft(i)) <= length(pq) r = heapright(i) j = r > length(pq) || lt(pq.o, pq.xs[l].second, pq.xs[r].second) ? l : r xj = pq.xs[j] if lt(pq.o, xj.second, x.second) pq.index[xj.first] = i pq.xs[i] = xj i = j else break end end pq.index[x.first] = i pq.xs[i] = x end function percolate_up!(pq::PriorityQueue, i::Integer) x = pq.xs[i] ##CHUNK 3 @inbounds while i > 1 j = heapparent(i) xj = pq.xs[j] if lt(pq.o, x.second, xj.second) pq.index[xj.first] = i pq.xs[i] = xj i = j else break end end pq.index[x.first] = i pq.xs[i] = x end # Equivalent to percolate_up! with an element having lower priority than any other function force_up!(pq::PriorityQueue, i::Integer) x = pq.xs[i] @inbounds while i > 1 j = heapparent(i) ##CHUNK 4 index = Dict{K, Int}() for (i, (k, v)) in enumerate(itr) xs[i] = Pair{K,V}(k, v) if haskey(index, k) throw(ArgumentError("PriorityQueue keys must be unique")) end index[k] = i end pq = new{K,V,O}(xs, o, index) # heapify for i in heapparent(length(pq.xs)):-1:1 percolate_down!(pq, i) end return pq end end # A copy constructor ##CHUNK 5 """ first(pq::PriorityQueue) Return the lowest priority pair (`k`, `v`) from `pq` without removing it from the priority queue. """ Base.first(pq::PriorityQueue) = first(pq.xs) function percolate_down!(pq::PriorityQueue, i::Integer) x = pq.xs[i] @inbounds while (l = heapleft(i)) <= length(pq) r = heapright(i) j = r > length(pq) || lt(pq.o, pq.xs[l].second, pq.xs[r].second) ? l : r xj = pq.xs[j] if lt(pq.o, xj.second, x.second) pq.index[xj.first] = i pq.xs[i] = xj i = j else ##CHUNK 6 y = pop!(pq.xs) if !isempty(pq) @inbounds pq.xs[1] = y pq.index[y.first] = 1 percolate_down!(pq, 1) end delete!(pq.index, x.first) return x end function Base.popat!(pq::PriorityQueue, key) idx = pq.index[key] force_up!(pq, idx) popfirst!(pq) end """ delete!(pq::PriorityQueue, key) Delete the mapping for the given `key` in a priority queue `pq` and return the priority queue. ##CHUNK 7 end pq.index[x.first] = i pq.xs[i] = x end # Equivalent to percolate_up! with an element having lower priority than any other function force_up!(pq::PriorityQueue, i::Integer) x = pq.xs[i] @inbounds while i > 1 j = heapparent(i) pq.index[pq.xs[j].first] = i pq.xs[i] = pq.xs[j] i = j end pq.index[x.first] = i pq.xs[i] = x end Base.getindex(pq::PriorityQueue, key) = pq.xs[pq.index[key]].second ##CHUNK 8 "c" => 1 julia> a PriorityQueue{String, Int64, Base.Order.ForwardOrdering} with 2 entries: "a" => 2 "b" => 3 ``` """ function Base.popfirst!(pq::PriorityQueue) x = pq.xs[1] y = pop!(pq.xs) if !isempty(pq) @inbounds pq.xs[1] = y pq.index[y.first] = 1 percolate_down!(pq, 1) end delete!(pq.index, x.first) return x end ##CHUNK 9 function Base.get(pq::PriorityQueue, key, default) i = get(pq.index, key, 0) i == 0 ? default : pq.xs[i].second end function Base.get!(pq::PriorityQueue, key, default) i = get(pq.index, key, 0) if i == 0 push!(pq, key=>default) return default else return pq.xs[i].second end end # Change the priority of an existing element, or enqueue it if it isn't present. 
""" push!(pq::PriorityQueue{K,V}, pair::Pair{K,V}) where {K,V} ##CHUNK 10 """ function Base.push!(pq::PriorityQueue{K,V}, pair::Pair{K,V}) where {K,V} key = pair.first if haskey(pq, key) throw(ArgumentError("PriorityQueue keys must be unique")) end push!(pq.xs, pair) pq.index[key] = length(pq) percolate_up!(pq, length(pq)) return pq end Base.push!(pq::PriorityQueue{K,V}, kv::Pair) where {K,V} = push!(pq, Pair{K,V}(kv.first, kv.second)) """ popfirst!(pq::PriorityQueue) Remove and return the lowest priority key and value from a priority queue `pq` as a pair.
277
287
DataStructures.jl
39
function Base.push!(pq::PriorityQueue{K,V}, pair::Pair{K,V}) where {K,V} key = pair.first if haskey(pq, key) throw(ArgumentError("PriorityQueue keys must be unique")) end push!(pq.xs, pair) pq.index[key] = length(pq) percolate_up!(pq, length(pq)) return pq end
function Base.push!(pq::PriorityQueue{K,V}, pair::Pair{K,V}) where {K,V} key = pair.first if haskey(pq, key) throw(ArgumentError("PriorityQueue keys must be unique")) end push!(pq.xs, pair) pq.index[key] = length(pq) percolate_up!(pq, length(pq)) return pq end
[ 277, 287 ]
function Base.push!(pq::PriorityQueue{K,V}, pair::Pair{K,V}) where {K,V} key = pair.first if haskey(pq, key) throw(ArgumentError("PriorityQueue keys must be unique")) end push!(pq.xs, pair) pq.index[key] = length(pq) percolate_up!(pq, length(pq)) return pq end
function Base.push!(pq::PriorityQueue{K,V}, pair::Pair{K,V}) where {K,V} key = pair.first if haskey(pq, key) throw(ArgumentError("PriorityQueue keys must be unique")) end push!(pq.xs, pair) pq.index[key] = length(pq) percolate_up!(pq, length(pq)) return pq end
Base.push!
277
287
src/priorityqueue.jl
#FILE: DataStructures.jl/test/test_priority_queue.jl ##CHUNK 1 ks, vs = 1:n, rand(1:pmax, n) pq = PriorityQueue(zip(ks, vs)) @test_throws ArgumentError push!(pq, 1=>10) end @testset "Iteration" begin pq = PriorityQueue(priorities) pq2 = PriorityQueue() for kv in pq push!(pq2, kv) end @test pq == pq2 end @testset "enqueing pairs via push!" begin pq = PriorityQueue() for kv in priorities push!(pq, kv) end test_issorted!(pq, priorities) ##CHUNK 2 ks, vs = 1:n, rand(1:pmax, n) priorities = Dict(zip(ks, vs)) @testset "first" begin pq1 = PriorityQueue(priorities) lowpri = findmin(vs) @test first(pq1)[2] == pq1[ks[lowpri[2]]] end @testset "enqueue error throw" begin ks, vs = 1:n, rand(1:pmax, n) pq = PriorityQueue(zip(ks, vs)) @test_throws ArgumentError push!(pq, 1=>10) end @testset "Iteration" begin pq = PriorityQueue(priorities) pq2 = PriorityQueue() for kv in pq push!(pq2, kv) ##CHUNK 3 for (k, v) in priorities pq[k] = v end for _ in 1:n k = rand(1:n) v = rand(1:pmax) pq[k] = v priorities[k] = v end test_issorted!(pq, priorities) end @testset "dequeuing" begin pq = PriorityQueue(priorities) @test_throws KeyError popat!(pq, 0) v, _ = popat!(pq, 10) @test v == 10 #CURRENT FILE: DataStructures.jl/src/priorityqueue.jl ##CHUNK 1 index = Dict{K, Int}() for (i, (k, v)) in enumerate(itr) xs[i] = Pair{K,V}(k, v) if haskey(index, k) throw(ArgumentError("PriorityQueue keys must be unique")) end index[k] = i end pq = new{K,V,O}(xs, o, index) # heapify for i in heapparent(length(pq.xs)):-1:1 percolate_down!(pq, i) end return pq end end # A copy constructor ##CHUNK 2 pq.xs[i] = Pair{K,V}(key, value) if lt(pq.o, oldvalue, value) percolate_down!(pq, i) else percolate_up!(pq, i) end else push!(pq, key=>value) end return value end """ push!(pq::PriorityQueue{K,V}, pair::Pair{K,V}) where {K,V} Insert the a key `k` into a priority queue `pq` with priority `v`. # Examples ```jldoctest ##CHUNK 3 else return pq.xs[i].second end end # Change the priority of an existing element, or enqueue it if it isn't present. function Base.setindex!(pq::PriorityQueue{K, V}, value, key) where {K,V} i = get(pq.index, key, 0) if i != 0 @inbounds oldvalue = pq.xs[i].second pq.xs[i] = Pair{K,V}(key, value) if lt(pq.o, oldvalue, value) percolate_down!(pq, i) else percolate_up!(pq, i) end else push!(pq, key=>value) end return value ##CHUNK 4 """ first(pq::PriorityQueue) Return the lowest priority pair (`k`, `v`) from `pq` without removing it from the priority queue. """ Base.first(pq::PriorityQueue) = first(pq.xs) function percolate_down!(pq::PriorityQueue, i::Integer) x = pq.xs[i] @inbounds while (l = heapleft(i)) <= length(pq) r = heapright(i) j = r > length(pq) || lt(pq.o, pq.xs[l].second, pq.xs[r].second) ? l : r xj = pq.xs[j] if lt(pq.o, xj.second, x.second) pq.index[xj.first] = i pq.xs[i] = xj i = j else ##CHUNK 5 ``` """ function Base.popfirst!(pq::PriorityQueue) x = pq.xs[1] y = pop!(pq.xs) if !isempty(pq) @inbounds pq.xs[1] = y pq.index[y.first] = 1 percolate_down!(pq, 1) end delete!(pq.index, x.first) return x end function Base.popat!(pq::PriorityQueue, key) idx = pq.index[key] force_up!(pq, idx) popfirst!(pq) end ##CHUNK 6 pq.index[pq.xs[j].first] = i pq.xs[i] = pq.xs[j] i = j end pq.index[x.first] = i pq.xs[i] = x end Base.getindex(pq::PriorityQueue, key) = pq.xs[pq.index[key]].second function Base.get(pq::PriorityQueue, key, default) i = get(pq.index, key, 0) i == 0 ? 
default : pq.xs[i].second end function Base.get!(pq::PriorityQueue, key, default) i = get(pq.index, key, 0) if i == 0 push!(pq, key=>default) return default ##CHUNK 7 function Base.get(pq::PriorityQueue, key, default) i = get(pq.index, key, 0) i == 0 ? default : pq.xs[i].second end function Base.get!(pq::PriorityQueue, key, default) i = get(pq.index, key, 0) if i == 0 push!(pq, key=>default) return default else return pq.xs[i].second end end # Change the priority of an existing element, or enqueue it if it isn't present. function Base.setindex!(pq::PriorityQueue{K, V}, value, key) where {K,V} i = get(pq.index, key, 0) if i != 0 @inbounds oldvalue = pq.xs[i].second
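A sketch of `push!` on a `PriorityQueue`, assuming DataStructures.jl; it exercises the uniqueness check and the `percolate_up!` call in the method above.

using DataStructures

pq = PriorityQueue{String,Int}()
push!(pq, "a" => 5)
push!(pq, "b" => 1)    # new minimum percolates up to the front
first(pq)              # expect "b" => 1
# push!(pq, "a" => 7)  # would throw ArgumentError: keys must be unique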
314
324
DataStructures.jl
40
function Base.popfirst!(pq::PriorityQueue) x = pq.xs[1] y = pop!(pq.xs) if !isempty(pq) @inbounds pq.xs[1] = y pq.index[y.first] = 1 percolate_down!(pq, 1) end delete!(pq.index, x.first) return x end
function Base.popfirst!(pq::PriorityQueue) x = pq.xs[1] y = pop!(pq.xs) if !isempty(pq) @inbounds pq.xs[1] = y pq.index[y.first] = 1 percolate_down!(pq, 1) end delete!(pq.index, x.first) return x end
[ 314, 324 ]
function Base.popfirst!(pq::PriorityQueue) x = pq.xs[1] y = pop!(pq.xs) if !isempty(pq) @inbounds pq.xs[1] = y pq.index[y.first] = 1 percolate_down!(pq, 1) end delete!(pq.index, x.first) return x end
function Base.popfirst!(pq::PriorityQueue) x = pq.xs[1] y = pop!(pq.xs) if !isempty(pq) @inbounds pq.xs[1] = y pq.index[y.first] = 1 percolate_down!(pq, 1) end delete!(pq.index, x.first) return x end
Base.popfirst!
314
324
src/priorityqueue.jl
#FILE: DataStructures.jl/src/heaps/arrays_as_heaps.jl ##CHUNK 1 @inline percolate_up!(xs::AbstractArray, i::Integer, o::Ordering) = percolate_up!(xs, i, xs[i], o) """ heappop!(v, [ord]) Given a binary heap-ordered array, remove and return the lowest ordered element. For efficiency, this function does not check that the array is indeed heap-ordered. """ function heappop!(xs::AbstractArray, o::Ordering=Forward) x = xs[1] y = pop!(xs) if !isempty(xs) percolate_down!(xs, 1, y, o) end return x end """ heappush!(v, x, [ord]) ##CHUNK 2 x = xs[1] y = pop!(xs) if !isempty(xs) percolate_down!(xs, 1, y, o) end return x end """ heappush!(v, x, [ord]) Given a binary heap-ordered array, push a new element `x`, preserving the heap property. For efficiency, this function does not check that the array is indeed heap-ordered. """ @inline function heappush!(xs::AbstractArray, x, o::Ordering=Forward) push!(xs, x) percolate_up!(xs, length(xs), o) return xs end #FILE: DataStructures.jl/src/deque.jl ##CHUNK 1 d.len -= 1 return x end """ popfirst!(d::Deque{T}) where T Remove the element at the front of deque `d`. """ function Base.popfirst!(d::Deque{T}) where T isempty(d) && throw(ArgumentError("Deque must be non-empty")) head = d.head @assert head.back >= head.front @inbounds x = head.data[head.front] Base._unsetindex!(head.data, head.front) # see issue/884 head.front += 1 if head.back < head.front if d.nblocks > 1 # release and detach the head block #FILE: DataStructures.jl/src/queue.jl ##CHUNK 1 Get the last element in queue `q`. """ Base.last(s::Queue) = last(s.store) """ push!(q::Queue, x) Inserts the value `x` to the end of the queue `q`. """ function Base.push!(q::Queue, x) push!(q.store, x) return q end """ popfirst!(q::Queue) Removes an element from the front of the queue `q` and returns it. """ Base.popfirst!(s::Queue) = popfirst!(s.store) #FILE: DataStructures.jl/src/circ_deque.jl ##CHUNK 1 D.n += 1 tmp = D.first - 1 D.first = ifelse(tmp < 1, D.capacity, tmp) @inbounds D.buffer[D.first] = v D end """ popfirst!(D::CircularDeque) Remove the element at the front. """ @inline Base.@propagate_inbounds function Base.popfirst!(D::CircularDeque) v = first(D) Base._unsetindex!(D.buffer, D.first) # see issue/884 D.n -= 1 tmp = D.first + 1 D.first = ifelse(tmp > D.capacity, 1, tmp) v end #CURRENT FILE: DataStructures.jl/src/priorityqueue.jl ##CHUNK 1 break end end pq.index[x.first] = i pq.xs[i] = x end function percolate_up!(pq::PriorityQueue, i::Integer) x = pq.xs[i] @inbounds while i > 1 j = heapparent(i) xj = pq.xs[j] if lt(pq.o, x.second, xj.second) pq.index[xj.first] = i pq.xs[i] = xj i = j else break end ##CHUNK 2 """ first(pq::PriorityQueue) Return the lowest priority pair (`k`, `v`) from `pq` without removing it from the priority queue. """ Base.first(pq::PriorityQueue) = first(pq.xs) function percolate_down!(pq::PriorityQueue, i::Integer) x = pq.xs[i] @inbounds while (l = heapleft(i)) <= length(pq) r = heapright(i) j = r > length(pq) || lt(pq.o, pq.xs[l].second, pq.xs[r].second) ? l : r xj = pq.xs[j] if lt(pq.o, xj.second, x.second) pq.index[xj.first] = i pq.xs[i] = xj i = j else ##CHUNK 3 @inbounds while i > 1 j = heapparent(i) xj = pq.xs[j] if lt(pq.o, x.second, xj.second) pq.index[xj.first] = i pq.xs[i] = xj i = j else break end end pq.index[x.first] = i pq.xs[i] = x end # Equivalent to percolate_up! 
with an element having lower priority than any other function force_up!(pq::PriorityQueue, i::Integer) x = pq.xs[i] @inbounds while i > 1 j = heapparent(i) ##CHUNK 4 x = pq.xs[i] @inbounds while (l = heapleft(i)) <= length(pq) r = heapright(i) j = r > length(pq) || lt(pq.o, pq.xs[l].second, pq.xs[r].second) ? l : r xj = pq.xs[j] if lt(pq.o, xj.second, x.second) pq.index[xj.first] = i pq.xs[i] = xj i = j else break end end pq.index[x.first] = i pq.xs[i] = x end function percolate_up!(pq::PriorityQueue, i::Integer) x = pq.xs[i] ##CHUNK 5 else return pq.xs[i].second end end # Change the priority of an existing element, or enqueue it if it isn't present. function Base.setindex!(pq::PriorityQueue{K, V}, value, key) where {K,V} i = get(pq.index, key, 0) if i != 0 @inbounds oldvalue = pq.xs[i].second pq.xs[i] = Pair{K,V}(key, value) if lt(pq.o, oldvalue, value) percolate_down!(pq, i) else percolate_up!(pq, i) end else push!(pq, key=>value) end return value
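A sketch that drains a `PriorityQueue` with `popfirst!`, assuming DataStructures.jl; each call returns the current lowest-priority pair, matching the method above.

using DataStructures

pq = PriorityQueue{String,Int}(["a" => 3, "b" => 1, "c" => 2])
while !isempty(pq)
    k, v = popfirst!(pq)
    println(k, " => ", v)    # expect b, c, a, in increasing priority order
end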
54
64
DataStructures.jl
41
function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild end end return node end
function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild end end return node end
[ 54, 64 ]
function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild end end return node end
function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild end end return node end
search_node
54
64
src/red_black_tree.jl
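`search_node` is an internal helper; a small sketch of how it is reached through `haskey` on an `RBTree`, assuming DataStructures.jl. The commented results are expectations, not verified output.

using DataStructures

tree = RBTree{Int}()
push!(tree, 5)
push!(tree, 3)
haskey(tree, 3)    # expect true: search_node lands on the node holding 3
haskey(tree, 7)    # expect false: the search bottoms out at the sentinel tree.nil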
#FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 return node end function search_node(tree::AVLTree{K}, d::K) where K prev = nothing node = tree.root while node != nothing && node.data != nothing && node.data != d prev = node if d < node.data node = node.leftChild else node = node.rightChild end end return (node == nothing) ? prev : node end """ ##CHUNK 2 if balance < -1 if key > node.rightChild.data return left_rotate(node) else node.rightChild = right_rotate(node.rightChild) return left_rotate(node) end end return node end function Base.insert!(tree::AVLTree{K}, d::K) where K haskey(tree, d) && return tree tree.root = insert_node(tree.root, d) tree.count += 1 return tree end ##CHUNK 3 """ minimum_node(tree::AVLTree, node::AVLTreeNode) Returns the AVLTreeNode with minimum value in subtree of `node`. """ function minimum_node(node::Union{AVLTreeNode, Nothing}) while node != nothing && node.leftChild != nothing node = node.leftChild end return node end function search_node(tree::AVLTree{K}, d::K) where K prev = nothing node = tree.root while node != nothing && node.data != nothing && node.data != d prev = node if d < node.data #FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 x = maximum_node(s) splay!(tree, x) x.rightChild = t t.parent = x return x end end function search_node(tree::SplayTree{K}, d::K) where K node = tree.root prev = nothing while node != nothing && node.data != d prev = node if node.data < d node = node.rightChild else node = node.leftChild end end return (node == nothing) ? prev : node ##CHUNK 2 node = SplayTreeNode{K}(d) y = nothing x = tree.root while x !== nothing y = x if node.data > x.data x = x.rightChild else x = x.leftChild end end node.parent = y if y === nothing tree.root = node elseif node.data < y.data y.leftChild = node else y.rightChild = node ##CHUNK 3 prev = nothing while node != nothing && node.data != d prev = node if node.data < d node = node.rightChild else node = node.leftChild end end return (node == nothing) ? prev : node end function Base.haskey(tree::SplayTree{K}, d::K) where K node = tree.root if node === nothing return false else node = search_node(tree, d) (node === nothing) && return false is_found = (node.data == d) #CURRENT FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data node = node.leftChild else node = node.rightChild end end (z === tree.nil) && return tree y = z y_original_color = y.color x = RBTreeNode{K}() ##CHUNK 2 node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild end end node.parent = node_y if node_y == nothing tree.root = node elseif node.data < node_y.data node_y.leftChild = node else node_y.rightChild = node end ##CHUNK 3 node = search_node(tree, d) return (node.data == d) end """ insert_node!(tree::RBTree, node::RBTreeNode) Inserts `node` at proper location by traversing through the `tree` in a binary-search-tree fashion. """ function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild end ##CHUNK 4 end return node end """ delete!(tree::RBTree, key) Deletes `key` from `tree`, if present, else returns the unmodified tree. 
""" function Base.delete!(tree::RBTree{K}, d::K) where K z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data node = node.leftChild
211
230
DataStructures.jl
42
function Base.insert!(tree::RBTree{K}, d::K) where K # if the key exists in the tree, no need to insert haskey(tree, d) && return tree # insert, if not present in the tree node = RBTreeNode{K}(d) node.leftChild = node.rightChild = tree.nil insert_node!(tree, node) if node.parent == nothing node.color = false elseif node.parent.parent == nothing ; else fix_insert!(tree, node) end tree.count += 1 return tree end
function Base.insert!(tree::RBTree{K}, d::K) where K # if the key exists in the tree, no need to insert haskey(tree, d) && return tree # insert, if not present in the tree node = RBTreeNode{K}(d) node.leftChild = node.rightChild = tree.nil insert_node!(tree, node) if node.parent == nothing node.color = false elseif node.parent.parent == nothing ; else fix_insert!(tree, node) end tree.count += 1 return tree end
[ 211, 230 ]
function Base.insert!(tree::RBTree{K}, d::K) where K # if the key exists in the tree, no need to insert haskey(tree, d) && return tree # insert, if not present in the tree node = RBTreeNode{K}(d) node.leftChild = node.rightChild = tree.nil insert_node!(tree, node) if node.parent == nothing node.color = false elseif node.parent.parent == nothing ; else fix_insert!(tree, node) end tree.count += 1 return tree end
function Base.insert!(tree::RBTree{K}, d::K) where K # if the key exists in the tree, no need to insert haskey(tree, d) && return tree # insert, if not present in the tree node = RBTreeNode{K}(d) node.leftChild = node.rightChild = tree.nil insert_node!(tree, node) if node.parent == nothing node.color = false elseif node.parent.parent == nothing ; else fix_insert!(tree, node) end tree.count += 1 return tree end
Base.insert!
211
230
src/red_black_tree.jl
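A sketch of inserting into an `RBTree`, assuming DataStructures.jl; re-inserting an existing key hits the `haskey` guard in the method recorded here and leaves the tree unchanged.

using DataStructures

tree = RBTree{Int}()
for k in (10, 5, 20, 5)    # the second 5 is skipped by the haskey guard
    insert!(tree, k)
end
length(tree)               # expect 3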
#FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 s = x s.rightChild = nothing if s.leftChild !== nothing s.leftChild.parent = nothing end tree.root = _join!(tree, s.leftChild, t) tree.count -= 1 return tree end function Base.push!(tree::SplayTree{K}, d0) where K d = convert(K, d0) is_present = search_node(tree, d) if (is_present !== nothing) && (is_present.data == d) return tree end # only unique keys are inserted #FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 return node end function Base.insert!(tree::AVLTree{K}, d::K) where K haskey(tree, d) && return tree tree.root = insert_node(tree.root, d) tree.count += 1 return tree end """ push!(tree::AVLTree{K}, key) where K Insert `key` in AVL tree `tree`. """ function Base.push!(tree::AVLTree{K}, key) where K key0 = convert(K, key) insert!(tree, key0) end ##CHUNK 2 """ push!(tree::AVLTree{K}, key) where K Insert `key` in AVL tree `tree`. """ function Base.push!(tree::AVLTree{K}, key) where K key0 = convert(K, key) insert!(tree, key0) end function delete_node!(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = delete_node!(node.leftChild, key) elseif key > node.data node.rightChild = delete_node!(node.rightChild, key) else if node.leftChild == nothing result = node.rightChild return result ##CHUNK 3 if balance < -1 if key > node.rightChild.data return left_rotate(node) else node.rightChild = right_rotate(node.rightChild) return left_rotate(node) end end return node end function Base.insert!(tree::AVLTree{K}, d::K) where K haskey(tree, d) && return tree tree.root = insert_node(tree.root, d) tree.count += 1 return tree end #CURRENT FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 """ push!(tree, key) Inserts `key` in the `tree` if it is not present. """ function Base.push!(tree::RBTree{K}, key0) where K key = convert(K, key0) insert!(tree, key) end """ delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) This method is called when a black node is deleted because it violates the black depth property of the RBTree. """ function delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) while node != tree.root && !node.color if node == node.parent.leftChild sibling = node.parent.rightChild ##CHUNK 2 end end tree.root.color = false end """ insert!(tree, key) Inserts `key` in the `tree` if it is not present. """ """ push!(tree, key) Inserts `key` in the `tree` if it is not present. """ function Base.push!(tree::RBTree{K}, key0) where K key = convert(K, key0) insert!(tree, key) end ##CHUNK 3 else # uncle is black in color if (node == parent.leftChild) # node is leftChild of its parent node = parent right_rotate!(tree, node) end # node is rightChild of its parent node.parent.color = false node.parent.parent.color = true left_rotate!(tree, node.parent.parent) end end end tree.root.color = false end """ insert!(tree, key) Inserts `key` in the `tree` if it is not present. """ ##CHUNK 4 end end return node end """ haskey(tree, key) Returns true if `key` is present in the `tree`, else returns false. """ function Base.haskey(tree::RBTree{K}, d::K) where K node = search_node(tree, d) return (node.data == d) end """ insert_node!(tree::RBTree, node::RBTreeNode) Inserts `node` at proper location by traversing through the `tree` in a binary-search-tree fashion. 
""" ##CHUNK 5 function Base.haskey(tree::RBTree{K}, d::K) where K node = search_node(tree, d) return (node.data == d) end """ insert_node!(tree::RBTree, node::RBTreeNode) Inserts `node` at proper location by traversing through the `tree` in a binary-search-tree fashion. """ function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild ##CHUNK 6 """ delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) This method is called when a black node is deleted because it violates the black depth property of the RBTree. """ function delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) while node != tree.root && !node.color if node == node.parent.leftChild sibling = node.parent.rightChild if sibling.color sibling.color = false node.parent.color = true left_rotate!(tree, node.parent) sibling = node.parent.rightChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent
247
305
DataStructures.jl
43
function delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) while node != tree.root && !node.color if node == node.parent.leftChild sibling = node.parent.rightChild if sibling.color sibling.color = false node.parent.color = true left_rotate!(tree, node.parent) sibling = node.parent.rightChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent else if !sibling.rightChild.color sibling.leftChild.color = false sibling.color = true right_rotate!(tree, sibling) sibling = node.parent.rightChild end sibling.color = node.parent.color node.parent.color = false sibling.rightChild.color = false left_rotate!(tree, node.parent) node = tree.root end else sibling = node.parent.leftChild if sibling.color sibling.color = false node.parent.color = true right_rotate!(tree, node.parent) sibling = node.parent.leftChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent else if !sibling.leftChild.color sibling.rightChild.color = false sibling.color = true left_rotate!(tree, sibling) sibling = node.parent.leftChild end sibling.color = node.parent.color node.parent.color = false sibling.leftChild.color = false right_rotate!(tree, node.parent) node = tree.root end end end node.color = false return nothing end
function delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) while node != tree.root && !node.color if node == node.parent.leftChild sibling = node.parent.rightChild if sibling.color sibling.color = false node.parent.color = true left_rotate!(tree, node.parent) sibling = node.parent.rightChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent else if !sibling.rightChild.color sibling.leftChild.color = false sibling.color = true right_rotate!(tree, sibling) sibling = node.parent.rightChild end sibling.color = node.parent.color node.parent.color = false sibling.rightChild.color = false left_rotate!(tree, node.parent) node = tree.root end else sibling = node.parent.leftChild if sibling.color sibling.color = false node.parent.color = true right_rotate!(tree, node.parent) sibling = node.parent.leftChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent else if !sibling.leftChild.color sibling.rightChild.color = false sibling.color = true left_rotate!(tree, sibling) sibling = node.parent.leftChild end sibling.color = node.parent.color node.parent.color = false sibling.leftChild.color = false right_rotate!(tree, node.parent) node = tree.root end end end node.color = false return nothing end
[ 247, 305 ]
function delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) while node != tree.root && !node.color if node == node.parent.leftChild sibling = node.parent.rightChild if sibling.color sibling.color = false node.parent.color = true left_rotate!(tree, node.parent) sibling = node.parent.rightChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent else if !sibling.rightChild.color sibling.leftChild.color = false sibling.color = true right_rotate!(tree, sibling) sibling = node.parent.rightChild end sibling.color = node.parent.color node.parent.color = false sibling.rightChild.color = false left_rotate!(tree, node.parent) node = tree.root end else sibling = node.parent.leftChild if sibling.color sibling.color = false node.parent.color = true right_rotate!(tree, node.parent) sibling = node.parent.leftChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent else if !sibling.leftChild.color sibling.rightChild.color = false sibling.color = true left_rotate!(tree, sibling) sibling = node.parent.leftChild end sibling.color = node.parent.color node.parent.color = false sibling.leftChild.color = false right_rotate!(tree, node.parent) node = tree.root end end end node.color = false return nothing end
function delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) while node != tree.root && !node.color if node == node.parent.leftChild sibling = node.parent.rightChild if sibling.color sibling.color = false node.parent.color = true left_rotate!(tree, node.parent) sibling = node.parent.rightChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent else if !sibling.rightChild.color sibling.leftChild.color = false sibling.color = true right_rotate!(tree, sibling) sibling = node.parent.rightChild end sibling.color = node.parent.color node.parent.color = false sibling.rightChild.color = false left_rotate!(tree, node.parent) node = tree.root end else sibling = node.parent.leftChild if sibling.color sibling.color = false node.parent.color = true right_rotate!(tree, node.parent) sibling = node.parent.leftChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent else if !sibling.leftChild.color sibling.rightChild.color = false sibling.color = true left_rotate!(tree, sibling) sibling = node.parent.leftChild end sibling.color = node.parent.color node.parent.color = false sibling.leftChild.color = false right_rotate!(tree, node.parent) node = tree.root end end end node.color = false return nothing end
delete_fix
247
305
src/red_black_tree.jl
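`delete_fix` is internal rebalancing machinery: it is only reached from `delete!` when the removed (or spliced-out) node was black. A sketch of that public path, assuming DataStructures.jl; whether the fix-up loop actually runs depends on the colour of the affected node.

using DataStructures

tree = RBTree{Int}()
for k in 1:10
    push!(tree, k)
end
delete!(tree, 1)    # may invoke delete_fix to restore the red-black invariants
length(tree)        # expect 9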
#FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 # double rotation elseif node_x == parent.leftChild && parent == grand_parent.leftChild # zig-zig rotation right_rotate!(tree, grand_parent) right_rotate!(tree, parent) elseif node_x == parent.rightChild && parent == grand_parent.rightChild # zag-zag rotation left_rotate!(tree, grand_parent) left_rotate!(tree, parent) elseif node_x == parent.rightChild && parent == grand_parent.leftChild # zig-zag rotation left_rotate!(tree, node_x.parent) right_rotate!(tree, node_x.parent) else # zag-zig rotation right_rotate!(tree, node_x.parent) left_rotate!(tree, node_x.parent) end end end #FILE: DataStructures.jl/src/balanced_tree.jl ##CHUNK 1 function findkeyless(t::BalancedTree23, k) curnode = t.rootloc for depthcount = 1 : t.depth - 1 @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2le_nonleaf(t.ord, thisnode, k) : cmp3le_nonleaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? thisnode.child2 : thisnode.child3 end @inbounds thisnode = t.tree[curnode] cmp = thisnode.child3 == 0 ? cmp2le_leaf(t.ord, thisnode, k) : cmp3le_leaf(t.ord, thisnode, k) curnode = cmp == 1 ? thisnode.child1 : cmp == 2 ? thisnode.child2 : thisnode.child3 return curnode end ##CHUNK 2 if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 1 t.deletionchild[1] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) #FILE: DataStructures.jl/src/mutable_list.jl ##CHUNK 1 end function Base.append!(l1::MutableLinkedList{T}, l2::MutableLinkedList{T}) where T l1.node.prev.next = l2.node.next # l1's last's next is now l2's first l2.node.prev.next = l1.node # l2's last's next is now l1.node l2.node.next.prev = l1.node.prev # l2's first's prev is now l1's last l1.node.prev = l2.node.prev # l1's first's prev is now l2's last l1.len += length(l2) # make l2 empty l2.node.prev = l2.node l2.node.next = l2.node l2.len = 0 return l1 end function Base.append!(l::MutableLinkedList, elts...) for elt in elts for v in elt push!(l, v) end #CURRENT FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 else # uncle is black in color if (node == parent.leftChild) # node is leftChild of its parent node = parent right_rotate!(tree, node) end # node is rightChild of its parent node.parent.color = false node.parent.parent.color = true left_rotate!(tree, node.parent.parent) end end end tree.root.color = false end """ insert!(tree, key) Inserts `key` in the `tree` if it is not present. 
""" ##CHUNK 2 right_rotate!(tree, node.parent.parent) end else # parent is the rightChild of grand_parent uncle = grand_parent.leftChild if (uncle.color) # uncle is red in color grand_parent.color = true parent.color = false uncle.color = false node = grand_parent else # uncle is black in color if (node == parent.leftChild) # node is leftChild of its parent node = parent right_rotate!(tree, node) end # node is rightChild of its parent node.parent.color = false node.parent.parent.color = true left_rotate!(tree, node.parent.parent) end ##CHUNK 3 """ function right_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.leftChild node_x.leftChild = node_y.rightChild if node_y.rightChild !== tree.nil node_y.rightChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.rightChild = node_x node_x.parent = node_y end """ ##CHUNK 4 end end """ left_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a left-rotation on `node_x` and updates `tree.root`, if required. """ function left_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.rightChild node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else ##CHUNK 5 node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.leftChild = node_x node_x.parent = node_y end """ right_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a right-rotation on `node_x` and updates `tree.root`, if required. ##CHUNK 6 while node != tree.root && node.parent.color parent = node.parent grand_parent = parent.parent if (parent == grand_parent.leftChild) # parent is the leftChild of grand_parent uncle = grand_parent.rightChild if (uncle.color) # uncle is red in color grand_parent.color = true parent.color = false uncle.color = false node = grand_parent else # uncle is black in color if (node == parent.rightChild) # node is rightChild of its parent node = parent left_rotate!(tree, node) end # node is leftChild of its parent node.parent.color = false node.parent.parent.color = true
341
390
DataStructures.jl
44
function Base.delete!(tree::RBTree{K}, d::K) where K z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data node = node.leftChild else node = node.rightChild end end (z === tree.nil) && return tree y = z y_original_color = y.color x = RBTreeNode{K}() if z.leftChild === tree.nil x = z.rightChild rb_transplant(tree, z, z.rightChild) elseif z.rightChild === tree.nil x = z.leftChild rb_transplant(tree, z, z.leftChild) else y = minimum_node(tree, z.rightChild) y_original_color = y.color x = y.rightChild if y.parent == z x.parent = y else rb_transplant(tree, y, y.rightChild) y.rightChild = z.rightChild y.rightChild.parent = y end rb_transplant(tree, z, y) y.leftChild = z.leftChild y.leftChild.parent = y y.color = z.color end !y_original_color && delete_fix(tree, x) tree.count -= 1 return tree end
function Base.delete!(tree::RBTree{K}, d::K) where K z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data node = node.leftChild else node = node.rightChild end end (z === tree.nil) && return tree y = z y_original_color = y.color x = RBTreeNode{K}() if z.leftChild === tree.nil x = z.rightChild rb_transplant(tree, z, z.rightChild) elseif z.rightChild === tree.nil x = z.leftChild rb_transplant(tree, z, z.leftChild) else y = minimum_node(tree, z.rightChild) y_original_color = y.color x = y.rightChild if y.parent == z x.parent = y else rb_transplant(tree, y, y.rightChild) y.rightChild = z.rightChild y.rightChild.parent = y end rb_transplant(tree, z, y) y.leftChild = z.leftChild y.leftChild.parent = y y.color = z.color end !y_original_color && delete_fix(tree, x) tree.count -= 1 return tree end
[ 341, 390 ]
function Base.delete!(tree::RBTree{K}, d::K) where K z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data node = node.leftChild else node = node.rightChild end end (z === tree.nil) && return tree y = z y_original_color = y.color x = RBTreeNode{K}() if z.leftChild === tree.nil x = z.rightChild rb_transplant(tree, z, z.rightChild) elseif z.rightChild === tree.nil x = z.leftChild rb_transplant(tree, z, z.leftChild) else y = minimum_node(tree, z.rightChild) y_original_color = y.color x = y.rightChild if y.parent == z x.parent = y else rb_transplant(tree, y, y.rightChild) y.rightChild = z.rightChild y.rightChild.parent = y end rb_transplant(tree, z, y) y.leftChild = z.leftChild y.leftChild.parent = y y.color = z.color end !y_original_color && delete_fix(tree, x) tree.count -= 1 return tree end
function Base.delete!(tree::RBTree{K}, d::K) where K z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data node = node.leftChild else node = node.rightChild end end (z === tree.nil) && return tree y = z y_original_color = y.color x = RBTreeNode{K}() if z.leftChild === tree.nil x = z.rightChild rb_transplant(tree, z, z.rightChild) elseif z.rightChild === tree.nil x = z.leftChild rb_transplant(tree, z, z.leftChild) else y = minimum_node(tree, z.rightChild) y_original_color = y.color x = y.rightChild if y.parent == z x.parent = y else rb_transplant(tree, y, y.rightChild) y.rightChild = z.rightChild y.rightChild.parent = y end rb_transplant(tree, z, y) y.leftChild = z.leftChild y.leftChild.parent = y y.color = z.color end !y_original_color && delete_fix(tree, x) tree.count -= 1 return tree end
Base.delete!
341
390
src/red_black_tree.jl
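A sketch of `delete!` on an `RBTree`, assuming DataStructures.jl; deleting an absent key returns the tree unchanged, per the `z === tree.nil` early return in the method recorded here.

using DataStructures

tree = RBTree{Int}()
for k in (8, 3, 12)
    push!(tree, k)
end
delete!(tree, 3)
haskey(tree, 3)     # expect false
delete!(tree, 99)   # absent key: tree returned unmodified
length(tree)        # expect 2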
#FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 # double rotation elseif node_x == parent.leftChild && parent == grand_parent.leftChild # zig-zig rotation right_rotate!(tree, grand_parent) right_rotate!(tree, parent) elseif node_x == parent.rightChild && parent == grand_parent.rightChild # zag-zag rotation left_rotate!(tree, grand_parent) left_rotate!(tree, parent) elseif node_x == parent.rightChild && parent == grand_parent.leftChild # zig-zag rotation left_rotate!(tree, node_x.parent) right_rotate!(tree, node_x.parent) else # zag-zig rotation right_rotate!(tree, node_x.parent) left_rotate!(tree, node_x.parent) end end end ##CHUNK 2 SplayTree{K}() where K = new{K}(nothing, 0) end Base.length(tree::SplayTree) = tree.count SplayTree() = SplayTree{Any}() function left_rotate!(tree::SplayTree, node_x::SplayTreeNode) node_y = node_x.rightChild node_x.rightChild = node_y.leftChild if node_y.leftChild != nothing node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if node_x.parent == nothing tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else #FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 function delete_node!(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = delete_node!(node.leftChild, key) elseif key > node.data node.rightChild = delete_node!(node.rightChild, key) else if node.leftChild == nothing result = node.rightChild return result elseif node.rightChild == nothing result = node.leftChild return result else result = minimum_node(node.rightChild) node.data = result.data node.rightChild = delete_node!(node.rightChild, result.data) end end #FILE: DataStructures.jl/src/balanced_tree.jl ##CHUNK 1 if curdepth == t.depth replaceparent!(t.data, lc1, p) replaceparent!(t.data, lc2, p) else replaceparent!(t.tree, lc1, p) replaceparent!(t.tree, lc2, p) end push!(t.freetreeinds, leftsib) newchildcount = 1 t.deletionchild[1] = p else lc3 = t.tree[leftsib].child3 t.tree[p] = TreeNode{K}(lc3, t.deletionchild[1], 0, pparent, lk, defaultKey) sk2 = t.tree[leftsib].splitkey2 t.tree[leftsib] = TreeNode{K}(t.tree[leftsib].child1, t.tree[leftsib].child2, 0, pparent, t.tree[leftsib].splitkey1, defaultKey) #CURRENT FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 """ function right_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.leftChild node_x.leftChild = node_y.rightChild if node_y.rightChild !== tree.nil node_y.rightChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.rightChild = node_x node_x.parent = node_y end """ ##CHUNK 2 end end """ left_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a left-rotation on `node_x` and updates `tree.root`, if required. """ function left_rotate!(tree::RBTree, node_x::RBTreeNode) node_y = node_x.rightChild node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else ##CHUNK 3 """ delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) This method is called when a black node is deleted because it violates the black depth property of the RBTree. 
""" function delete_fix(tree::RBTree, node::Union{RBTreeNode, Nothing}) while node != tree.root && !node.color if node == node.parent.leftChild sibling = node.parent.rightChild if sibling.color sibling.color = false node.parent.color = true left_rotate!(tree, node.parent) sibling = node.parent.rightChild end if !sibling.rightChild.color && !sibling.leftChild.color sibling.color = true node = node.parent ##CHUNK 4 node_x.rightChild = node_y.leftChild if node_y.leftChild !== tree.nil node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if (node_x.parent == nothing) tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.leftChild = node_x node_x.parent = node_y end """ right_rotate!(tree::RBTree, node_x::RBTreeNode) Performs a right-rotation on `node_x` and updates `tree.root`, if required. ##CHUNK 5 else # uncle is black in color if (node == parent.leftChild) # node is leftChild of its parent node = parent right_rotate!(tree, node) end # node is rightChild of its parent node.parent.color = false node.parent.parent.color = true left_rotate!(tree, node.parent.parent) end end end tree.root.color = false end """ insert!(tree, key) Inserts `key` in the `tree` if it is not present. """ ##CHUNK 6 else if !sibling.rightChild.color sibling.leftChild.color = false sibling.color = true right_rotate!(tree, sibling) sibling = node.parent.rightChild end sibling.color = node.parent.color node.parent.color = false sibling.rightChild.color = false left_rotate!(tree, node.parent) node = tree.root end else sibling = node.parent.leftChild if sibling.color sibling.color = false node.parent.color = true right_rotate!(tree, node.parent)
399
412
DataStructures.jl
45
function Base.getindex(tree::RBTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(ArgumentError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::RBTreeNode{K}) where K if (node !== tree.nil) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) return @inbounds arr[ind] end
function Base.getindex(tree::RBTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(ArgumentError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::RBTreeNode{K}) where K if (node !== tree.nil) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) return @inbounds arr[ind] end
[ 399, 412 ]
function Base.getindex(tree::RBTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(ArgumentError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::RBTreeNode{K}) where K if (node !== tree.nil) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) return @inbounds arr[ind] end
function Base.getindex(tree::RBTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(ArgumentError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::RBTreeNode{K}) where K if (node !== tree.nil) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) return @inbounds arr[ind] end
traverse_tree_inorder
399
412
src/red_black_tree.jl
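A sketch of indexing an `RBTree`, assuming DataStructures.jl; `tree[i]` materialises a full in-order traversal, so it returns the `i`-th smallest key at O(n) cost per lookup.

using DataStructures

tree = RBTree{Int}()
for k in (40, 10, 30, 20)
    push!(tree, k)
end
tree[1]      # expect 10, the smallest key
tree[4]      # expect 40, the largest key
# tree[5]    # would throw ArgumentError from the bounds check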
#FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 end splay!(tree, node) tree.count += 1 return tree end function Base.getindex(tree::SplayTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(KeyError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::Union{SplayTreeNode, Nothing}) if (node != nothing) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) return @inbounds arr[ind] end ##CHUNK 2 end end node.parent = y if y === nothing tree.root = node elseif node.data < y.data y.leftChild = node else y.rightChild = node end splay!(tree, node) tree.count += 1 return tree end function Base.getindex(tree::SplayTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(KeyError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::Union{SplayTreeNode, Nothing}) if (node != nothing) #FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 julia> tree[4] 7 julia> tree[8] 15 ``` """ function Base.getindex(tree::AVLTree{K}, ind::Integer) where K @boundscheck (1 <= ind <= tree.count) || throw(BoundsError("$ind should be in between 1 and $(tree.count)")) function traverse_tree(node::AVLTreeNode_or_null, idx) if (node != nothing) L = get_subsize(node.leftChild) if idx <= L return traverse_tree(node.leftChild, idx) elseif idx == L + 1 return node.data else return traverse_tree(node.rightChild, idx - L - 1) end ##CHUNK 2 function traverse_tree(node::AVLTreeNode_or_null, idx) if (node != nothing) L = get_subsize(node.leftChild) if idx <= L return traverse_tree(node.leftChild, idx) elseif idx == L + 1 return node.data else return traverse_tree(node.rightChild, idx - L - 1) end end end value = traverse_tree(tree.root, ind) return value end ##CHUNK 3 """ in(key, tree::AVLTree) `In` infix operator for `key` and `tree` types. Analogous to [`haskey(tree::AVLTree{K}, k::K) where K`](@ref). """ Base.in(key, tree::AVLTree) = haskey(tree, key) function insert_node(node::Nothing, key::K) where K return AVLTreeNode{K}(key) end function insert_node(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = insert_node(node.leftChild, key) else node.rightChild = insert_node(node.rightChild, key) end node.subsize = compute_subtree_size(node) node.height = compute_height(node) #FILE: DataStructures.jl/src/fenwick.jl ##CHUNK 1 n = length(a) tree = FenwickTree{U}(n) @inbounds for i = 1:n inc!(tree, i, a[i]) end tree end Base.length(ft::FenwickTree) = ft.n Base.eltype(::Type{FenwickTree{T}}) where T = T """ inc!(ft::FenwickTree{T}, ind::Integer, val) Increases the value of the [`FenwickTree`] by `val` from the index `ind` upto the length of the Fenwick Tree. 
""" function inc!(ft::FenwickTree{T}, ind::Integer, val = 1) where T val0 = convert(T, val) i = ind #FILE: DataStructures.jl/src/balanced_tree.jl ##CHUNK 1 @inbounds p = t.data[i].parent prevchild = 0 depthp = t.depth @inbounds while true if depthp < t.depth p = t.tree[ii].parent end if t.tree[p].child3 == ii prevchild = t.tree[p].child2 break end if t.tree[p].child2 == ii prevchild = t.tree[p].child1 break end ii = p depthp -= 1 end @inbounds while true if depthp == t.depth #CURRENT FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 end RBTree() = RBTree{Any}() Base.length(tree::RBTree) = tree.count """ search_node(tree, key) Returns the last visited node, while traversing through in binary-search-tree fashion looking for `key`. """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild ##CHUNK 2 nil::RBTreeNode{K} count::Int function RBTree{K}() where K rb = new() rb.nil = create_null_node(K) rb.root = rb.nil rb.count = 0 return rb end end RBTree() = RBTree{Any}() Base.length(tree::RBTree) = tree.count """ search_node(tree, key) Returns the last visited node, while traversing through in binary-search-tree fashion looking for `key`. ##CHUNK 3 function Base.haskey(tree::RBTree{K}, d::K) where K node = search_node(tree, d) return (node.data == d) end """ insert_node!(tree::RBTree, node::RBTreeNode) Inserts `node` at proper location by traversing through the `tree` in a binary-search-tree fashion. """ function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild
97
148
DataStructures.jl
46
function rh_insert!(h::RobinDict{K, V}, key::K, val::V) where {K, V} sz = length(h.keys) (h.count > ROBIN_DICT_LOAD_FACTOR * sz) && rehash!(h, sz<<2) # table full @assert h.count != length(h.keys) ckey, cval, chash = key, val, hash_key(key) sz = length(h.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h, index_curr)) || (isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey)) break end probe_distance = calculate_distance(h, index_curr) if probe_current > probe_distance h.vals[index_curr], cval = cval, h.vals[index_curr] h.keys[index_curr], ckey = ckey, h.keys[index_curr] h.hashes[index_curr], chash = chash, h.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey) h.vals[index_curr] = cval return index_curr end @inbounds if isslotempty(h, index_curr) h.count += 1 end @inbounds h.vals[index_curr] = cval @inbounds h.keys[index_curr] = ckey @inbounds h.hashes[index_curr] = chash @assert probe_current >= 0 if h.idxfloor == 0 h.idxfloor = index_curr else h.idxfloor = min(h.idxfloor, index_curr) end return index_curr end
function rh_insert!(h::RobinDict{K, V}, key::K, val::V) where {K, V} sz = length(h.keys) (h.count > ROBIN_DICT_LOAD_FACTOR * sz) && rehash!(h, sz<<2) # table full @assert h.count != length(h.keys) ckey, cval, chash = key, val, hash_key(key) sz = length(h.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h, index_curr)) || (isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey)) break end probe_distance = calculate_distance(h, index_curr) if probe_current > probe_distance h.vals[index_curr], cval = cval, h.vals[index_curr] h.keys[index_curr], ckey = ckey, h.keys[index_curr] h.hashes[index_curr], chash = chash, h.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey) h.vals[index_curr] = cval return index_curr end @inbounds if isslotempty(h, index_curr) h.count += 1 end @inbounds h.vals[index_curr] = cval @inbounds h.keys[index_curr] = ckey @inbounds h.hashes[index_curr] = chash @assert probe_current >= 0 if h.idxfloor == 0 h.idxfloor = index_curr else h.idxfloor = min(h.idxfloor, index_curr) end return index_curr end
[ 97, 148 ]
function rh_insert!(h::RobinDict{K, V}, key::K, val::V) where {K, V} sz = length(h.keys) (h.count > ROBIN_DICT_LOAD_FACTOR * sz) && rehash!(h, sz<<2) # table full @assert h.count != length(h.keys) ckey, cval, chash = key, val, hash_key(key) sz = length(h.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h, index_curr)) || (isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey)) break end probe_distance = calculate_distance(h, index_curr) if probe_current > probe_distance h.vals[index_curr], cval = cval, h.vals[index_curr] h.keys[index_curr], ckey = ckey, h.keys[index_curr] h.hashes[index_curr], chash = chash, h.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey) h.vals[index_curr] = cval return index_curr end @inbounds if isslotempty(h, index_curr) h.count += 1 end @inbounds h.vals[index_curr] = cval @inbounds h.keys[index_curr] = ckey @inbounds h.hashes[index_curr] = chash @assert probe_current >= 0 if h.idxfloor == 0 h.idxfloor = index_curr else h.idxfloor = min(h.idxfloor, index_curr) end return index_curr end
function rh_insert!(h::RobinDict{K, V}, key::K, val::V) where {K, V} sz = length(h.keys) (h.count > ROBIN_DICT_LOAD_FACTOR * sz) && rehash!(h, sz<<2) # table full @assert h.count != length(h.keys) ckey, cval, chash = key, val, hash_key(key) sz = length(h.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h, index_curr)) || (isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey)) break end probe_distance = calculate_distance(h, index_curr) if probe_current > probe_distance h.vals[index_curr], cval = cval, h.vals[index_curr] h.keys[index_curr], ckey = ckey, h.keys[index_curr] h.hashes[index_curr], chash = chash, h.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey) h.vals[index_curr] = cval return index_curr end @inbounds if isslotempty(h, index_curr) h.count += 1 end @inbounds h.vals[index_curr] = cval @inbounds h.keys[index_curr] = ckey @inbounds h.hashes[index_curr] = chash @assert probe_current >= 0 if h.idxfloor == 0 h.idxfloor = index_curr else h.idxfloor = min(h.idxfloor, index_curr) end return index_curr end
rh_insert!
97
148
src/robin_dict.jl
#FILE: DataStructures.jl/test/test_robin_dict.jl ##CHUNK 1 # Functions which are not exported, but are required for checking invariants hash_key(key) = (hash(key)%UInt32) | 0x80000000 desired_index(hash, sz) = (hash & (sz - 1)) + 1 isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) function calculate_distance(h::RobinDict{K, V}, index) where {K, V} @assert isslotfilled(h, index) sz = length(h.keys) @inbounds index_init = desired_index(h.hashes[index], sz) return (index - index_init + sz) & (sz - 1) end function get_idxfloor(h::RobinDict) @inbounds for i = 1:length(h.keys) if isslotfilled(h, i) return i end end return 0 #CURRENT FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 ckey, cval, chash = key, val, hash sz = length(h_new.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h_new, index_curr)) break end probe_distance = calculate_distance(h_new, index_curr) if probe_current > probe_distance h_new.vals[index_curr], cval = cval, h_new.vals[index_curr] h_new.keys[index_curr], ckey = ckey, h_new.keys[index_curr] h_new.hashes[index_curr], chash = chash, h_new.hashes[index_curr] probe_current = probe_distance end ##CHUNK 2 break end probe_distance = calculate_distance(h_new, index_curr) if probe_current > probe_distance h_new.vals[index_curr], cval = cval, h_new.vals[index_curr] h_new.keys[index_curr], ckey = ckey, h_new.keys[index_curr] h_new.hashes[index_curr], chash = chash, h_new.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotempty(h_new, index_curr) h_new.count += 1 end @inbounds h_new.vals[index_curr] = cval @inbounds h_new.keys[index_curr] = ckey ##CHUNK 3 sz = length(h.keys) @inbounds index_init = desired_index(h.hashes[index], sz) return (index - index_init + sz) & (sz - 1) end # insert algorithm function rh_insert_for_rehash!(h_new::RobinDict{K, V}, key::K, val::V, hash::UInt32) where {K, V} # table full @assert h_new.count != length(h_new.keys) ckey, cval, chash = key, val, hash sz = length(h_new.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h_new, index_curr)) ##CHUNK 4 probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotempty(h_new, index_curr) h_new.count += 1 end @inbounds h_new.vals[index_curr] = cval @inbounds h_new.keys[index_curr] = ckey @inbounds h_new.hashes[index_curr] = chash @assert probe_current >= 0 if h_new.idxfloor == 0 h_new.idxfloor = index_curr else h_new.idxfloor = min(h_new.idxfloor, index_curr) end return index_curr ##CHUNK 5 h.count = 0 h.idxfloor = 0 return h end function rh_search(h::RobinDict{K, V}, key) where {K, V} sz = length(h.keys) chash = hash_key(key) index = desired_index(chash, sz) cdibs = 0 @inbounds while true if isslotempty(h, index) return -1 elseif cdibs > calculate_distance(h, index) return -1 elseif h.hashes[index] == chash && (h.keys[index] === key || isequal(h.keys[index], key)) return index end index = (index & (sz - 1)) + 1 end ##CHUNK 6 rethrow(e) end end end hash_key(key) = (hash(key)%UInt32) | 0x80000000 desired_index(hash, sz) = (hash & (sz - 1)) + 1 function calculate_distance(h::RobinDict{K, V}, index) where {K, V} @assert isslotfilled(h, index) sz = length(h.keys) @inbounds index_init = desired_index(h.hashes[index], sz) return (index - index_init + sz) & 
(sz - 1) end # insert algorithm function rh_insert_for_rehash!(h_new::RobinDict{K, V}, key::K, val::V, hash::UInt32) where {K, V} # table full @assert h_new.count != length(h_new.keys) ##CHUNK 7 @inbounds h_new.hashes[index_curr] = chash @assert probe_current >= 0 if h_new.idxfloor == 0 h_new.idxfloor = index_curr else h_new.idxfloor = min(h_new.idxfloor, index_curr) end return index_curr end #rehash! algorithm function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 ##CHUNK 8 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) ##CHUNK 9 """ function Base.empty!(h::RobinDict{K,V}) where {K, V} sz = length(h.keys) empty!(h.hashes) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) resize!(h.hashes, sz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end function rh_search(h::RobinDict{K, V}, key) where {K, V} sz = length(h.keys) chash = hash_key(key) index = desired_index(chash, sz) cdibs = 0
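The focal `rh_insert!` is the Robin Hood probing routine behind `setindex!` on a `RobinDict`: while probing, it swaps the incoming entry with a resident one whenever the incoming entry is farther from its desired slot, which keeps probe lengths even. A small sketch of the public dictionary interface that drives this path (assuming `RobinDict` behaves like a standard `AbstractDict`):

using DataStructures

d = RobinDict{String,Int}()
d["apple"]  = 1           # each assignment funnels into the Robin Hood insert
d["banana"] = 2
d["apple"]  = 10          # existing key: the stored value is replaced in place
d["apple"], length(d)     # (10, 2)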
150
193
DataStructures.jl
47
function rh_insert_for_rehash!(h_new::RobinDict{K, V}, key::K, val::V, hash::UInt32) where {K, V} # table full @assert h_new.count != length(h_new.keys) ckey, cval, chash = key, val, hash sz = length(h_new.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h_new, index_curr)) break end probe_distance = calculate_distance(h_new, index_curr) if probe_current > probe_distance h_new.vals[index_curr], cval = cval, h_new.vals[index_curr] h_new.keys[index_curr], ckey = ckey, h_new.keys[index_curr] h_new.hashes[index_curr], chash = chash, h_new.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotempty(h_new, index_curr) h_new.count += 1 end @inbounds h_new.vals[index_curr] = cval @inbounds h_new.keys[index_curr] = ckey @inbounds h_new.hashes[index_curr] = chash @assert probe_current >= 0 if h_new.idxfloor == 0 h_new.idxfloor = index_curr else h_new.idxfloor = min(h_new.idxfloor, index_curr) end return index_curr end
function rh_insert_for_rehash!(h_new::RobinDict{K, V}, key::K, val::V, hash::UInt32) where {K, V} # table full @assert h_new.count != length(h_new.keys) ckey, cval, chash = key, val, hash sz = length(h_new.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h_new, index_curr)) break end probe_distance = calculate_distance(h_new, index_curr) if probe_current > probe_distance h_new.vals[index_curr], cval = cval, h_new.vals[index_curr] h_new.keys[index_curr], ckey = ckey, h_new.keys[index_curr] h_new.hashes[index_curr], chash = chash, h_new.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotempty(h_new, index_curr) h_new.count += 1 end @inbounds h_new.vals[index_curr] = cval @inbounds h_new.keys[index_curr] = ckey @inbounds h_new.hashes[index_curr] = chash @assert probe_current >= 0 if h_new.idxfloor == 0 h_new.idxfloor = index_curr else h_new.idxfloor = min(h_new.idxfloor, index_curr) end return index_curr end
[ 150, 193 ]
function rh_insert_for_rehash!(h_new::RobinDict{K, V}, key::K, val::V, hash::UInt32) where {K, V} # table full @assert h_new.count != length(h_new.keys) ckey, cval, chash = key, val, hash sz = length(h_new.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h_new, index_curr)) break end probe_distance = calculate_distance(h_new, index_curr) if probe_current > probe_distance h_new.vals[index_curr], cval = cval, h_new.vals[index_curr] h_new.keys[index_curr], ckey = ckey, h_new.keys[index_curr] h_new.hashes[index_curr], chash = chash, h_new.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotempty(h_new, index_curr) h_new.count += 1 end @inbounds h_new.vals[index_curr] = cval @inbounds h_new.keys[index_curr] = ckey @inbounds h_new.hashes[index_curr] = chash @assert probe_current >= 0 if h_new.idxfloor == 0 h_new.idxfloor = index_curr else h_new.idxfloor = min(h_new.idxfloor, index_curr) end return index_curr end
function rh_insert_for_rehash!(h_new::RobinDict{K, V}, key::K, val::V, hash::UInt32) where {K, V} # table full @assert h_new.count != length(h_new.keys) ckey, cval, chash = key, val, hash sz = length(h_new.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h_new, index_curr)) break end probe_distance = calculate_distance(h_new, index_curr) if probe_current > probe_distance h_new.vals[index_curr], cval = cval, h_new.vals[index_curr] h_new.keys[index_curr], ckey = ckey, h_new.keys[index_curr] h_new.hashes[index_curr], chash = chash, h_new.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotempty(h_new, index_curr) h_new.count += 1 end @inbounds h_new.vals[index_curr] = cval @inbounds h_new.keys[index_curr] = ckey @inbounds h_new.hashes[index_curr] = chash @assert probe_current >= 0 if h_new.idxfloor == 0 h_new.idxfloor = index_curr else h_new.idxfloor = min(h_new.idxfloor, index_curr) end return index_curr end
rh_insert_for_rehash!
150
193
src/robin_dict.jl
#FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 _setindex!(h, v0, key0) else @assert haskey(h, key0) @inbounds orig_v = h.vals[index] !isequal(orig_v, v0) && (@inbounds h.vals[index] = v0) end check_for_rehash(h) && rehash!(h) return h end # rehash when there are ALLOWABLE_USELESS_GROWTH % # tombstones, or non-mirrored entries in the dictionary function check_for_rehash(h::OrderedRobinDict) keysl = length(h.keys) dictl = length(h) return (keysl > (1 + ALLOWABLE_USELESS_GROWTH)*dictl) end ##CHUNK 2 end Base.@propagate_inbounds function Base.iterate(h::OrderedRobinDict) isempty(h) && return nothing check_for_rehash(h) && rehash!(h) index = get_first_filled_index(h) return (Pair(h.keys[index], h.vals[index]), index+1) end Base.@propagate_inbounds function Base.iterate(h::OrderedRobinDict, i) length(h.keys) < i && return nothing index = get_next_filled_index(h, i) (index < 0) && return nothing return (Pair(h.keys[index], h.vals[index]), index+1) end Base.filter!(f, d::Union{RobinDict, OrderedRobinDict}) = Base.filter_in_one_pass!(f, d) function Base.merge(d::OrderedRobinDict, others::AbstractDict...) K,V = _merge_kvtypes(d, others...) #CURRENT FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 @inbounds while true if (isslotempty(h, index_curr)) || (isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey)) break end probe_distance = calculate_distance(h, index_curr) if probe_current > probe_distance h.vals[index_curr], cval = cval, h.vals[index_curr] h.keys[index_curr], ckey = ckey, h.keys[index_curr] h.hashes[index_curr], chash = chash, h.hashes[index_curr] probe_current = probe_distance end probe_current += 1 index_curr = (index_curr & (sz - 1)) + 1 end @inbounds if isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey) h.vals[index_curr] = cval return index_curr end ##CHUNK 2 @inbounds if isslotempty(h, index_curr) h.count += 1 end @inbounds h.vals[index_curr] = cval @inbounds h.keys[index_curr] = ckey @inbounds h.hashes[index_curr] = chash @assert probe_current >= 0 if h.idxfloor == 0 h.idxfloor = index_curr else h.idxfloor = min(h.idxfloor, index_curr) end return index_curr end ##CHUNK 3 # table full @assert h.count != length(h.keys) ckey, cval, chash = key, val, hash_key(key) sz = length(h.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 @inbounds while true if (isslotempty(h, index_curr)) || (isslotfilled(h, index_curr) && isequal(h.keys[index_curr], ckey)) break end probe_distance = calculate_distance(h, index_curr) if probe_current > probe_distance h.vals[index_curr], cval = cval, h.vals[index_curr] h.keys[index_curr], ckey = ckey, h.keys[index_curr] h.hashes[index_curr], chash = chash, h.hashes[index_curr] ##CHUNK 4 rethrow(e) end end end hash_key(key) = (hash(key)%UInt32) | 0x80000000 desired_index(hash, sz) = (hash & (sz - 1)) + 1 function calculate_distance(h::RobinDict{K, V}, index) where {K, V} @assert isslotfilled(h, index) sz = length(h.keys) @inbounds index_init = desired_index(h.hashes[index], sz) return (index - index_init + sz) & (sz - 1) end # insert algorithm function rh_insert!(h::RobinDict{K, V}, key::K, val::V) where {K, V} sz = length(h.keys) (h.count > ROBIN_DICT_LOAD_FACTOR * sz) && rehash!(h, sz<<2) ##CHUNK 5 rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) function Base.setindex!(h::RobinDict{K,V}, v0, key0) where {K, V} key = convert(K, 
key0) isequal(key, key0) || throw(ArgumentError("$key0 is not a valid key for type $K")) _setindex!(h, key, v0) end function _setindex!(h::RobinDict{K,V}, key::K, v0) where {K, V} v = convert(V, v0) index = rh_insert!(h, key, v) @assert index > 0 return h end ##CHUNK 6 return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) function Base.setindex!(h::RobinDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) isequal(key, key0) || throw(ArgumentError("$key0 is not a valid key for type $K")) ##CHUNK 7 sz = length(h.keys) @inbounds index_init = desired_index(h.hashes[index], sz) return (index - index_init + sz) & (sz - 1) end # insert algorithm function rh_insert!(h::RobinDict{K, V}, key::K, val::V) where {K, V} sz = length(h.keys) (h.count > ROBIN_DICT_LOAD_FACTOR * sz) && rehash!(h, sz<<2) # table full @assert h.count != length(h.keys) ckey, cval, chash = key, val, hash_key(key) sz = length(h.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 ##CHUNK 8 next = (index & (sz - 1)) + 1 @inbounds while next != index0 h.vals[curr] = h.vals[next] h.keys[curr] = h.keys[next] h.hashes[curr] = h.hashes[next] curr = next next = (next & (sz-1)) + 1 end #curr is at the last position, reset back to normal isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, curr-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, curr-1) @inbounds h.hashes[curr] = 0x0 h.count -= 1 # this is necessary because key at idxfloor might get deleted h.idxfloor = get_next_filled(h, h.idxfloor) return h end
196
226
DataStructures.jl
48
function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end
function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end
[ 196, 226 ]
function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end
function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end
rehash!
196
226
src/robin_dict.jl
#FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end function rehash!(h::SwissDict{K,V}, newsz = length(h.keys)) where {K, V} olds = h.slots oldk = h.keys oldv = h.vals sz = length(oldk) newsz = _tablesz(newsz) (newsz*SWISS_DICT_LOAD_FACTOR) > h.count || (newsz <<= 1) h.age += 1 h.idxfloor = 1 if h.count == 0 resize!(h.slots, newsz>>4) fill!(h.slots, _expand16(0x00)) resize!(h.keys, newsz) resize!(h.vals, newsz) ##CHUNK 2 sz = length(oldk) newsz = _tablesz(newsz) (newsz*SWISS_DICT_LOAD_FACTOR) > h.count || (newsz <<= 1) h.age += 1 h.idxfloor = 1 if h.count == 0 resize!(h.slots, newsz>>4) fill!(h.slots, _expand16(0x00)) resize!(h.keys, newsz) resize!(h.vals, newsz) h.nbfull = 0 return h end nssz = newsz>>4 slots = fill(_expand16(0x00), nssz) keys = Vector{K}(undef, newsz) vals = Vector{V}(undef, newsz) age0 = h.age nbfull = 0 is = _iterslots(h, 1) ##CHUNK 3 sz = length(h.keys) if h.count*4 < sz && sz > 16 rehash!(h, sz>>1) end end function Base.sizehint!(d::SwissDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end function rehash!(h::SwissDict{K,V}, newsz = length(h.keys)) where {K, V} olds = h.slots oldk = h.keys oldv = h.vals ##CHUNK 4 fill!(h.slots, _expand16(0x00)) sz = length(h.keys) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) h.nbfull = 0 h.count = 0 h.age += 1 h.idxfloor = 1 return h end function Base.setindex!(h::SwissDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) _setindex!(h, v0, key) end function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) #FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 end end h.keys = hk h.vals = hv for (idx, k) in enumerate(h.keys) h.dict[k] = idx end return h end function Base.sizehint!(d::OrderedRobinDict, newsz::Integer) oldsz = length(d) # grow at least 25% if newsz < (oldsz*5)>>2 return d end sizehint!(d.keys, newsz) sizehint!(d.vals, newsz) #CURRENT FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 ``` """ function Base.empty!(h::RobinDict{K,V}) where {K, V} sz = length(h.keys) empty!(h.hashes) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) resize!(h.hashes, sz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end function rh_search(h::RobinDict{K, V}, key) where {K, V} sz = length(h.keys) chash = hash_key(key) index = desired_index(chash, sz) ##CHUNK 2 end return index_curr end #rehash! 
algorithm function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) ##CHUNK 3 if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) function Base.setindex!(h::RobinDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) isequal(key, key0) || throw(ArgumentError("$key0 is not a valid key for type $K")) _setindex!(h, key, v0) end function _setindex!(h::RobinDict{K,V}, key::K, v0) where {K, V} v = convert(V, v0) index = rh_insert!(h, key, v) @assert index > 0 ##CHUNK 4 sz = length(h.keys) @inbounds index_init = desired_index(h.hashes[index], sz) return (index - index_init + sz) & (sz - 1) end # insert algorithm function rh_insert!(h::RobinDict{K, V}, key::K, val::V) where {K, V} sz = length(h.keys) (h.count > ROBIN_DICT_LOAD_FACTOR * sz) && rehash!(h, sz<<2) # table full @assert h.count != length(h.keys) ckey, cval, chash = key, val, hash_key(key) sz = length(h.keys) index_init = desired_index(chash, sz) index_curr = index_init probe_distance = 0 probe_current = 0 ##CHUNK 5 next = (next & (sz-1)) + 1 end #curr is at the last position, reset back to normal isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, curr-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, curr-1) @inbounds h.hashes[curr] = 0x0 h.count -= 1 # this is necessary because key at idxfloor might get deleted h.idxfloor = get_next_filled(h, h.idxfloor) return h end function _pop!(h::RobinDict, index) @inbounds val = h.vals[index] rh_delete!(h, index) return val end
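The focal `rehash!` rebuilds the table at a power-of-two size and re-inserts every live entry with its cached hash via `rh_insert_for_rehash!`; growth is triggered from `rh_insert!` once the load factor is exceeded, and `sizehint!` (quoted in the chunks above) can pre-size the table. A hedged sketch of that public behaviour:

using DataStructures

d = RobinDict{Int,Int}()
sizehint!(d, 1_000)       # pre-grow the backing arrays so the loop below avoids repeated rehashing
for i in 1:1_000
    d[i] = i * i
end
length(d)                 # 1000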
274
286
DataStructures.jl
49
function Base.empty!(h::RobinDict{K,V}) where {K, V} sz = length(h.keys) empty!(h.hashes) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) resize!(h.hashes, sz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end
function Base.empty!(h::RobinDict{K,V}) where {K, V} sz = length(h.keys) empty!(h.hashes) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) resize!(h.hashes, sz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end
[ 274, 286 ]
function Base.empty!(h::RobinDict{K,V}) where {K, V} sz = length(h.keys) empty!(h.hashes) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) resize!(h.hashes, sz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end
function Base.empty!(h::RobinDict{K,V}) where {K, V} sz = length(h.keys) empty!(h.hashes) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) resize!(h.hashes, sz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end
Base.empty!
274
286
src/robin_dict.jl
#FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 julia> empty!(A); julia> A OrderedRobinDict{String, Int64}() ``` """ function Base.empty!(h::OrderedRobinDict{K,V}) where {K, V} empty!(h.dict) empty!(h.keys) empty!(h.vals) h.count = 0 return h end function _setindex!(h::OrderedRobinDict, v, key) hk, hv = h.keys, h.vals push!(hk, key) push!(hv, v) nk = length(hk) ##CHUNK 2 empty!(h.vals) h.count = 0 return h end function _setindex!(h::OrderedRobinDict, v, key) hk, hv = h.keys, h.vals push!(hk, key) push!(hv, v) nk = length(hk) @inbounds h.dict[key] = Int32(nk) h.count += 1 end function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 ##CHUNK 3 function rehash!(h::OrderedRobinDict{K, V}) where {K, V} keys = h.keys vals = h.vals hk = Vector{K}() hv = Vector{V}() for (idx, (k, v)) in enumerate(zip(keys, vals)) if get(h.dict, k, -1) == idx push!(hk, k) push!(hv, v) end end h.keys = hk h.vals = hv for (idx, k) in enumerate(h.keys) h.dict[k] = idx end return h #FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 fill!(h.slots, _expand16(0x00)) sz = length(h.keys) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) h.nbfull = 0 h.count = 0 h.age += 1 h.idxfloor = 1 return h end function Base.setindex!(h::SwissDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) _setindex!(h, v0, key) end function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) ##CHUNK 2 "b" => 2 julia> empty!(A); julia> A SwissDict{String, Int64}() ``` """ function Base.empty!(h::SwissDict{K,V}) where {K, V} fill!(h.slots, _expand16(0x00)) sz = length(h.keys) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) h.nbfull = 0 h.count = 0 h.age += 1 h.idxfloor = 1 ##CHUNK 3 vals[idx] = v nbfull += iszero(idx & 0x0f) count += 1 if h.age != age0 return rehash!(h, newsz) end is = _iterslots(h, s) end h.slots = slots h.keys = keys h.vals = vals h.nbfull = nbfull @assert h.age == age0 @assert h.count == count return h end Base.isempty(t::SwissDict) = (t.count == 0) Base.length(t::SwissDict) = t.count #FILE: DataStructures.jl/test/test_robin_dict.jl ##CHUNK 1 @testset "empty" begin h = RobinDict() for i=1:1000 h[i] = i+1 end length0 = length(h.hashes) empty!(h) @test h.count == 0 @test h.idxfloor == 0 @test length(h.hashes) == length(h.keys) == length(h.vals) == length0 for i=-1000:1000 @test !haskey(h, i) end end @testset "ArgumentError" begin @test_throws ArgumentError RobinDict(0) @test_throws ArgumentError RobinDict([1]) @test_throws ArgumentError RobinDict([(1,2),0]) #CURRENT FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 end return index_curr end #rehash! 
algorithm function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end ##CHUNK 2 h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) ##CHUNK 3 newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i]
459
494
DataStructures.jl
50
function rh_delete!(h::RobinDict{K, V}, index) where {K, V} @assert index > 0 # this assumes that there is a key/value present in the dictionary at index index0 = index sz = length(h.keys) @inbounds while true index0 = (index0 & (sz - 1)) + 1 if isslotempty(h, index0) || calculate_distance(h, index0) == 0 break end end #index0 represents the position before which we have to shift backwards # the backwards shifting algorithm curr = index next = (index & (sz - 1)) + 1 @inbounds while next != index0 h.vals[curr] = h.vals[next] h.keys[curr] = h.keys[next] h.hashes[curr] = h.hashes[next] curr = next next = (next & (sz-1)) + 1 end #curr is at the last position, reset back to normal isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, curr-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, curr-1) @inbounds h.hashes[curr] = 0x0 h.count -= 1 # this is necessary because key at idxfloor might get deleted h.idxfloor = get_next_filled(h, h.idxfloor) return h end
function rh_delete!(h::RobinDict{K, V}, index) where {K, V} @assert index > 0 # this assumes that there is a key/value present in the dictionary at index index0 = index sz = length(h.keys) @inbounds while true index0 = (index0 & (sz - 1)) + 1 if isslotempty(h, index0) || calculate_distance(h, index0) == 0 break end end #index0 represents the position before which we have to shift backwards # the backwards shifting algorithm curr = index next = (index & (sz - 1)) + 1 @inbounds while next != index0 h.vals[curr] = h.vals[next] h.keys[curr] = h.keys[next] h.hashes[curr] = h.hashes[next] curr = next next = (next & (sz-1)) + 1 end #curr is at the last position, reset back to normal isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, curr-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, curr-1) @inbounds h.hashes[curr] = 0x0 h.count -= 1 # this is necessary because key at idxfloor might get deleted h.idxfloor = get_next_filled(h, h.idxfloor) return h end
[ 459, 494 ]
function rh_delete!(h::RobinDict{K, V}, index) where {K, V} @assert index > 0 # this assumes that there is a key/value present in the dictionary at index index0 = index sz = length(h.keys) @inbounds while true index0 = (index0 & (sz - 1)) + 1 if isslotempty(h, index0) || calculate_distance(h, index0) == 0 break end end #index0 represents the position before which we have to shift backwards # the backwards shifting algorithm curr = index next = (index & (sz - 1)) + 1 @inbounds while next != index0 h.vals[curr] = h.vals[next] h.keys[curr] = h.keys[next] h.hashes[curr] = h.hashes[next] curr = next next = (next & (sz-1)) + 1 end #curr is at the last position, reset back to normal isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, curr-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, curr-1) @inbounds h.hashes[curr] = 0x0 h.count -= 1 # this is necessary because key at idxfloor might get deleted h.idxfloor = get_next_filled(h, h.idxfloor) return h end
function rh_delete!(h::RobinDict{K, V}, index) where {K, V} @assert index > 0 # this assumes that there is a key/value present in the dictionary at index index0 = index sz = length(h.keys) @inbounds while true index0 = (index0 & (sz - 1)) + 1 if isslotempty(h, index0) || calculate_distance(h, index0) == 0 break end end #index0 represents the position before which we have to shift backwards # the backwards shifting algorithm curr = index next = (index & (sz - 1)) + 1 @inbounds while next != index0 h.vals[curr] = h.vals[next] h.keys[curr] = h.keys[next] h.hashes[curr] = h.hashes[next] curr = next next = (next & (sz-1)) + 1 end #curr is at the last position, reset back to normal isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, curr-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, curr-1) @inbounds h.hashes[curr] = 0x0 h.count -= 1 # this is necessary because key at idxfloor might get deleted h.idxfloor = get_next_filled(h, h.idxfloor) return h end
rh_delete!
459
494
src/robin_dict.jl
#FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 end function Base.pop!(h::OrderedRobinDict) check_for_rehash(h) && rehash!(h) index = length(h.keys) while (index > 0) isslotfilled(h, index) && break index -= 1 end index == 0 && rehash!(h) @inbounds key = h.keys[index] return key => _pop!(h, index) end function Base.pop!(h::OrderedRobinDict, key) index = get(h.dict, key, -1) (index > 0) ? _pop!(h, index) : throw(KeyError(key)) end """ #FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 return h end function Base.setindex!(h::SwissDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) _setindex!(h, v0, key) end function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) index, tag = ht_keyindex2!(h, key) if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end ##CHUNK 2 off = trailing_zeros(cands) idx = i*16 + off + 1 return -idx, tag end i = (i+1) & (sz-1) end end function _setindex!(h::SwissDict, v, key, index, tag) @inbounds h.keys[index] = key @inbounds h.vals[index] = v h.count += 1 h.age += 1 so = _slotget(h.slots, index) h.nbfull += (iszero(index & 0x0f) & (so==0x00)) _slotset!(h.slots, tag, index) if index < h.idxfloor h.idxfloor = index end maybe_rehash_grow!(h) #FILE: DataStructures.jl/test/test_robin_dict.jl ##CHUNK 1 # Functions which are not exported, but are required for checking invariants hash_key(key) = (hash(key)%UInt32) | 0x80000000 desired_index(hash, sz) = (hash & (sz - 1)) + 1 isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) function calculate_distance(h::RobinDict{K, V}, index) where {K, V} @assert isslotfilled(h, index) sz = length(h.keys) @inbounds index_init = desired_index(h.hashes[index], sz) return (index - index_init + sz) & (sz - 1) end function get_idxfloor(h::RobinDict) @inbounds for i = 1:length(h.keys) if isslotfilled(h, i) return i end end return 0 #FILE: DataStructures.jl/src/int_set.jl ##CHUNK 1 idx = n+1 if 1 <= idx <= length(s.bits) unsafe_getindex(s.bits, idx) != s.inverse else ifelse((idx <= 0) | (idx > typemax(Int)), false, s.inverse) end end function findnextidx(s::IntSet, i::Int, invert=false) if s.inverse ⊻ invert # i+1 could rollover causing a BoundsError in findnext/findnextnot nextidx = i == typemax(Int) ? 0 : something(findnextnot(s.bits, i+1), 0) # Extend indices beyond the length of the bits since it is inverted nextidx = nextidx == 0 ? max(i, length(s.bits))+1 : nextidx else nextidx = i == typemax(Int) ? 0 : something(findnext(s.bits, i+1), 0) end return nextidx end #CURRENT FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 rethrow(e) end end end hash_key(key) = (hash(key)%UInt32) | 0x80000000 desired_index(hash, sz) = (hash & (sz - 1)) + 1 function calculate_distance(h::RobinDict{K, V}, index) where {K, V} @assert isslotfilled(h, index) sz = length(h.keys) @inbounds index_init = desired_index(h.hashes[index], sz) return (index - index_init + sz) & (sz - 1) end # insert algorithm function rh_insert!(h::RobinDict{K, V}, key::K, val::V) where {K, V} sz = length(h.keys) (h.count > ROBIN_DICT_LOAD_FACTOR * sz) && rehash!(h, sz<<2) ##CHUNK 2 ``` """ function Base.pop!(h::RobinDict{K, V}, key0, default) where {K, V} key = convert(K, key0) index = rh_search(h, key) return index > 0 ? 
_pop!(h, index) : default end function Base.pop!(h::RobinDict) isempty(h) && throw(ArgumentError("dict must be non-empty")) idx = h.idxfloor @inbounds key = h.keys[idx] @inbounds val = h.vals[idx] rh_delete!(h, idx) return key => val end """ delete!(collection, key) ##CHUNK 3 v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) ##CHUNK 4 # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) function Base.setindex!(h::RobinDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) isequal(key, key0) || throw(ArgumentError("$key0 is not a valid key for type $K")) _setindex!(h, key, v0) end function _setindex!(h::RobinDict{K,V}, key::K, v0) where {K, V} v = convert(V, v0) index = rh_insert!(h, key, v) ##CHUNK 5 index = desired_index(chash, sz) cdibs = 0 @inbounds while true if isslotempty(h, index) return -1 elseif cdibs > calculate_distance(h, index) return -1 elseif h.hashes[index] == chash && (h.keys[index] === key || isequal(h.keys[index], key)) return index end index = (index & (sz - 1)) + 1 end end """ get!(collection, key, default) Return the value stored for the given key, or if no mapping for the key is present, store `key => default`, and return `default`.
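The focal `rh_delete!` performs backward-shift deletion: entries after the vacated slot are shifted back until an empty slot or an entry with zero probe distance is reached, so no tombstones remain. A usage sketch via the public calls that exercise this path, `delete!` and the three-argument `pop!` (both visible in the surrounding chunks):

using DataStructures

d = RobinDict("a" => 1, "b" => 2, "c" => 3)
delete!(d, "b")           # backward-shift removal keeps the remaining probe sequences intact
haskey(d, "b")            # false
pop!(d, "c", 0)           # 3, and "c" is removed; the default 0 would be returned for a missing key
length(d)                 # 1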
395
418
DataStructures.jl
51
function Base.iterate(twoss::IntersectTwoSortedSets, state = TwoSortedSets_State(firstindex(twoss.m1), firstindex(twoss.m2))) m1 = twoss.m1 m2 = twoss.m2 ord = orderobject(m1) p1 = state.p1 p2 = state.p2 while p1 != pastendsemitoken(m1) && p2 != pastendsemitoken(m2) @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds p1 = advance((m1, p1)) continue end if lt(ord, k2, k1) @inbounds p2 = advance((m2, p2)) continue end @inbounds return (k1, TwoSortedSets_State(advance((m1, p1)), advance((m2, p2)))) end return nothing end
function Base.iterate(twoss::IntersectTwoSortedSets, state = TwoSortedSets_State(firstindex(twoss.m1), firstindex(twoss.m2))) m1 = twoss.m1 m2 = twoss.m2 ord = orderobject(m1) p1 = state.p1 p2 = state.p2 while p1 != pastendsemitoken(m1) && p2 != pastendsemitoken(m2) @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds p1 = advance((m1, p1)) continue end if lt(ord, k2, k1) @inbounds p2 = advance((m2, p2)) continue end @inbounds return (k1, TwoSortedSets_State(advance((m1, p1)), advance((m2, p2)))) end return nothing end
[ 395, 418 ]
function Base.iterate(twoss::IntersectTwoSortedSets, state = TwoSortedSets_State(firstindex(twoss.m1), firstindex(twoss.m2))) m1 = twoss.m1 m2 = twoss.m2 ord = orderobject(m1) p1 = state.p1 p2 = state.p2 while p1 != pastendsemitoken(m1) && p2 != pastendsemitoken(m2) @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds p1 = advance((m1, p1)) continue end if lt(ord, k2, k1) @inbounds p2 = advance((m2, p2)) continue end @inbounds return (k1, TwoSortedSets_State(advance((m1, p1)), advance((m2, p2)))) end return nothing end
function Base.iterate(twoss::IntersectTwoSortedSets, state = TwoSortedSets_State(firstindex(twoss.m1), firstindex(twoss.m2))) m1 = twoss.m1 m2 = twoss.m2 ord = orderobject(m1) p1 = state.p1 p2 = state.p2 while p1 != pastendsemitoken(m1) && p2 != pastendsemitoken(m2) @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds p1 = advance((m1, p1)) continue end if lt(ord, k2, k1) @inbounds p2 = advance((m2, p2)) continue end @inbounds return (k1, TwoSortedSets_State(advance((m1, p1)), advance((m2, p2)))) end return nothing end
Base.iterate
395
418
src/sorted_set.jl
#FILE: DataStructures.jl/src/sorted_dict.jl ##CHUNK 1 p2 = firstindex(m2) while true p1 == pastendsemitoken(m1) && return p2 == pastendsemitoken(m2) p2 == pastendsemitoken(m2) && return false @inbounds k1,d1 = deref((m1,p1)) @inbounds k2,d2 = deref((m2,p2)) (!eq(ord,k1,k2) || !isequal(d1,d2)) && return false @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end end function mergetwo!(m::SortedDict{K,D,Ord}, m2) where {K,D,Ord <: Ordering} for (k,v) in m2 m[convert(K,k)] = convert(D,v) end end ##CHUNK 2 Time: O(*cn*) """ function Base.isequal(m1::SortedDict{K, D, Ord}, m2::SortedDict{K, D, Ord}) where {K, D, Ord <: Ordering} ord = orderobject(m1) if ord != orderobject(m2) return invoke((==), Tuple{AbstractDict, AbstractDict}, m1, m2) end p1 = firstindex(m1) p2 = firstindex(m2) while true p1 == pastendsemitoken(m1) && return p2 == pastendsemitoken(m2) p2 == pastendsemitoken(m2) && return false @inbounds k1,d1 = deref((m1,p1)) @inbounds k2,d2 = deref((m2,p2)) (!eq(ord,k1,k2) || !isequal(d1,d2)) && return false @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end #FILE: DataStructures.jl/src/sorted_multi_dict.jl ##CHUNK 1 return p2 == pastendsemitoken(m2) end if p2 == pastendsemitoken(m2) return false end @inbounds k1,d1 = deref((m1,p1)) @inbounds k2,d2 = deref((m2,p2)) (!eq(ord,k1,k2) || !isequal(d1,d2)) && return false @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end end function mergetwo!(m::SortedMultiDict{K,D,Ord}, iterable) where {K,D,Ord <: Ordering} for (k,v) in iterable insert!(m.bt, convert(K,k), convert(D,v), true) end end #FILE: DataStructures.jl/test/test_sorted_containers.jl ##CHUNK 1 my_assert(m1[12] == 33.1) for j = 1 : 3 my_assert(ii != pastendsemitoken(m1)) pr = deref((m1,ii)) my_assert(pr[1] == expected[1][j] && pr[2] == expected[2][j]) checkcorrectness(m1.bt, false) oldii = ii ii = advance((m1,ii)) delete!((m1,oldii)) end checkcorrectness(m1.bt, false) checkcorrectness(m2.bt, false) my_assert(length(m2) == 3) ii = firstindex(m2) for j = 1 : 3 pr = deref((m2,ii)) my_assert(pr[1] == expected[1][j] && pr[2] == expected[2][j]) ii = advance((m2,ii)) end #CURRENT FILE: DataStructures.jl/src/sorted_set.jl ##CHUNK 1 ord = orderobject(m1) if ord != orderobject(m2) return invoke(issetequal, Tuple{AbstractSet, AbstractSet}, m1, m2) end p1 = firstindex(m1) p2 = firstindex(m2) while true p1 == pastendsemitoken(m1) && return p2 == pastendsemitoken(m2) p2 == pastendsemitoken(m2) && return false @inbounds k1 = deref((m1,p1)) @inbounds k2 = deref((m2,p2)) !eq(ord,k1,k2) && return false @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end end Base.issetequal(m1::SortedSet, m2::SortedSet) = isequal(m1, m2) ##CHUNK 2 if m1end @inbounds return (deref((m2, p2)), TwoSortedSets_State(p1, advance((m2,p2)))) end if m2end @inbounds return (deref((m1, p1)), TwoSortedSets_State(advance((m1,p1)), p2)) end @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds return (k1, TwoSortedSets_State(advance((m1,p1)), p2)) end if lt(ord, k2, k1) @inbounds return (k2, TwoSortedSets_State(p1, advance((m2,p2)))) end @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end end ##CHUNK 3 p1 = state.p1 p2 = state.p2 while true m1end = p1 == pastendsemitoken(m1) m2end = p2 == pastendsemitoken(m2) if m1end return nothing end if m2end @inbounds return (deref((m1, p1)), TwoSortedSets_State(advance((m1,p1)), p2)) end @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds return 
(k1, TwoSortedSets_State(advance((m1,p1)), p2)) end if !lt(ord, k2, k1) @inbounds p1 = advance((m1,p1)) end @inbounds p2 = advance((m2, p2)) ##CHUNK 4 end @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds return (k1, TwoSortedSets_State(advance((m1,p1)), p2)) end if !lt(ord, k2, k1) @inbounds p1 = advance((m1,p1)) end @inbounds p2 = advance((m2, p2)) end end """ Base.setdiff(ss1::SortedSet{K,Ord}, ss2::SortedSet{K,Ord}) where {K, Ord<:Ordering} Base.setdiff(ss1::SortedSet, others...) Return the set difference, i.e., a sorted set containing entries in `ss1` but not in `ss2` or successive arguments. Time for the first form: O(*cn*) ##CHUNK 5 m1::SortedSet{K,Ord} m2::SortedSet{K,Ord} end function Base.iterate(twoss::SetdiffTwoSortedSets, state = TwoSortedSets_State(firstindex(twoss.m1), firstindex(twoss.m2))) m1 = twoss.m1 m2 = twoss.m2 ord = orderobject(m1) p1 = state.p1 p2 = state.p2 while true m1end = p1 == pastendsemitoken(m1) m2end = p2 == pastendsemitoken(m2) if m1end return nothing end if m2end @inbounds return (deref((m1, p1)), TwoSortedSets_State(advance((m1,p1)), p2)) ##CHUNK 6 if lt(ord, k1, k2) @inbounds return (k1, TwoSortedSets_State(advance((m1,p1)), p2)) end if lt(ord, k2, k1) @inbounds return (k2, TwoSortedSets_State(p1, advance((m2,p2)))) end @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end end """ Base.symdiff(ss1::SortedSet, iterable) Compute and return the symmetric difference of `ss1` and `iterable`, i.e., a sorted set containing entries that are in one of `ss1` or `iterable` but not both. Time: O(*cn*), where *n* is the total size of the two containers if both are sorted sets with the same key and order objects. Otherwise, the time is O(*cn* log *n*)
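The focal `iterate` method is a two-pointer sorted intersection: whichever token points at the smaller key is advanced, and a key is emitted only when both tokens agree, so the pass is linear in the combined set sizes. A brief sketch through the public `intersect` entry point (assumed to build on this iterator):

using DataStructures

s1 = SortedSet([1, 3, 5, 7, 9])
s2 = SortedSet([3, 4, 5, 6, 7])
collect(intersect(s1, s2))    # [3, 5, 7]: the keys present in both sets, in sorted order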
648
670
DataStructures.jl
52
function Base.issubset(m1::SortedSet{K,Ord}, m2::SortedSet{K,Ord}) where {K, Ord <: Ordering} ord = orderobject(m1) if ord != orderobject(m2) || length(m1) < length(m2) / log2(length(m2) + 2) return invoke(issubset, Tuple{Any, SortedSet}, m1, m2) end p1 = firstindex(m1) p2 = firstindex(m2) while p1 != pastendsemitoken(m1) p2 == pastendsemitoken(m2) && return false @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if eq(ord, k1, k2) @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) elseif lt(ord, k1,k2) return false else @inbounds p2 = advance((m2,p2)) end end return true end
function Base.issubset(m1::SortedSet{K,Ord}, m2::SortedSet{K,Ord}) where {K, Ord <: Ordering} ord = orderobject(m1) if ord != orderobject(m2) || length(m1) < length(m2) / log2(length(m2) + 2) return invoke(issubset, Tuple{Any, SortedSet}, m1, m2) end p1 = firstindex(m1) p2 = firstindex(m2) while p1 != pastendsemitoken(m1) p2 == pastendsemitoken(m2) && return false @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if eq(ord, k1, k2) @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) elseif lt(ord, k1,k2) return false else @inbounds p2 = advance((m2,p2)) end end return true end
[ 648, 670 ]
function Base.issubset(m1::SortedSet{K,Ord}, m2::SortedSet{K,Ord}) where {K, Ord <: Ordering} ord = orderobject(m1) if ord != orderobject(m2) || length(m1) < length(m2) / log2(length(m2) + 2) return invoke(issubset, Tuple{Any, SortedSet}, m1, m2) end p1 = firstindex(m1) p2 = firstindex(m2) while p1 != pastendsemitoken(m1) p2 == pastendsemitoken(m2) && return false @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if eq(ord, k1, k2) @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) elseif lt(ord, k1,k2) return false else @inbounds p2 = advance((m2,p2)) end end return true end
function Base.issubset(m1::SortedSet{K,Ord}, m2::SortedSet{K,Ord}) where {K, Ord <: Ordering} ord = orderobject(m1) if ord != orderobject(m2) || length(m1) < length(m2) / log2(length(m2) + 2) return invoke(issubset, Tuple{Any, SortedSet}, m1, m2) end p1 = firstindex(m1) p2 = firstindex(m2) while p1 != pastendsemitoken(m1) p2 == pastendsemitoken(m2) && return false @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if eq(ord, k1, k2) @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) elseif lt(ord, k1,k2) return false else @inbounds p2 = advance((m2,p2)) end end return true end
Base.issubset
648
670
src/sorted_set.jl
#FILE: DataStructures.jl/src/sorted_dict.jl ##CHUNK 1 Time: O(*cn*) """ function Base.isequal(m1::SortedDict{K, D, Ord}, m2::SortedDict{K, D, Ord}) where {K, D, Ord <: Ordering} ord = orderobject(m1) if ord != orderobject(m2) return invoke((==), Tuple{AbstractDict, AbstractDict}, m1, m2) end p1 = firstindex(m1) p2 = firstindex(m2) while true p1 == pastendsemitoken(m1) && return p2 == pastendsemitoken(m2) p2 == pastendsemitoken(m2) && return false @inbounds k1,d1 = deref((m1,p1)) @inbounds k2,d2 = deref((m2,p2)) (!eq(ord,k1,k2) || !isequal(d1,d2)) && return false @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end #FILE: DataStructures.jl/src/sorted_multi_dict.jl ##CHUNK 1 function Base.isequal(m1::SortedMultiDict{K, D, Ord}, m2::SortedMultiDict{K, D, Ord}) where {K, D, Ord <: Ordering} ord = orderobject(m1) if ord != orderobject(m2) return false end p1 = firstindex(m1) p2 = firstindex(m2) while true if p1 == pastendsemitoken(m1) return p2 == pastendsemitoken(m2) end if p2 == pastendsemitoken(m2) return false end @inbounds k1,d1 = deref((m1,p1)) @inbounds k2,d2 = deref((m2,p2)) (!eq(ord,k1,k2) || !isequal(d1,d2)) && return false @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) #CURRENT FILE: DataStructures.jl/src/sorted_set.jl ##CHUNK 1 firstindex(twoss.m2))) m1 = twoss.m1 m2 = twoss.m2 ord = orderobject(m1) p1 = state.p1 p2 = state.p2 while true m1end = p1 == pastendsemitoken(m1) m2end = p2 == pastendsemitoken(m2) if m1end return nothing end if m2end @inbounds return (deref((m1, p1)), TwoSortedSets_State(advance((m1,p1)), p2)) end @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds return (k1, TwoSortedSets_State(advance((m1,p1)), p2)) end ##CHUNK 2 ord = orderobject(m1) if ord != orderobject(m2) return invoke(issetequal, Tuple{AbstractSet, AbstractSet}, m1, m2) end p1 = firstindex(m1) p2 = firstindex(m2) while true p1 == pastendsemitoken(m1) && return p2 == pastendsemitoken(m2) p2 == pastendsemitoken(m2) && return false @inbounds k1 = deref((m1,p1)) @inbounds k2 = deref((m2,p2)) !eq(ord,k1,k2) && return false @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end end Base.issetequal(m1::SortedSet, m2::SortedSet) = isequal(m1, m2) ##CHUNK 3 p1::IntSemiToken p2::IntSemiToken end function Base.iterate(twoss::IntersectTwoSortedSets, state = TwoSortedSets_State(firstindex(twoss.m1), firstindex(twoss.m2))) m1 = twoss.m1 m2 = twoss.m2 ord = orderobject(m1) p1 = state.p1 p2 = state.p2 while p1 != pastendsemitoken(m1) && p2 != pastendsemitoken(m2) @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds p1 = advance((m1, p1)) continue end if lt(ord, k2, k1) ##CHUNK 4 return nothing end if m2end @inbounds return (deref((m1, p1)), TwoSortedSets_State(advance((m1,p1)), p2)) end @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds return (k1, TwoSortedSets_State(advance((m1,p1)), p2)) end if !lt(ord, k2, k1) @inbounds p1 = advance((m1,p1)) end @inbounds p2 = advance((m2, p2)) end end """ Base.setdiff(ss1::SortedSet{K,Ord}, ss2::SortedSet{K,Ord}) where {K, Ord<:Ordering} ##CHUNK 5 end struct SetdiffTwoSortedSets{K, Ord <: Ordering} m1::SortedSet{K,Ord} m2::SortedSet{K,Ord} end function Base.iterate(twoss::SetdiffTwoSortedSets, state = TwoSortedSets_State(firstindex(twoss.m1), firstindex(twoss.m2))) m1 = twoss.m1 m2 = twoss.m2 ord = orderobject(m1) p1 = state.p1 p2 = state.p2 while true m1end = p1 == pastendsemitoken(m1) m2end = p2 == 
pastendsemitoken(m2) if m1end ##CHUNK 6 function Base.iterate(twoss::SymdiffTwoSortedSets, state = TwoSortedSets_State(firstindex(twoss.m1), firstindex(twoss.m2))) m1 = twoss.m1 m2 = twoss.m2 ord = orderobject(m1) p1 = state.p1 p2 = state.p2 while true m1end = p1 == pastendsemitoken(m1) m2end = p2 == pastendsemitoken(m2) if m1end && m2end return nothing end if m1end @inbounds return (deref((m2, p2)), TwoSortedSets_State(p1, advance((m2,p2)))) end if m2end @inbounds return (deref((m1, p1)), ##CHUNK 7 m2end = p2 == pastendsemitoken(m2) if m1end && m2end return nothing end if m1end @inbounds return (deref((m2, p2)), TwoSortedSets_State(p1, advance((m2,p2)))) end if m2end @inbounds return (deref((m1, p1)), TwoSortedSets_State(advance((m1,p1)), p2)) end @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds return (k1, TwoSortedSets_State(advance((m1,p1)), p2)) end if lt(ord, k2, k1) @inbounds return (k2, TwoSortedSets_State(p1, advance((m2,p2)))) end ##CHUNK 8 TwoSortedSets_State(advance((m1,p1)), p2)) end @inbounds k1 = deref((m1, p1)) @inbounds k2 = deref((m2, p2)) if lt(ord, k1, k2) @inbounds return (k1, TwoSortedSets_State(advance((m1,p1)), p2)) end if lt(ord, k2, k1) @inbounds return (k2, TwoSortedSets_State(p1, advance((m2,p2)))) end @inbounds p1 = advance((m1,p1)) @inbounds p2 = advance((m2,p2)) end end """ Base.symdiff(ss1::SortedSet, iterable) Compute and return the symmetric difference of `ss1` and `iterable`, i.e., a sorted set
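The focal `issubset` co-iterates the two sorted sequences in one merge-style pass, and its guard falls back to the generic per-element method when the orderings differ or when the first set is small enough that individual lookups in the second set are cheaper. A short illustrative sketch:

using DataStructures

small = SortedSet([2, 4, 6])
big   = SortedSet(1:10)
issubset(small, big)      # true: every key of `small` is found in `big`
issubset(big, small)      # false: 1, for example, is missing from `small`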
30
44
DataStructures.jl
53
[ 30, 44 ]
function Base.copy!(to::SparseIntSet, from::SparseIntSet)
    to.packed = copy(from.packed)
    #we want to keep the null pages === NULL_INT_PAGE
    resize!(to.reverse, length(from.reverse))
    for i in eachindex(from.reverse)
        page = from.reverse[i]
        if page === NULL_INT_PAGE
            to.reverse[i] = NULL_INT_PAGE
        else
            to.reverse[i] = copy(from.reverse[i])
        end
    end
    to.counters = copy(from.counters)
    return to
end
Base.copy!
30
44
src/sparse_int_set.jl
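A minimal usage sketch for the copy! method above. It assumes SparseIntSet and its constructors are exported by DataStructures as in this repo; the element values are arbitrary examples.

using DataStructures

s = SparseIntSet([1, 5, 300])   # pages are allocated lazily per block of indices
t = SparseIntSet()
copy!(t, s)                     # copies packed indices, counters, and every non-null page
@assert 300 in t && length(t) == 3
push!(t, 7)                     # mutating the copy leaves the original untouched
@assert !(7 in s)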
#CURRENT FILE: DataStructures.jl/src/sparse_int_set.jl ##CHUNK 1 page = @inbounds s.reverse[pageid] return page !== NULL_INT_PAGE && @inbounds page[offset] != 0 end end Base.length(s::SparseIntSet) = length(s.packed) @inline function Base.push!(s::SparseIntSet, i::Integer) i <= 0 && throw(DomainError("Only positive Ints allowed.")) pageid, offset = pageid_offset(s, i) pages = s.reverse plen = length(pages) if pageid > plen # Create new null pages up to pageid and fresh (zero-filled) one at pageid sizehint!(pages, pageid) sizehint!(s.counters, pageid) for i in 1:pageid - plen - 1 push!(pages, NULL_INT_PAGE) ##CHUNK 2 empty!(s.reverse) empty!(s.counters) return s end Base.isempty(s::SparseIntSet) = isempty(s.packed) Base.copy(s::SparseIntSet) = copy!(SparseIntSet(), s) function pageid_offset(s::SparseIntSet, i) pageid = div(i - 1, INT_PER_PAGE) + 1 return pageid, (i - 1) & (INT_PER_PAGE - 1) + 1 end function Base.in(i, s::SparseIntSet) pageid, offset = pageid_offset(s, i) if pageid > length(s.reverse) return false else ##CHUNK 3 s.reverse[to_page][to_offset] = s.reverse[from_page][from_offset] s.reverse[from_page][from_offset] = 0 s.counters[from_page] -= 1 pop!(s.packed) end cleanup!(s, from_page) return id end @inline function cleanup!(s::SparseIntSet, pageid::Int) if s.counters[pageid] == 0 s.reverse[pageid] = NULL_INT_PAGE end end @inline function Base.pop!(s::SparseIntSet, id::Integer, default) id < 0 && throw(ArgumentError("Int to pop needs to be positive.")) return in(id, s) ? (@inbounds pop!(s, id)) : default end Base.popfirst!(s::SparseIntSet) = pop!(s, first(s)) ##CHUNK 4 push!(s.counters, 0) end push!(pages, zeros(Int, INT_PER_PAGE)) push!(s.counters, 0) elseif pages[pageid] === NULL_INT_PAGE #assign a page to previous null page pages[pageid] = zeros(Int, INT_PER_PAGE) end page = pages[pageid] if page[offset] == 0 @inbounds page[offset] = length(s) + 1 @inbounds s.counters[pageid] += 1 push!(s.packed, i) return s end return s end @inline function Base.push!(s::SparseIntSet, is::Integer...) for i in is ##CHUNK 5 const INT_PER_PAGE = div(ccall(:jl_getpagesize, Clong, ()), sizeof(Int)) # we use this to mark pages not in use, it must never be written to. const NULL_INT_PAGE = Vector{Int}() mutable struct SparseIntSet packed ::Vector{Int} reverse::Vector{Vector{Int}} counters::Vector{Int} # counts the number of real elements in each page of reverse. end ##CHUNK 6 @boundscheck if !in(id, s) throw(BoundsError(s, id)) end @inbounds begin packed_endid = s.packed[end] from_page, from_offset = pageid_offset(s, id) to_page, to_offset = pageid_offset(s, packed_endid) packed_id = s.reverse[from_page][from_offset] s.packed[packed_id] = packed_endid s.reverse[to_page][to_offset] = s.reverse[from_page][from_offset] s.reverse[from_page][from_offset] = 0 s.counters[from_page] -= 1 pop!(s.packed) end cleanup!(s, from_page) return id end @inline function cleanup!(s::SparseIntSet, pageid::Int) ##CHUNK 7 const INT_PER_PAGE = div(ccall(:jl_getpagesize, Clong, ()), sizeof(Int)) # we use this to mark pages not in use, it must never be written to. const NULL_INT_PAGE = Vector{Int}() mutable struct SparseIntSet packed ::Vector{Int} reverse::Vector{Vector{Int}} counters::Vector{Int} # counts the number of real elements in each page of reverse. 
end SparseIntSet() = SparseIntSet(Int[], Vector{Int}[], Int[]) SparseIntSet(indices) = union!(SparseIntSet(), indices) Base.eltype(::Type{SparseIntSet}) = Int Base.empty(::SparseIntSet) = SparseIntSet() function Base.empty!(s::SparseIntSet) empty!(s.packed) ##CHUNK 8 pageid, offset = pageid_offset(s, i) pages = s.reverse plen = length(pages) if pageid > plen # Create new null pages up to pageid and fresh (zero-filled) one at pageid sizehint!(pages, pageid) sizehint!(s.counters, pageid) for i in 1:pageid - plen - 1 push!(pages, NULL_INT_PAGE) push!(s.counters, 0) end push!(pages, zeros(Int, INT_PER_PAGE)) push!(s.counters, 0) elseif pages[pageid] === NULL_INT_PAGE #assign a page to previous null page pages[pageid] = zeros(Int, INT_PER_PAGE) end page = pages[pageid] if page[offset] == 0 ##CHUNK 9 function pageid_offset(s::SparseIntSet, i) pageid = div(i - 1, INT_PER_PAGE) + 1 return pageid, (i - 1) & (INT_PER_PAGE - 1) + 1 end function Base.in(i, s::SparseIntSet) pageid, offset = pageid_offset(s, i) if pageid > length(s.reverse) return false else page = @inbounds s.reverse[pageid] return page !== NULL_INT_PAGE && @inbounds page[offset] != 0 end end Base.length(s::SparseIntSet) = length(s.packed) @inline function Base.push!(s::SparseIntSet, i::Integer) i <= 0 && throw(DomainError("Only positive Ints allowed.")) ##CHUNK 10 push!(s, i) end return s end @inline Base.@propagate_inbounds function Base.pop!(s::SparseIntSet) if isempty(s) throw(ArgumentError("Cannot pop an empty set.")) end id = pop!(s.packed) pageid, offset = pageid_offset(s, id) @inbounds s.reverse[pageid][offset] = 0 @inbounds s.counters[pageid] -= 1 cleanup!(s, pageid) return id end @inline Base.@propagate_inbounds function Base.pop!(s::SparseIntSet, id::Integer) id < 0 && throw(ArgumentError("Int to pop needs to be positive."))
129
141
DataStructures.jl
54
[ 129, 141 ]
function search_node(tree::SplayTree{K}, d::K) where K
    node = tree.root
    prev = nothing
    while node != nothing && node.data != d
        prev = node
        if node.data < d
            node = node.rightChild
        else
            node = node.leftChild
        end
    end
    return (node == nothing) ? prev : node
end
search_node
129
141
src/splay_tree.jl
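search_node is an internal helper: on a hit it returns the matching node, on a miss it returns the last node visited, so callers must re-check node.data. A small sketch through the public entry points, assuming SplayTree is exported by DataStructures as in this repo.

using DataStructures

tree = SplayTree{Int}()
push!(tree, 10); push!(tree, 4); push!(tree, 20)
@assert haskey(tree, 4)    # haskey calls search_node and splays the hit to the root
@assert !haskey(tree, 7)   # a miss returns the closest node, so haskey re-checks node.data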
#FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 return node end function search_node(tree::AVLTree{K}, d::K) where K prev = nothing node = tree.root while node != nothing && node.data != nothing && node.data != d prev = node if d < node.data node = node.leftChild else node = node.rightChild end end return (node == nothing) ? prev : node end """ ##CHUNK 2 """ minimum_node(tree::AVLTree, node::AVLTreeNode) Returns the AVLTreeNode with minimum value in subtree of `node`. """ function minimum_node(node::Union{AVLTreeNode, Nothing}) while node != nothing && node.leftChild != nothing node = node.leftChild end return node end function search_node(tree::AVLTree{K}, d::K) where K prev = nothing node = tree.root while node != nothing && node.data != nothing && node.data != d prev = node if d < node.data #FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild end end node.parent = node_y if node_y == nothing tree.root = node elseif node.data < node_y.data node_y.leftChild = node else node_y.rightChild = node ##CHUNK 2 end RBTree() = RBTree{Any}() Base.length(tree::RBTree) = tree.count """ search_node(tree, key) Returns the last visited node, while traversing through in binary-search-tree fashion looking for `key`. """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild ##CHUNK 3 """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild end end return node end """ haskey(tree, key) Returns true if `key` is present in the `tree`, else returns false. """ ##CHUNK 4 function Base.haskey(tree::RBTree{K}, d::K) where K node = search_node(tree, d) return (node.data == d) end """ insert_node!(tree::RBTree, node::RBTreeNode) Inserts `node` at proper location by traversing through the `tree` in a binary-search-tree fashion. 
""" function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild #CURRENT FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 while x !== nothing y = x if node.data > x.data x = x.rightChild else x = x.leftChild end end node.parent = y if y === nothing tree.root = node elseif node.data < y.data y.leftChild = node else y.rightChild = node end splay!(tree, node) tree.count += 1 ##CHUNK 2 function Base.push!(tree::SplayTree{K}, d0) where K d = convert(K, d0) is_present = search_node(tree, d) if (is_present !== nothing) && (is_present.data == d) return tree end # only unique keys are inserted node = SplayTreeNode{K}(d) y = nothing x = tree.root while x !== nothing y = x if node.data > x.data x = x.rightChild else x = x.leftChild end end node.parent = y ##CHUNK 3 if y === nothing tree.root = node elseif node.data < y.data y.leftChild = node else y.rightChild = node end splay!(tree, node) tree.count += 1 return tree end function Base.getindex(tree::SplayTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(KeyError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::Union{SplayTreeNode, Nothing}) if (node != nothing) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) ##CHUNK 4 x = maximum_node(s) splay!(tree, x) x.rightChild = t t.parent = x return x end end function Base.haskey(tree::SplayTree{K}, d::K) where K node = tree.root if node === nothing return false else node = search_node(tree, d) (node === nothing) && return false is_found = (node.data == d) is_found && splay!(tree, node) return is_found end
158
182
DataStructures.jl
55
[ 158, 182 ]
function Base.delete!(tree::SplayTree{K}, d::K) where K
    node = tree.root
    x = search_node(tree, d)
    (x == nothing) && return tree
    t = nothing
    s = nothing
    splay!(tree, x)
    if x.rightChild !== nothing
        t = x.rightChild
        t.parent = nothing
    end
    s = x
    s.rightChild = nothing
    if s.leftChild !== nothing
        s.leftChild.parent = nothing
    end
    tree.root = _join!(tree, s.leftChild, t)
    tree.count -= 1
    return tree
end
Base.delete!
158
182
src/splay_tree.jl
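A brief sketch of delete! in use, assuming SplayTree is exported by DataStructures as in this repo. The method splays the located node to the root and rejoins the two subtrees with _join!.

using DataStructures

tree = SplayTree{Int}()
for x in (8, 3, 15)
    push!(tree, x)
end
delete!(tree, 3)    # splay the node for 3 to the root, then join its left and right subtrees
@assert !haskey(tree, 3) && length(tree) == 2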
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 function Base.delete!(tree::RBTree{K}, d::K) where K z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data node = node.leftChild else node = node.rightChild end end (z === tree.nil) && return tree y = z y_original_color = y.color ##CHUNK 2 node = node.leftChild end return node end """ delete!(tree::RBTree, key) Deletes `key` from `tree`, if present, else returns the unmodified tree. """ function Base.delete!(tree::RBTree{K}, d::K) where K z = tree.nil node = tree.root while node !== tree.nil if node.data == d z = node end if d < node.data ##CHUNK 3 function Base.haskey(tree::RBTree{K}, d::K) where K node = search_node(tree, d) return (node.data == d) end """ insert_node!(tree::RBTree, node::RBTreeNode) Inserts `node` at proper location by traversing through the `tree` in a binary-search-tree fashion. """ function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild #FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 function delete_node!(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = delete_node!(node.leftChild, key) elseif key > node.data node.rightChild = delete_node!(node.rightChild, key) else if node.leftChild == nothing result = node.rightChild return result elseif node.rightChild == nothing result = node.leftChild return result else result = minimum_node(node.rightChild) node.data = result.data node.rightChild = delete_node!(node.rightChild, result.data) end end #CURRENT FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 SplayTree{K}() where K = new{K}(nothing, 0) end Base.length(tree::SplayTree) = tree.count SplayTree() = SplayTree{Any}() function left_rotate!(tree::SplayTree, node_x::SplayTreeNode) node_y = node_x.rightChild node_x.rightChild = node_y.leftChild if node_y.leftChild != nothing node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if node_x.parent == nothing tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else ##CHUNK 2 x = maximum_node(s) splay!(tree, x) x.rightChild = t t.parent = x return x end end function search_node(tree::SplayTree{K}, d::K) where K node = tree.root prev = nothing while node != nothing && node.data != d prev = node if node.data < d node = node.rightChild else node = node.leftChild end end return (node == nothing) ? 
prev : node ##CHUNK 3 is_present = search_node(tree, d) if (is_present !== nothing) && (is_present.data == d) return tree end # only unique keys are inserted node = SplayTreeNode{K}(d) y = nothing x = tree.root while x !== nothing y = x if node.data > x.data x = x.rightChild else x = x.leftChild end end node.parent = y if y === nothing ##CHUNK 4 node_x.parent.rightChild = node_y end if node_y != nothing node_y.leftChild = node_x end node_x.parent = node_y end function right_rotate!(tree::SplayTree, node_x::SplayTreeNode) node_y = node_x.leftChild node_x.leftChild = node_y.rightChild if node_y.rightChild != nothing node_y.rightChild.parent = node_x end node_y.parent = node_x.parent if node_x.parent == nothing tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else ##CHUNK 5 y = x if node.data > x.data x = x.rightChild else x = x.leftChild end end node.parent = y if y === nothing tree.root = node elseif node.data < y.data y.leftChild = node else y.rightChild = node end splay!(tree, node) tree.count += 1 return tree end ##CHUNK 6 # All the items in S are smaller than the items in T. # This is a two-step process. # In the first step, splay the largest node in S. This moves the largest node to the root node. # In the second step, set the right child of the new root of S to T. function _join!(tree::SplayTree, s::Union{SplayTreeNode, Nothing}, t::Union{SplayTreeNode, Nothing}) if s === nothing return t elseif t === nothing return s else x = maximum_node(s) splay!(tree, x) x.rightChild = t t.parent = x return x end end function search_node(tree::SplayTree{K}, d::K) where K node = tree.root
184
215
DataStructures.jl
56
[ 184, 215 ]
function Base.push!(tree::SplayTree{K}, d0) where K
    d = convert(K, d0)
    is_present = search_node(tree, d)
    if (is_present !== nothing) && (is_present.data == d)
        return tree
    end
    # only unique keys are inserted
    node = SplayTreeNode{K}(d)
    y = nothing
    x = tree.root
    while x !== nothing
        y = x
        if node.data > x.data
            x = x.rightChild
        else
            x = x.leftChild
        end
    end
    node.parent = y
    if y === nothing
        tree.root = node
    elseif node.data < y.data
        y.leftChild = node
    else
        y.rightChild = node
    end
    splay!(tree, node)
    tree.count += 1
    return tree
end
Base.push!
184
215
src/splay_tree.jl
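A sketch of push! semantics, assuming SplayTree is exported by DataStructures as in this repo: only unique keys are stored, and each freshly inserted node is splayed to the root.

using DataStructures

tree = SplayTree{Int}()
push!(tree, 5)
push!(tree, 5)              # duplicate: search_node finds the key, nothing is inserted
@assert length(tree) == 1
push!(tree, 2)              # new key: inserted as in a plain BST, then splayed to the root
@assert length(tree) == 2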
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 function Base.insert!(tree::RBTree{K}, d::K) where K # if the key exists in the tree, no need to insert haskey(tree, d) && return tree # insert, if not present in the tree node = RBTreeNode{K}(d) node.leftChild = node.rightChild = tree.nil insert_node!(tree, node) if node.parent == nothing node.color = false elseif node.parent.parent == nothing ; else fix_insert!(tree, node) end tree.count += 1 return tree end ##CHUNK 2 end end tree.root.color = false end """ insert!(tree, key) Inserts `key` in the `tree` if it is not present. """ function Base.insert!(tree::RBTree{K}, d::K) where K # if the key exists in the tree, no need to insert haskey(tree, d) && return tree # insert, if not present in the tree node = RBTreeNode{K}(d) node.leftChild = node.rightChild = tree.nil insert_node!(tree, node) ##CHUNK 3 function Base.haskey(tree::RBTree{K}, d::K) where K node = search_node(tree, d) return (node.data == d) end """ insert_node!(tree::RBTree, node::RBTreeNode) Inserts `node` at proper location by traversing through the `tree` in a binary-search-tree fashion. """ function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild ##CHUNK 4 """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild end end return node end """ haskey(tree, key) Returns true if `key` is present in the `tree`, else returns false. """ ##CHUNK 5 function insert_node!(tree::RBTree, node::RBTreeNode) node_y = nothing node_x = tree.root while node_x !== tree.nil node_y = node_x if node.data < node_x.data node_x = node_x.leftChild else node_x = node_x.rightChild end end node.parent = node_y if node_y == nothing tree.root = node elseif node.data < node_y.data node_y.leftChild = node else node_y.rightChild = node #FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 return node end function search_node(tree::AVLTree{K}, d::K) where K prev = nothing node = tree.root while node != nothing && node.data != nothing && node.data != d prev = node if d < node.data node = node.leftChild else node = node.rightChild end end return (node == nothing) ? prev : node end """ #CURRENT FILE: DataStructures.jl/src/splay_tree.jl ##CHUNK 1 x = maximum_node(s) splay!(tree, x) x.rightChild = t t.parent = x return x end end function search_node(tree::SplayTree{K}, d::K) where K node = tree.root prev = nothing while node != nothing && node.data != d prev = node if node.data < d node = node.rightChild else node = node.leftChild end end return (node == nothing) ? prev : node ##CHUNK 2 SplayTree{K}() where K = new{K}(nothing, 0) end Base.length(tree::SplayTree) = tree.count SplayTree() = SplayTree{Any}() function left_rotate!(tree::SplayTree, node_x::SplayTreeNode) node_y = node_x.rightChild node_x.rightChild = node_y.leftChild if node_y.leftChild != nothing node_y.leftChild.parent = node_x end node_y.parent = node_x.parent if node_x.parent == nothing tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else ##CHUNK 3 # All the items in S are smaller than the items in T. # This is a two-step process. # In the first step, splay the largest node in S. This moves the largest node to the root node. # In the second step, set the right child of the new root of S to T. 
function _join!(tree::SplayTree, s::Union{SplayTreeNode, Nothing}, t::Union{SplayTreeNode, Nothing}) if s === nothing return t elseif t === nothing return s else x = maximum_node(s) splay!(tree, x) x.rightChild = t t.parent = x return x end end function search_node(tree::SplayTree{K}, d::K) where K node = tree.root ##CHUNK 4 node_x.leftChild = node_y.rightChild if node_y.rightChild != nothing node_y.rightChild.parent = node_x end node_y.parent = node_x.parent if node_x.parent == nothing tree.root = node_y elseif (node_x == node_x.parent.leftChild) node_x.parent.leftChild = node_y else node_x.parent.rightChild = node_y end node_y.rightChild = node_x node_x.parent = node_y end # The splaying operation moves node_x to the root of the tree using the series of rotations. function splay!(tree::SplayTree, node_x::SplayTreeNode) while node_x.parent !== nothing parent = node_x.parent
217
230
DataStructures.jl
57
[ 217, 230 ]
function Base.getindex(tree::SplayTree{K}, ind) where K
    @boundscheck (1 <= ind <= tree.count) || throw(KeyError("$ind should be in between 1 and $(tree.count)"))
    function traverse_tree_inorder(node::Union{SplayTreeNode, Nothing})
        if (node != nothing)
            left = traverse_tree_inorder(node.leftChild)
            right = traverse_tree_inorder(node.rightChild)
            append!(push!(left, node.data), right)
        else
            return K[]
        end
    end
    arr = traverse_tree_inorder(tree.root)
    return @inbounds arr[ind]
end
traverse_tree_inorder
217
230
src/splay_tree.jl
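getindex above materializes a full in-order traversal and indexes into it, so tree[i] is the i-th smallest stored key at O(n) cost per lookup. A small sketch, assuming SplayTree is exported by DataStructures as in this repo.

using DataStructures

tree = SplayTree{Int}()
for x in (40, 10, 30, 20)
    push!(tree, x)
end
@assert tree[1] == 10    # smallest key
@assert tree[4] == 40    # largest key
# tree[5] throws a KeyError via the @boundscheck at the top of getindex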
#FILE: DataStructures.jl/src/red_black_tree.jl ##CHUNK 1 function traverse_tree_inorder(node::RBTreeNode{K}) where K if (node !== tree.nil) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) return @inbounds arr[ind] end ##CHUNK 2 Base.in(key, tree::RBTree) = haskey(tree, key) """ getindex(tree, ind) Gets the key present at index `ind` of the tree. Indexing is done in increasing order of key. """ function Base.getindex(tree::RBTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(ArgumentError("$ind should be in between 1 and $(tree.count)")) function traverse_tree_inorder(node::RBTreeNode{K}) where K if (node !== tree.nil) left = traverse_tree_inorder(node.leftChild) right = traverse_tree_inorder(node.rightChild) append!(push!(left, node.data), right) else return K[] end end arr = traverse_tree_inorder(tree.root) ##CHUNK 3 rb_transplant(tree, z, y) y.leftChild = z.leftChild y.leftChild.parent = y y.color = z.color end !y_original_color && delete_fix(tree, x) tree.count -= 1 return tree end Base.in(key, tree::RBTree) = haskey(tree, key) """ getindex(tree, ind) Gets the key present at index `ind` of the tree. Indexing is done in increasing order of key. """ function Base.getindex(tree::RBTree{K}, ind) where K @boundscheck (1 <= ind <= tree.count) || throw(ArgumentError("$ind should be in between 1 and $(tree.count)")) ##CHUNK 4 end RBTree() = RBTree{Any}() Base.length(tree::RBTree) = tree.count """ search_node(tree, key) Returns the last visited node, while traversing through in binary-search-tree fashion looking for `key`. """ search_node(tree, key) function search_node(tree::RBTree{K}, d::K) where K node = tree.root while node !== tree.nil && d != node.data if d < node.data node = node.leftChild else node = node.rightChild #FILE: DataStructures.jl/src/avl_tree.jl ##CHUNK 1 julia> tree[4] 7 julia> tree[8] 15 ``` """ function Base.getindex(tree::AVLTree{K}, ind::Integer) where K @boundscheck (1 <= ind <= tree.count) || throw(BoundsError("$ind should be in between 1 and $(tree.count)")) function traverse_tree(node::AVLTreeNode_or_null, idx) if (node != nothing) L = get_subsize(node.leftChild) if idx <= L return traverse_tree(node.leftChild, idx) elseif idx == L + 1 return node.data else return traverse_tree(node.rightChild, idx - L - 1) end ##CHUNK 2 function traverse_tree(node::AVLTreeNode_or_null, idx) if (node != nothing) L = get_subsize(node.leftChild) if idx <= L return traverse_tree(node.leftChild, idx) elseif idx == L + 1 return node.data else return traverse_tree(node.rightChild, idx - L - 1) end end end value = traverse_tree(tree.root, ind) return value end ##CHUNK 3 """ in(key, tree::AVLTree) `In` infix operator for `key` and `tree` types. Analogous to [`haskey(tree::AVLTree{K}, k::K) where K`](@ref). 
""" Base.in(key, tree::AVLTree) = haskey(tree, key) function insert_node(node::Nothing, key::K) where K return AVLTreeNode{K}(key) end function insert_node(node::AVLTreeNode{K}, key::K) where K if key < node.data node.leftChild = insert_node(node.leftChild, key) else node.rightChild = insert_node(node.rightChild, key) end node.subsize = compute_subtree_size(node) node.height = compute_height(node) #FILE: DataStructures.jl/src/balanced_tree.jl ##CHUNK 1 @inbounds p = t.data[i].parent prevchild = 0 depthp = t.depth @inbounds while true if depthp < t.depth p = t.tree[ii].parent end if t.tree[p].child3 == ii prevchild = t.tree[p].child2 break end if t.tree[p].child2 == ii prevchild = t.tree[p].child1 break end ii = p depthp -= 1 end @inbounds while true if depthp == t.depth ##CHUNK 2 @invariant i != 2 && i in t.useddatacells @inbounds p = t.data[i].parent nextchild = 0 depthp = t.depth @inbounds while true if depthp < t.depth p = t.tree[ii].parent end if t.tree[p].child1 == ii nextchild = t.tree[p].child2 break end if t.tree[p].child2 == ii && t.tree[p].child3 > 0 nextchild = t.tree[p].child3 break end ii = p depthp -= 1 end @inbounds while true #FILE: DataStructures.jl/src/fenwick.jl ##CHUNK 1 n = length(a) tree = FenwickTree{U}(n) @inbounds for i = 1:n inc!(tree, i, a[i]) end tree end Base.length(ft::FenwickTree) = ft.n Base.eltype(::Type{FenwickTree{T}}) where T = T """ inc!(ft::FenwickTree{T}, ind::Integer, val) Increases the value of the [`FenwickTree`] by `val` from the index `ind` upto the length of the Fenwick Tree. """ function inc!(ft::FenwickTree{T}, ind::Integer, val = 1) where T val0 = convert(T, val) i = ind #CURRENT FILE: DataStructures.jl/src/splay_tree.jl
149
170
DataStructures.jl
58
[ 149, 170 ]
function ht_keyindex(h::SwissDict, key, i0, tag)
    slots = h.slots
    keys = h.keys
    sz = length(slots)
    i = i0 & (sz-1)
    _prefetchr(pointer(h.keys, i*16+1))
    _prefetchr(pointer(h.vals, i*16+1))
    #Todo/discuss: _prefetchr(pointer(h.keys, i*16+9))?
    @inbounds while true
        msk = slots[i+1]
        cands, done = _find_candidates(msk, tag)
        while cands != 0
            off = trailing_zeros(cands)
            idx = i*16 + off + 1
            isequal(keys[idx], key) && return idx
            cands = _blsr(cands)
        end
        done && break
        i = (i+1) & (sz-1)
    end
    return -1
end
ht_keyindex
149
170
src/swiss_dict.jl
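ht_keyindex is internal: it probes 16-slot groups, matches the hash tag against the slot bytes with _find_candidates, and returns the key's index or -1 on a miss. The dict lookups build on it from the outside; a minimal sketch, assuming SwissDict is exported by DataStructures and supports the usual AbstractDict lookups (haskey, get), an assumption beyond the chunks shown here.

using DataStructures

d = SwissDict{String,Int}("a" => 1, "b" => 2)
@assert d["a"] == 1            # hit: ht_keyindex finds a positive slot index
@assert haskey(d, "a")
@assert get(d, "zzz", 0) == 0  # miss: ht_keyindex returns -1, so the default is used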
#FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 curr = next next = (next & (sz-1)) + 1 end #curr is at the last position, reset back to normal isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, curr-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, curr-1) @inbounds h.hashes[curr] = 0x0 h.count -= 1 # this is necessary because key at idxfloor might get deleted h.idxfloor = get_next_filled(h, h.idxfloor) return h end function _pop!(h::RobinDict, index) @inbounds val = h.vals[index] rh_delete!(h, index) return val end ##CHUNK 2 v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) #FILE: DataStructures.jl/src/int_set.jl ##CHUNK 1 idx = n+1 if 1 <= idx <= length(s.bits) unsafe_getindex(s.bits, idx) != s.inverse else ifelse((idx <= 0) | (idx > typemax(Int)), false, s.inverse) end end function findnextidx(s::IntSet, i::Int, invert=false) if s.inverse ⊻ invert # i+1 could rollover causing a BoundsError in findnext/findnextnot nextidx = i == typemax(Int) ? 0 : something(findnextnot(s.bits, i+1), 0) # Extend indices beyond the length of the bits since it is inverted nextidx = nextidx == 0 ? max(i, length(s.bits))+1 : nextidx else nextidx = i == typemax(Int) ? 0 : something(findnext(s.bits, i+1), 0) end return nextidx end ##CHUNK 2 # i+1 could rollover causing a BoundsError in findnext/findnextnot nextidx = i == typemax(Int) ? 0 : something(findnextnot(s.bits, i+1), 0) # Extend indices beyond the length of the bits since it is inverted nextidx = nextidx == 0 ? max(i, length(s.bits))+1 : nextidx else nextidx = i == typemax(Int) ? 
0 : something(findnext(s.bits, i+1), 0) end return nextidx end Base.iterate(s::IntSet) = iterate(s, findnextidx(s, 0)) function Base.iterate(s::IntSet, i::Int, invert=false) i <= 0 && return nothing return (i-1, findnextidx(s, i, invert)) end # Nextnot iterates through elements *not* in the set nextnot(s::IntSet, i) = iterate(s, i, true) #FILE: DataStructures.jl/src/sorted_dict.jl ##CHUNK 1 firsti == 0 && return nothing foundi = firsti @inbounds firstk = deref_key((sds.vec[firsti], state[firsti])) for i = firsti + 1 : N if state[i] != pastendsemitoken(sds.vec[i]) @inbounds k2 = deref_key((sds.vec[i], state[i])) if !lt(ord, firstk, k2) foundi = i firstk = k2 end end end foundsemitoken = state[foundi] for i = firsti : N @inbounds if state[i] != pastendsemitoken(sds.vec[i]) && eq(ord, deref_key((sds.vec[i], state[i])), firstk) state[i] = advance((sds.vec[i], state[i])) end end @inbounds return (deref((sds.vec[foundi], foundsemitoken)), state) #FILE: DataStructures.jl/src/sorted_multi_dict.jl ##CHUNK 1 function in_(k_, d_, m::SortedMultiDict) k = convert(keytype(m), k_) d = convert(valtype(m), d_) i1 = findkeyless(m.bt, k) i2,exactfound = findkey(m.bt,k) !exactfound && return false ord = m.bt.ord while true i1 = nextloc0(m.bt, i1) @invariant eq(ord, m.bt.data[i1].k, k) m.bt.data[i1].d == d && return true i1 == i2 && return false end end """ Base.in(p::Pair, smd::SortedMultiDict) #CURRENT FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 # and the key would be inserted at pos # This version is for use by setindex! and get!. It never rehashes. ht_keyindex2!(h::SwissDict, key) = ht_keyindex2!(h, key, _hashtag(hash(key))...) @inline function ht_keyindex2!(h::SwissDict, key, i0, tag) slots = h.slots keys = h.keys sz = length(slots) i = i0 & (sz-1) _prefetchw(pointer(h.keys, i*16+1)) _prefetchw(pointer(h.vals, i*16+1)) #Todo/discuss: _prefetchr(pointer(h.keys, i*16+9))? @inbounds while true msk = slots[i+1] cands, done = _find_candidates(msk, tag) while cands != 0 off = trailing_zeros(cands) idx = i*16 + off + 1 isequal(keys[idx], key) && return idx, tag cands = _blsr(cands) end ##CHUNK 2 return -idx, tag end i = (i+1) & (sz-1) end end function _setindex!(h::SwissDict, v, key, index, tag) @inbounds h.keys[index] = key @inbounds h.vals[index] = v h.count += 1 h.age += 1 so = _slotget(h.slots, index) h.nbfull += (iszero(index & 0x0f) & (so==0x00)) _slotset!(h.slots, tag, index) if index < h.idxfloor h.idxfloor = index end maybe_rehash_grow!(h) end ##CHUNK 3 end @inline _find_free(v::_u8x16) = _vcmp_le(v, _expand16(UInt8(1))) # Basic operations # get the index where a key is stored, or -1 if not present ht_keyindex(h::SwissDict, key) = ht_keyindex(h::SwissDict, key, _hashtag(hash(key))...) # get the index where a key is stored, or -pos if not present # and the key would be inserted at pos # This version is for use by setindex! and get!. It never rehashes. ht_keyindex2!(h::SwissDict, key) = ht_keyindex2!(h, key, _hashtag(hash(key))...) 
@inline function ht_keyindex2!(h::SwissDict, key, i0, tag) slots = h.slots keys = h.keys sz = length(slots) i = i0 & (sz-1) _prefetchw(pointer(h.keys, i*16+1)) _prefetchw(pointer(h.vals, i*16+1)) ##CHUNK 4 return (hi, tag) end Base.@propagate_inbounds function _slotget(slots::Vector{_u8x16}, i::Int) @boundscheck 0 < i <= length(slots)*16 || throw(BoundsError(slots, 1 + (i-1)>>4)) GC.@preserve slots begin return unsafe_load(convert(Ptr{UInt8}, pointer(slots)), i) end end Base.@propagate_inbounds function _slotset!(slots::Vector{_u8x16}, v::UInt8, i::Int) @boundscheck 0 < i <= length(slots)*16 || throw(BoundsError(slots, 1 + (i-1)>>4)) GC.@preserve slots begin return unsafe_store!(convert(Ptr{UInt8}, pointer(slots)), v, i) end end @inline function _find_candidates(v::_u8x16, tag::UInt8) match = _vcmp_eq(v, _expand16(tag)) return (match, v[16].value === 0x00)
244
254
DataStructures.jl
59
[ 244, 254 ]
function _iterslots(h::SwissDict, state)
    i, sl = state
    while iszero(sl)
        i += 1
        i <= length(h.slots) || return nothing
        @inbounds msk = h.slots[i]
        sl = _find_free(msk)
        sl = (~sl & 0xffff)
    end
    return ((i-1)*16 + trailing_zeros(sl) + 1, (i, _blsr(sl)))
end
_iterslots
244
254
src/swiss_dict.jl
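The iteration state here is (slot-group index, remaining occupancy bitmask): trailing_zeros picks the next occupied slot in the 16-wide group and _blsr clears that bit. Below is a tiny standalone illustration of the same bit trick on a plain UInt16 mask; it does not touch the dict API, and it assumes _blsr is the usual x & (x - 1) lowest-set-bit reset.

# walk the set bits of a 16-wide occupancy mask, the same trick _iterslots uses
function occupied_indices(group::Int, sl::UInt16)
    indices = Int[]
    while !iszero(sl)
        off = trailing_zeros(sl)                   # position of the lowest set bit
        push!(indices, (group - 1) * 16 + off + 1)
        sl &= sl - one(sl)                         # clear the lowest set bit (what _blsr does)
    end
    return indices
end

# slots 1, 4, and 13 of the 3rd group are occupied
@assert occupied_indices(3, UInt16(0b0001_0000_0000_1001)) == [33, 36, 45]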
#CURRENT FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 end function _delete!(h::SwissDict{K,V}, index) where {K,V} # Caller is responsible for maybe shrinking the SwissDict after the deletion. isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, index-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, index-1) isboundary = iszero(index & 0x0f) #boundaries: 16, 32, ... @inbounds _slotset!(h.slots, ifelse(isboundary, 0x01, 0x00), index) h.count -= 1 h.age += 1 maybe_rehash_shrink!(h) end # fast iteration over active slots. function _iterslots(h::SwissDict, start::Int) i0 = ((start-1) & (length(h.keys)-1))>>4 + 1 off = (start-1) & 0x0f @inbounds sl = _find_free(h.slots[i0>>4 + 1]) sl = ((~sl & 0xffff)>>off) << off ##CHUNK 2 cands = _blsr(cands) end done && break i = (i+1) & (sz-1) end i = i0 & (sz-1) @inbounds while true msk = slots[i+1] cands = _find_free(msk) if cands != 0 off = trailing_zeros(cands) idx = i*16 + off + 1 return -idx, tag end i = (i+1) & (sz-1) end end function _setindex!(h::SwissDict, v, key, index, tag) @inbounds h.keys[index] = key ##CHUNK 3 @inbounds while is !== nothing i, s = is k = oldk[i] v = oldv[i] i0, t = _hashtag(hash(k)) i = i0 & (nssz-1) idx = 0 while true msk = slots[i + 1] cands = _find_free(msk) if cands != 0 off = trailing_zeros(cands) idx = i*16 + off + 1 break end i = (i+1) & (nssz-1) end _slotset!(slots, t, idx) keys[idx] = k vals[idx] = v ##CHUNK 4 keys = h.keys sz = length(slots) i = i0 & (sz-1) _prefetchr(pointer(h.keys, i*16+1)) _prefetchr(pointer(h.vals, i*16+1)) #Todo/discuss: _prefetchr(pointer(h.keys, i*16+9))? @inbounds while true msk = slots[i+1] cands, done = _find_candidates(msk, tag) while cands != 0 off = trailing_zeros(cands) idx = i*16 + off + 1 isequal(keys[idx], key) && return idx cands = _blsr(cands) end done && break i = (i+1) & (sz-1) end return -1 end ##CHUNK 5 _prefetchw(pointer(h.keys, i*16+1)) _prefetchw(pointer(h.vals, i*16+1)) #Todo/discuss: _prefetchr(pointer(h.keys, i*16+9))? @inbounds while true msk = slots[i+1] cands, done = _find_candidates(msk, tag) while cands != 0 off = trailing_zeros(cands) idx = i*16 + off + 1 isequal(keys[idx], key) && return idx, tag cands = _blsr(cands) end done && break i = (i+1) & (sz-1) end i = i0 & (sz-1) @inbounds while true msk = slots[i+1] cands = _find_free(msk) if cands != 0 ##CHUNK 6 if cands != 0 off = trailing_zeros(cands) idx = i*16 + off + 1 break end i = (i+1) & (nssz-1) end _slotset!(slots, t, idx) keys[idx] = k vals[idx] = v nbfull += iszero(idx & 0x0f) count += 1 if h.age != age0 return rehash!(h, newsz) end is = _iterslots(h, s) end h.slots = slots h.keys = keys h.vals = vals ##CHUNK 7 return h end nssz = newsz>>4 slots = fill(_expand16(0x00), nssz) keys = Vector{K}(undef, newsz) vals = Vector{V}(undef, newsz) age0 = h.age nbfull = 0 is = _iterslots(h, 1) count = 0 @inbounds while is !== nothing i, s = is k = oldk[i] v = oldv[i] i0, t = _hashtag(hash(k)) i = i0 & (nssz-1) idx = 0 while true msk = slots[i + 1] cands = _find_free(msk) ##CHUNK 8 end @inline _find_free(v::_u8x16) = _vcmp_le(v, _expand16(UInt8(1))) # Basic operations # get the index where a key is stored, or -1 if not present ht_keyindex(h::SwissDict, key) = ht_keyindex(h::SwissDict, key, _hashtag(hash(key))...) 
function ht_keyindex(h::SwissDict, key, i0, tag) slots = h.slots keys = h.keys sz = length(slots) i = i0 & (sz-1) _prefetchr(pointer(h.keys, i*16+1)) _prefetchr(pointer(h.vals, i*16+1)) #Todo/discuss: _prefetchr(pointer(h.keys, i*16+9))? @inbounds while true msk = slots[i+1] cands, done = _find_candidates(msk, tag) while cands != 0 ##CHUNK 9 off = trailing_zeros(cands) idx = i*16 + off + 1 return -idx, tag end i = (i+1) & (sz-1) end end function _setindex!(h::SwissDict, v, key, index, tag) @inbounds h.keys[index] = key @inbounds h.vals[index] = v h.count += 1 h.age += 1 so = _slotget(h.slots, index) h.nbfull += (iszero(index & 0x0f) & (so==0x00)) _slotset!(h.slots, tag, index) if index < h.idxfloor h.idxfloor = index end maybe_rehash_grow!(h) ##CHUNK 10 Base.@propagate_inbounds function _slotset!(slots::Vector{_u8x16}, v::UInt8, i::Int) @boundscheck 0 < i <= length(slots)*16 || throw(BoundsError(slots, 1 + (i-1)>>4)) GC.@preserve slots begin return unsafe_store!(convert(Ptr{UInt8}, pointer(slots)), v, i) end end @inline function _find_candidates(v::_u8x16, tag::UInt8) match = _vcmp_eq(v, _expand16(tag)) return (match, v[16].value === 0x00) end @inline _find_free(v::_u8x16) = _vcmp_le(v, _expand16(UInt8(1))) # Basic operations # get the index where a key is stored, or -1 if not present ht_keyindex(h::SwissDict, key) = ht_keyindex(h::SwissDict, key, _hashtag(hash(key))...) function ht_keyindex(h::SwissDict, key, i0, tag) slots = h.slots
287
346
DataStructures.jl
60
[ 287, 346 ]
function rehash!(h::SwissDict{K,V}, newsz = length(h.keys)) where {K, V}
    olds = h.slots
    oldk = h.keys
    oldv = h.vals
    sz = length(oldk)
    newsz = _tablesz(newsz)
    (newsz*SWISS_DICT_LOAD_FACTOR) > h.count || (newsz <<= 1)
    h.age += 1
    h.idxfloor = 1
    if h.count == 0
        resize!(h.slots, newsz>>4)
        fill!(h.slots, _expand16(0x00))
        resize!(h.keys, newsz)
        resize!(h.vals, newsz)
        h.nbfull = 0
        return h
    end
    nssz = newsz>>4
    slots = fill(_expand16(0x00), nssz)
    keys = Vector{K}(undef, newsz)
    vals = Vector{V}(undef, newsz)
    age0 = h.age
    nbfull = 0
    is = _iterslots(h, 1)
    count = 0
    @inbounds while is !== nothing
        i, s = is
        k = oldk[i]
        v = oldv[i]
        i0, t = _hashtag(hash(k))
        i = i0 & (nssz-1)
        idx = 0
        while true
            msk = slots[i + 1]
            cands = _find_free(msk)
            if cands != 0
                off = trailing_zeros(cands)
                idx = i*16 + off + 1
                break
            end
            i = (i+1) & (nssz-1)
        end
        _slotset!(slots, t, idx)
        keys[idx] = k
        vals[idx] = v
        nbfull += iszero(idx & 0x0f)
        count += 1
        if h.age != age0
            return rehash!(h, newsz)
        end
        is = _iterslots(h, s)
    end
    h.slots = slots
    h.keys = keys
    h.vals = vals
    h.nbfull = nbfull
    @assert h.age == age0
    @assert h.count == count
    return h
end
rehash!
287
346
src/swiss_dict.jl
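rehash! is reached indirectly, for example through sizehint! or when inserts exceed the load factor; it re-tags every live entry and places it into freshly allocated slot groups. A minimal sketch of the public path into it, assuming SwissDict is exported by DataStructures as in this repo.

using DataStructures

d = SwissDict{Int,Int}()
sizehint!(d, 10_000)    # grows the table up front, going through rehash!
for i in 1:1000
    d[i] = i * i        # inserts can also grow the table via maybe_rehash_grow!
end
@assert length(d) == 1000 && d[32] == 1024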
#FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 end return index_curr end #rehash! algorithm function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end ##CHUNK 2 h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) ##CHUNK 3 v = oldv[i] rh_insert_for_rehash!(h, k, v, oldh[i]) end end return h end function Base.sizehint!(d::RobinDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) ##CHUNK 4 newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] #CURRENT FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 fill!(h.slots, _expand16(0x00)) sz = length(h.keys) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) h.nbfull = 0 h.count = 0 h.age += 1 h.idxfloor = 1 return h end function Base.setindex!(h::SwissDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) _setindex!(h, v0, key) end function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) ##CHUNK 2 off = trailing_zeros(cands) idx = i*16 + off + 1 return -idx, tag end i = (i+1) & (sz-1) end end function _setindex!(h::SwissDict, v, key, index, tag) @inbounds h.keys[index] = key @inbounds h.vals[index] = v h.count += 1 h.age += 1 so = _slotget(h.slots, index) h.nbfull += (iszero(index & 0x0f) & (so==0x00)) _slotset!(h.slots, tag, index) if index < h.idxfloor h.idxfloor = index end maybe_rehash_grow!(h) ##CHUNK 3 end function _delete!(h::SwissDict{K,V}, index) where {K,V} # Caller is responsible for maybe shrinking the SwissDict after the deletion. isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, index-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, index-1) isboundary = iszero(index & 0x0f) #boundaries: 16, 32, ... @inbounds _slotset!(h.slots, ifelse(isboundary, 0x01, 0x00), index) h.count -= 1 h.age += 1 maybe_rehash_shrink!(h) end # fast iteration over active slots. 
function _iterslots(h::SwissDict, start::Int) i0 = ((start-1) & (length(h.keys)-1))>>4 + 1 off = (start-1) & 0x0f @inbounds sl = _find_free(h.slots[i0>>4 + 1]) sl = ((~sl & 0xffff)>>off) << off ##CHUNK 4 sz = length(h.keys) if h.count*4 < sz && sz > 16 rehash!(h, sz>>1) end end function Base.sizehint!(d::SwissDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.isempty(t::SwissDict) = (t.count == 0) Base.length(t::SwissDict) = t.count ##CHUNK 5 keys::Vector{K} vals::Vector{V} nbfull::Int count::Int age::UInt idxfloor::Int # an index <= the indices of all used slots function SwissDict{K,V}() where {K, V} new(fill(_expand16(0x00),1), Vector{K}(undef, 16), Vector{V}(undef, 16), 0, 0, 0, 1) end function SwissDict{K,V}(d::SwissDict{K,V}) where {K, V} new(copy(d.slots), copy(d.keys), copy(d.vals), d.nbfull, d.count, d.age, d.idxfloor) end function SwissDict{K, V}(slots, keys, vals, nbfull, count, age, idxfloor) where {K, V} new(slots, keys, vals, nbfull, count, age, idxfloor) end end function SwissDict{K,V}(kv) where {K, V} h = SwissDict{K,V}() ##CHUNK 6 end @inline _find_free(v::_u8x16) = _vcmp_le(v, _expand16(UInt8(1))) # Basic operations # get the index where a key is stored, or -1 if not present ht_keyindex(h::SwissDict, key) = ht_keyindex(h::SwissDict, key, _hashtag(hash(key))...) function ht_keyindex(h::SwissDict, key, i0, tag) slots = h.slots keys = h.keys sz = length(slots) i = i0 & (sz-1) _prefetchr(pointer(h.keys, i*16+1)) _prefetchr(pointer(h.vals, i*16+1)) #Todo/discuss: _prefetchr(pointer(h.keys, i*16+9))? @inbounds while true msk = slots[i+1] cands, done = _find_candidates(msk, tag) while cands != 0
370
382
DataStructures.jl
61
[ 370, 382 ]
function Base.empty!(h::SwissDict{K,V}) where {K, V}
    fill!(h.slots, _expand16(0x00))
    sz = length(h.keys)
    empty!(h.keys)
    empty!(h.vals)
    resize!(h.keys, sz)
    resize!(h.vals, sz)
    h.nbfull = 0
    h.count = 0
    h.age += 1
    h.idxfloor = 1
    return h
end
Base.empty!
370
382
src/swiss_dict.jl
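empty! keeps the current capacity (keys and vals are resized back to their previous length) and only clears the slot metadata and counters. A short sketch, assuming SwissDict is exported by DataStructures as in this repo.

using DataStructures

d = SwissDict{Symbol,Int}(:x => 1, :y => 2)
empty!(d)
@assert isempty(d) && length(d) == 0
d[:z] = 3               # the emptied dict is immediately reusable at the same capacity
@assert d[:z] == 3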
#FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 RobinDict{String, Int64}() ``` """ function Base.empty!(h::RobinDict{K,V}) where {K, V} sz = length(h.keys) empty!(h.hashes) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) resize!(h.hashes, sz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end function rh_search(h::RobinDict{K, V}, key) where {K, V} sz = length(h.keys) chash = hash_key(key) ##CHUNK 2 end return index_curr end #rehash! algorithm function rehash!(h::RobinDict{K,V}, newsz = length(h.keys)) where {K, V} oldk = h.keys oldv = h.vals oldh = h.hashes sz = length(oldk) newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end ##CHUNK 3 newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] #CURRENT FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 vals[idx] = v nbfull += iszero(idx & 0x0f) count += 1 if h.age != age0 return rehash!(h, newsz) end is = _iterslots(h, s) end h.slots = slots h.keys = keys h.vals = vals h.nbfull = nbfull @assert h.age == age0 @assert h.count == count return h end Base.isempty(t::SwissDict) = (t.count == 0) Base.length(t::SwissDict) = t.count ##CHUNK 2 if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end function rehash!(h::SwissDict{K,V}, newsz = length(h.keys)) where {K, V} olds = h.slots oldk = h.keys oldv = h.vals sz = length(oldk) newsz = _tablesz(newsz) (newsz*SWISS_DICT_LOAD_FACTOR) > h.count || (newsz <<= 1) h.age += 1 h.idxfloor = 1 if h.count == 0 resize!(h.slots, newsz>>4) fill!(h.slots, _expand16(0x00)) resize!(h.keys, newsz) resize!(h.vals, newsz) ##CHUNK 3 sz = length(oldk) newsz = _tablesz(newsz) (newsz*SWISS_DICT_LOAD_FACTOR) > h.count || (newsz <<= 1) h.age += 1 h.idxfloor = 1 if h.count == 0 resize!(h.slots, newsz>>4) fill!(h.slots, _expand16(0x00)) resize!(h.keys, newsz) resize!(h.vals, newsz) h.nbfull = 0 return h end nssz = newsz>>4 slots = fill(_expand16(0x00), nssz) keys = Vector{K}(undef, newsz) vals = Vector{V}(undef, newsz) age0 = h.age nbfull = 0 is = _iterslots(h, 1) ##CHUNK 4 sz = length(h.keys) if h.count*4 < sz && sz > 16 rehash!(h, sz>>1) end end function Base.sizehint!(d::SwissDict, newsz::Integer) newsz = _tablesz(newsz*2) # *2 for keys and values in same array oldsz = length(d.keys) # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end function rehash!(h::SwissDict{K,V}, newsz = length(h.keys)) where {K, V} olds = h.slots oldk = h.keys oldv = h.vals ##CHUNK 5 cands = _find_free(msk) if cands != 0 off = trailing_zeros(cands) idx = i*16 + off + 1 break end i = (i+1) & (nssz-1) end _slotset!(slots, t, idx) keys[idx] = k vals[idx] = v nbfull += iszero(idx & 0x0f) count += 1 if h.age != age0 return rehash!(h, newsz) end is = _iterslots(h, s) end h.slots = slots h.keys = keys ##CHUNK 6 keys::Vector{K} vals::Vector{V} nbfull::Int count::Int age::UInt idxfloor::Int # an index <= the indices of all used slots function SwissDict{K,V}() where {K, V} new(fill(_expand16(0x00),1), Vector{K}(undef, 16), Vector{V}(undef, 16), 0, 0, 0, 1) end function SwissDict{K,V}(d::SwissDict{K,V}) where {K, V} new(copy(d.slots), copy(d.keys), copy(d.vals), d.nbfull, d.count, d.age, 
d.idxfloor) end function SwissDict{K, V}(slots, keys, vals, nbfull, count, age, idxfloor) where {K, V} new(slots, keys, vals, nbfull, count, age, idxfloor) end end function SwissDict{K,V}(kv) where {K, V} h = SwissDict{K,V}() ##CHUNK 7 function SwissDict{K,V}(d::SwissDict{K,V}) where {K, V} new(copy(d.slots), copy(d.keys), copy(d.vals), d.nbfull, d.count, d.age, d.idxfloor) end function SwissDict{K, V}(slots, keys, vals, nbfull, count, age, idxfloor) where {K, V} new(slots, keys, vals, nbfull, count, age, idxfloor) end end function SwissDict{K,V}(kv) where {K, V} h = SwissDict{K,V}() for (k,v) in kv h[k] = v end return h end SwissDict{K,V}(p::Pair) where {K,V} = setindex!(SwissDict{K,V}(), p.second, p.first) function SwissDict{K,V}(ps::Pair...) where {K, V} h = SwissDict{K,V}() sizehint!(h, length(ps)) for p in ps
389
402
DataStructures.jl
62
function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) index, tag = ht_keyindex2!(h, key) if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return h end
function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) index, tag = ht_keyindex2!(h, key) if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return h end
[ 389, 402 ]
function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) index, tag = ht_keyindex2!(h, key) if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return h end
function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) index, tag = ht_keyindex2!(h, key) if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return h end
_setindex!
389
402
src/swiss_dict.jl
#FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 @inbounds h.dict[key] = Int32(nk) h.count += 1 end function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 _setindex!(h, v0, key0) else @assert haskey(h, key0) @inbounds orig_v = h.vals[index] !isequal(orig_v, v0) && (@inbounds h.vals[index] = v0) end check_for_rehash(h) && rehash!(h) return h ##CHUNK 2 empty!(h.vals) h.count = 0 return h end function _setindex!(h::OrderedRobinDict, v, key) hk, hv = h.keys, h.vals push!(hk, key) push!(hv, v) nk = length(hk) @inbounds h.dict[key] = Int32(nk) h.count += 1 end function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 #FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 function Base.setindex!(h::RobinDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) isequal(key, key0) || throw(ArgumentError("$key0 is not a valid key for type $K")) _setindex!(h, key, v0) end function _setindex!(h::RobinDict{K,V}, key::K, v0) where {K, V} v = convert(V, v0) index = rh_insert!(h, key, v) @assert index > 0 return h end """ empty!(collection) -> collection Remove all elements from a `collection`. # Examples ##CHUNK 2 ``` """ function Base.getkey(h::RobinDict{K,V}, key, default) where {K, V} index = rh_search(h, key) @inbounds return (index < 0) ? default : h.keys[index]::K end # backward shift deletion by not keeping any tombstones function rh_delete!(h::RobinDict{K, V}, index) where {K, V} @assert index > 0 # this assumes that there is a key/value present in the dictionary at index index0 = index sz = length(h.keys) @inbounds while true index0 = (index0 & (sz - 1)) + 1 if isslotempty(h, index0) || calculate_distance(h, index0) == 0 break end end #CURRENT FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 key = convert(K, key0) return _get!(default, h, key) end function _get!(default::Callable, h::SwissDict{K,V}, key::K) where {K, V} index, tag = ht_keyindex2!(h, key) index > 0 && return @inbounds h.vals[index] age0 = h.age v = convert(V, default()) if h.age != age0 index, tag = ht_keyindex2!(h, key) end if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) ##CHUNK 2 v = convert(V, default()) if h.age != age0 index, tag = ht_keyindex2!(h, key) end if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return v end function Base.getindex(h::SwissDict{K,V}, key) where {K, V} index = ht_keyindex(h, key) @inbounds return (index < 0) ? 
throw(KeyError(key)) : h.vals[index]::V end """ ##CHUNK 3 This is intended to be called using `do` block syntax: ```julia get!(dict, key) do # default value calculated here time() end ``` """ function Base.get!(default::Callable, h::SwissDict{K,V}, key0) where {K, V} key = convert(K, key0) return _get!(default, h, key) end function _get!(default::Callable, h::SwissDict{K,V}, key::K) where {K, V} index, tag = ht_keyindex2!(h, key) index > 0 && return @inbounds h.vals[index] age0 = h.age ##CHUNK 4 off = trailing_zeros(cands) idx = i*16 + off + 1 return -idx, tag end i = (i+1) & (sz-1) end end function _setindex!(h::SwissDict, v, key, index, tag) @inbounds h.keys[index] = key @inbounds h.vals[index] = v h.count += 1 h.age += 1 so = _slotget(h.slots, index) h.nbfull += (iszero(index & 0x0f) & (so==0x00)) _slotset!(h.slots, tag, index) if index < h.idxfloor h.idxfloor = index end maybe_rehash_grow!(h) ##CHUNK 5 fill!(h.slots, _expand16(0x00)) sz = length(h.keys) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) h.nbfull = 0 h.count = 0 h.age += 1 h.idxfloor = 1 return h end function Base.setindex!(h::SwissDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) _setindex!(h, v0, key) end """ ##CHUNK 6 # get the index where a key is stored, or -pos if not present # and the key would be inserted at pos # This version is for use by setindex! and get!. It never rehashes. ht_keyindex2!(h::SwissDict, key) = ht_keyindex2!(h, key, _hashtag(hash(key))...) @inline function ht_keyindex2!(h::SwissDict, key, i0, tag) slots = h.slots keys = h.keys sz = length(slots) i = i0 & (sz-1) _prefetchw(pointer(h.keys, i*16+1)) _prefetchw(pointer(h.vals, i*16+1)) #Todo/discuss: _prefetchr(pointer(h.keys, i*16+9))? @inbounds while true msk = slots[i+1] cands, done = _find_candidates(msk, tag) while cands != 0 off = trailing_zeros(cands) idx = i*16 + off + 1 isequal(keys[idx], key) && return idx, tag
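Editor's note on the record above (id 62, `_setindex!` in src/swiss_dict.jl): a minimal usage sketch, assuming `SwissDict` is exported by DataStructures.jl as in current releases. It exercises only the public `d[k] = v` path, which dispatches to the internal `_setindex!` stored in the record; the sketch is illustrative and not part of the dataset record.

```julia
using DataStructures

d = SwissDict{String,Int}()
d["a"] = 1            # new key: inserted through _setindex!
d["a"] = 2            # existing key: value overwritten in place
d["b"] = 3
@assert d["a"] == 2 && length(d) == 2
```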
449
467
DataStructures.jl
63
function _get!(default::Callable, h::SwissDict{K,V}, key::K) where {K, V} index, tag = ht_keyindex2!(h, key) index > 0 && return @inbounds h.vals[index] age0 = h.age v = convert(V, default()) if h.age != age0 index, tag = ht_keyindex2!(h, key) end if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return v end
function _get!(default::Callable, h::SwissDict{K,V}, key::K) where {K, V} index, tag = ht_keyindex2!(h, key) index > 0 && return @inbounds h.vals[index] age0 = h.age v = convert(V, default()) if h.age != age0 index, tag = ht_keyindex2!(h, key) end if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return v end
[ 449, 467 ]
function _get!(default::Callable, h::SwissDict{K,V}, key::K) where {K, V} index, tag = ht_keyindex2!(h, key) index > 0 && return @inbounds h.vals[index] age0 = h.age v = convert(V, default()) if h.age != age0 index, tag = ht_keyindex2!(h, key) end if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return v end
function _get!(default::Callable, h::SwissDict{K,V}, key::K) where {K, V} index, tag = ht_keyindex2!(h, key) index > 0 && return @inbounds h.vals[index] age0 = h.age v = convert(V, default()) if h.age != age0 index, tag = ht_keyindex2!(h, key) end if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return v end
_get!
449
467
src/swiss_dict.jl
#FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 @inbounds h.dict[key] = Int32(nk) h.count += 1 end function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 _setindex!(h, v0, key0) else @assert haskey(h, key0) @inbounds orig_v = h.vals[index] !isequal(orig_v, v0) && (@inbounds h.vals[index] = v0) end check_for_rehash(h) && rehash!(h) return h ##CHUNK 2 Return the value stored for the given key, or if no mapping for the key is present, store `key => f()`, and return `f()`. This is intended to be called using `do` block syntax: ```julia get!(dict, key) do # default value calculated here time() end ``` """ function Base.get!(default::Base.Callable, h::OrderedRobinDict{K,V}, key0) where {K,V} index = get(h.dict, key0, -2) index > 0 && return @inbounds h.vals[index] v = convert(V, default()) setindex!(h, v, key0) return v end ##CHUNK 3 empty!(h.vals) h.count = 0 return h end function _setindex!(h::OrderedRobinDict, v, key) hk, hv = h.keys, h.vals push!(hk, key) push!(hv, v) nk = length(hk) @inbounds h.dict[key] = Int32(nk) h.count += 1 end function Base.setindex!(h::OrderedRobinDict{K, V}, v0, key0) where {K,V} key = convert(K, key0) v = convert(V, v0) index = get(h.dict, key, -2) if index < 0 ##CHUNK 4 """ function Base.get!(default::Base.Callable, h::OrderedRobinDict{K,V}, key0) where {K,V} index = get(h.dict, key0, -2) index > 0 && return @inbounds h.vals[index] v = convert(V, default()) setindex!(h, v, key0) return v end function Base.getindex(h::OrderedRobinDict{K,V}, key) where {K,V} index = get(h.dict, key, -1) return (index < 0) ? throw(KeyError(key)) : @inbounds h.vals[index]::V end """ get(collection, key, default) Return the value stored for the given key, or the given default value if no mapping for the key is present. #FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 return _get!(default, h, key) end function _get!(default::Callable, h::RobinDict{K,V}, key::K) where V where K index = rh_search(h, key) index > 0 && return h.vals[index] v = convert(V, default()) rh_insert!(h, key, v) return v end function Base.getindex(h::RobinDict{K, V}, key) where {K, V} index = rh_search(h, key) @inbounds return (index < 0) ? throw(KeyError(key)) : h.vals[index] end """ get(collection, key, default) ##CHUNK 2 # grow at least 25% if newsz < (oldsz*5)>>2 return d end rehash!(d, newsz) end Base.@propagate_inbounds isslotfilled(h::RobinDict, index) = (h.hashes[index] != 0) Base.@propagate_inbounds isslotempty(h::RobinDict, index) = (h.hashes[index] == 0) function Base.setindex!(h::RobinDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) isequal(key, key0) || throw(ArgumentError("$key0 is not a valid key for type $K")) _setindex!(h, key, v0) end function _setindex!(h::RobinDict{K,V}, key::K, v0) where {K, V} v = convert(V, v0) index = rh_insert!(h, key, v) ##CHUNK 3 ``` """ function Base.getkey(h::RobinDict{K,V}, key, default) where {K, V} index = rh_search(h, key) @inbounds return (index < 0) ? 
default : h.keys[index]::K end # backward shift deletion by not keeping any tombstones function rh_delete!(h::RobinDict{K, V}, index) where {K, V} @assert index > 0 # this assumes that there is a key/value present in the dictionary at index index0 = index sz = length(h.keys) @inbounds while true index0 = (index0 & (sz - 1)) + 1 if isslotempty(h, index0) || calculate_distance(h, index0) == 0 break end end #CURRENT FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 return h end function Base.setindex!(h::SwissDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) _setindex!(h, v0, key) end function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) index, tag = ht_keyindex2!(h, key) if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end ##CHUNK 2 index, tag = ht_keyindex2!(h, key) if index > 0 h.age += 1 @inbounds h.keys[index] = key @inbounds h.vals[index] = v else _setindex!(h, v, key, -index, tag) end return h end """ get!(collection, key, default) Return the value stored for the given key, or if no mapping for the key is present, store `key => default`, and return `default`. # Examples ##CHUNK 3 off = trailing_zeros(cands) idx = i*16 + off + 1 return -idx, tag end i = (i+1) & (sz-1) end end function _setindex!(h::SwissDict, v, key, index, tag) @inbounds h.keys[index] = key @inbounds h.vals[index] = v h.count += 1 h.age += 1 so = _slotget(h.slots, index) h.nbfull += (iszero(index & 0x0f) & (so==0x00)) _slotset!(h.slots, tag, index) if index < h.idxfloor h.idxfloor = index end maybe_rehash_grow!(h)
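Editor's note on the record above (id 63, `_get!`): this helper backs the `get!(f, dict, key)` form whose `do`-block usage is quoted in the record's docstring. A hedged sketch, again assuming `SwissDict` is exported:

```julia
using DataStructures

d = SwissDict{String,Vector{Int}}()
v = get!(d, "xs") do          # key absent: the closure runs once and its result is stored
    Int[]
end
push!(v, 1)
@assert d["xs"] == [1]

w = get!(d, "xs") do          # key present: the stored value is returned, closure never runs
    error("not reached")
end
@assert w === v
```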
601
611
DataStructures.jl
64
function Base.pop!(h::SwissDict) isempty(h) && throw(ArgumentError("SwissDict must be non-empty")) is = _iterslots(h, h.idxfloor) @assert is !== nothing idx, s = is @inbounds key = h.keys[idx] @inbounds val = h.vals[idx] _delete!(h, idx) h.idxfloor = idx return key => val end
function Base.pop!(h::SwissDict) isempty(h) && throw(ArgumentError("SwissDict must be non-empty")) is = _iterslots(h, h.idxfloor) @assert is !== nothing idx, s = is @inbounds key = h.keys[idx] @inbounds val = h.vals[idx] _delete!(h, idx) h.idxfloor = idx return key => val end
[ 601, 611 ]
function Base.pop!(h::SwissDict) isempty(h) && throw(ArgumentError("SwissDict must be non-empty")) is = _iterslots(h, h.idxfloor) @assert is !== nothing idx, s = is @inbounds key = h.keys[idx] @inbounds val = h.vals[idx] _delete!(h, idx) h.idxfloor = idx return key => val end
function Base.pop!(h::SwissDict) isempty(h) && throw(ArgumentError("SwissDict must be non-empty")) is = _iterslots(h, h.idxfloor) @assert is !== nothing idx, s = is @inbounds key = h.keys[idx] @inbounds val = h.vals[idx] _delete!(h, idx) h.idxfloor = idx return key => val end
Base.pop!
601
611
src/swiss_dict.jl
#FILE: DataStructures.jl/src/ordered_robin_dict.jl ##CHUNK 1 OrderedRobinDict{String, Int64} with 1 entry: "a" => 1 ``` """ function Base.delete!(h::OrderedRobinDict, key) pop!(h, key) return h end function _delete!(h::OrderedRobinDict, index) @inbounds h.dict[h.keys[index]] = -1 h.count -= 1 check_for_rehash(h) ? rehash!(h) : h end function get_first_filled_index(h::OrderedRobinDict) index = 1 while (true) isslotfilled(h, index) && return index index += 1 #FILE: DataStructures.jl/src/robin_dict.jl ##CHUNK 1 newsz = _tablesz(newsz) if h.count == 0 resize!(h.keys, newsz) resize!(h.vals, newsz) resize!(h.hashes, newsz) fill!(h.hashes, 0) h.count = 0 h.idxfloor = 0 return h end h.keys = Vector{K}(undef, newsz) h.vals = Vector{V}(undef, newsz) h.hashes = zeros(UInt32,newsz) h.count = 0 h.idxfloor = 0 for i = 1:sz @inbounds if oldh[i] != 0 k = oldk[i] #CURRENT FILE: DataStructures.jl/src/swiss_dict.jl ##CHUNK 1 "a" => 1 "b" => 2 julia> delete!(d, "b") SwissDict{String, Int64} with 1 entry: "a" => 1 ``` """ function Base.delete!(h::SwissDict, key) index = ht_keyindex(h, key) if index > 0 _delete!(h, index) end maybe_rehash_shrink!(h) return h end Base.@propagate_inbounds function Base.iterate(h::SwissDict, state = h.idxfloor) is = _iterslots(h, state) is === nothing && return nothing ##CHUNK 2 @inbounds h.vals[index] = v h.count += 1 h.age += 1 so = _slotget(h.slots, index) h.nbfull += (iszero(index & 0x0f) & (so==0x00)) _slotset!(h.slots, tag, index) if index < h.idxfloor h.idxfloor = index end maybe_rehash_grow!(h) end function _delete!(h::SwissDict{K,V}, index) where {K,V} # Caller is responsible for maybe shrinking the SwissDict after the deletion. isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, index-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, index-1) isboundary = iszero(index & 0x0f) #boundaries: 16, 32, ... @inbounds _slotset!(h.slots, ifelse(isboundary, 0x01, 0x00), index) h.count -= 1 h.age += 1 ##CHUNK 3 end function _delete!(h::SwissDict{K,V}, index) where {K,V} # Caller is responsible for maybe shrinking the SwissDict after the deletion. isbitstype(K) || isbitsunion(K) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.keys, index-1) isbitstype(V) || isbitsunion(V) || ccall(:jl_arrayunset, Cvoid, (Any, UInt), h.vals, index-1) isboundary = iszero(index & 0x0f) #boundaries: 16, 32, ... @inbounds _slotset!(h.slots, ifelse(isboundary, 0x01, 0x00), index) h.count -= 1 h.age += 1 maybe_rehash_shrink!(h) end # fast iteration over active slots. 
function _iterslots(h::SwissDict, start::Int) i0 = ((start-1) & (length(h.keys)-1))>>4 + 1 off = (start-1) & 0x0f @inbounds sl = _find_free(h.slots[i0>>4 + 1]) sl = ((~sl & 0xffff)>>off) << off ##CHUNK 4 vals[idx] = v nbfull += iszero(idx & 0x0f) count += 1 if h.age != age0 return rehash!(h, newsz) end is = _iterslots(h, s) end h.slots = slots h.keys = keys h.vals = vals h.nbfull = nbfull @assert h.age == age0 @assert h.count == count return h end Base.isempty(t::SwissDict) = (t.count == 0) Base.length(t::SwissDict) = t.count ##CHUNK 5 fill!(h.slots, _expand16(0x00)) sz = length(h.keys) empty!(h.keys) empty!(h.vals) resize!(h.keys, sz) resize!(h.vals, sz) h.nbfull = 0 h.count = 0 h.age += 1 h.idxfloor = 1 return h end function Base.setindex!(h::SwissDict{K,V}, v0, key0) where {K, V} key = convert(K, key0) _setindex!(h, v0, key) end function _setindex!(h::SwissDict{K,V}, v0, key::K) where {K, V} v = convert(V, v0) ##CHUNK 6 cands = _find_free(msk) if cands != 0 off = trailing_zeros(cands) idx = i*16 + off + 1 break end i = (i+1) & (nssz-1) end _slotset!(slots, t, idx) keys[idx] = k vals[idx] = v nbfull += iszero(idx & 0x0f) count += 1 if h.age != age0 return rehash!(h, newsz) end is = _iterslots(h, s) end h.slots = slots h.keys = keys ##CHUNK 7 maybe_rehash_shrink!(h) end # fast iteration over active slots. function _iterslots(h::SwissDict, start::Int) i0 = ((start-1) & (length(h.keys)-1))>>4 + 1 off = (start-1) & 0x0f @inbounds sl = _find_free(h.slots[i0>>4 + 1]) sl = ((~sl & 0xffff)>>off) << off return _iterslots(h, (i0, sl)) end function _iterslots(h::SwissDict, state) i, sl = state while iszero(sl) i += 1 i <= length(h.slots) || return nothing @inbounds msk = h.slots[i] sl = _find_free(msk) ##CHUNK 8 sz = length(oldk) newsz = _tablesz(newsz) (newsz*SWISS_DICT_LOAD_FACTOR) > h.count || (newsz <<= 1) h.age += 1 h.idxfloor = 1 if h.count == 0 resize!(h.slots, newsz>>4) fill!(h.slots, _expand16(0x00)) resize!(h.keys, newsz) resize!(h.vals, newsz) h.nbfull = 0 return h end nssz = newsz>>4 slots = fill(_expand16(0x00), nssz) keys = Vector{K}(undef, newsz) vals = Vector{V}(undef, newsz) age0 = h.age nbfull = 0 is = _iterslots(h, 1)
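Editor's note on the record above (id 64, `Base.pop!(::SwissDict)`): the method removes the entry at the lowest active slot and returns it as a `Pair`. A sketch; which entry comes out first depends on hashing, so the checks avoid assuming an order. The `SwissDict{K,V}(ps::Pair...)` constructor is the one quoted in the record's context.

```julia
using DataStructures

d = SwissDict{String,Int}("a" => 1, "b" => 2)
p = pop!(d)                    # some key => value pair; slot order is an implementation detail
@assert p isa Pair
@assert length(d) == 1 && !haskey(d, p.first)
```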
78
88
DataStructures.jl
65
function Base.keys(t::Trie{K,V}, prefix=_empty_prefix(t), found=Vector{typeof(prefix)}()) where {K,V} if t.is_key push!(found, prefix) end for (char,child) in t.children keys(child, _concat(prefix, char), found) end return found end
function Base.keys(t::Trie{K,V}, prefix=_empty_prefix(t), found=Vector{typeof(prefix)}()) where {K,V} if t.is_key push!(found, prefix) end for (char,child) in t.children keys(child, _concat(prefix, char), found) end return found end
[ 78, 88 ]
function Base.keys(t::Trie{K,V}, prefix=_empty_prefix(t), found=Vector{typeof(prefix)}()) where {K,V} if t.is_key push!(found, prefix) end for (char,child) in t.children keys(child, _concat(prefix, char), found) end return found end
function Base.keys(t::Trie{K,V}, prefix=_empty_prefix(t), found=Vector{typeof(prefix)}()) where {K,V} if t.is_key push!(found, prefix) end for (char,child) in t.children keys(child, _concat(prefix, char), found) end return found end
Base.keys
78
88
src/trie.jl
#FILE: DataStructures.jl/test/test_trie.jl ##CHUNK 1 t[[1,2,3,4]] = 1 t[[1,2]] = 2 @test haskey(t, [1,2]) @test get(t, [1,2], nothing) == 2 st = subtrie(t, [1,2,3]) @test keys(st) == [[4]] @test st[[4]] == 1 @test find_prefixes(t, [1,2,3,5]) == [[1,2]] @test find_prefixes(t, 1:3) == [1:2] end end # @testset Trie #CURRENT FILE: DataStructures.jl/src/trie.jl ##CHUNK 1 node != nothing && node.is_key end function Base.get(t::Trie, key, notfound) node = subtrie(t, key) if node != nothing && node.is_key return node.value end return notfound end _concat(prefix::String, char::Char) = string(prefix, char) _concat(prefix::Vector{T}, char::T) where {T} = vcat(prefix, char) _empty_prefix(::Trie{Char,V}) where {V} = "" _empty_prefix(::Trie{K,V}) where {K,V} = K[] function keys_with_prefix(t::Trie, prefix) st = subtrie(t, prefix) ##CHUNK 2 3-element Vector{UnitRange{Int64}}: 1:1 1:3 1:4 julia> find_prefixes(t′, [1,2,3,4,5]) 3-element Vector{Vector{Int64}}: [1] [1, 2, 3] [1, 2, 3, 4] ``` """ function find_prefixes(t::Trie, str::T) where {T} prefixes = T[] it = partial_path(t, str) idx = 0 for t in it if t.is_key push!(prefixes, str[firstindex(str):idx]) end ##CHUNK 3 ``` """ function find_prefixes(t::Trie, str::T) where {T} prefixes = T[] it = partial_path(t, str) idx = 0 for t in it if t.is_key push!(prefixes, str[firstindex(str):idx]) end idx = nextind(str, idx) end return prefixes end ##CHUNK 4 function Base.getindex(t::Trie, key) node = subtrie(t, key) if node != nothing && node.is_key return node.value end throw(KeyError("key not found: $key")) end function subtrie(t::Trie, prefix) node = t for char in prefix node = get(node.children, char, nothing) isnothing(node) && return nothing end return node end function Base.haskey(t::Trie, key) node = subtrie(t, key) ##CHUNK 5 node = t for char in prefix node = get(node.children, char, nothing) isnothing(node) && return nothing end return node end function Base.haskey(t::Trie, key) node = subtrie(t, key) node != nothing && node.is_key end function Base.get(t::Trie, key, notfound) node = subtrie(t, key) if node != nothing && node.is_key return node.value end return notfound end ##CHUNK 6 function Base.setindex!(t::Trie{K,V}, val, key) where {K,V} value = convert(V, val) # we don't want to iterate before finding out it fails node = t for char in key node = get!(Trie{K,V}, node.children, char) end node.is_key = true node.value = value end function Base.getindex(t::Trie, key) node = subtrie(t, key) if node != nothing && node.is_key return node.value end throw(KeyError("key not found: $key")) end function subtrie(t::Trie, prefix) ##CHUNK 7 _concat(prefix::String, char::Char) = string(prefix, char) _concat(prefix::Vector{T}, char::T) where {T} = vcat(prefix, char) _empty_prefix(::Trie{Char,V}) where {V} = "" _empty_prefix(::Trie{K,V}) where {K,V} = K[] function keys_with_prefix(t::Trie, prefix) st = subtrie(t, prefix) st != nothing ? keys(st,prefix) : [] end # The state of a TrieIterator is a pair (t::Trie, i::Int), # where t is the Trie which was the output of the previous iteration # and i is the index of the current character of the string. # The indexing is potentially confusing; # see the comments and implementation below for details. 
struct TrieIterator t::Trie ##CHUNK 8 return it.t, (it.t, firstindex(it.str)) elseif i > lastindex(it.str) || !(it.str[i] in keys(t.children)) return nothing else t = t.children[it.str[i]] return (t, (t, nextind(it.str, i))) end end partial_path(t::Trie, str) = TrieIterator(t, str) Base.IteratorSize(::Type{TrieIterator}) = Base.SizeUnknown() """ find_prefixes(t::Trie, str) Find all keys from the `Trie` that are prefix of the given string # Examples ```julia-repl julia> t = Trie(["A", "ABC", "ABCD", "BCE"]) ##CHUNK 9 mutable struct Trie{K,V} value::V children::Dict{K,Trie{K,V}} is_key::Bool function Trie{K,V}() where {K,V} self = new{K,V}() self.children = Dict{K,Trie{K,V}}() self.is_key = false return self end function Trie{K,V}(ks, vs) where {K,V} return Trie{K,V}(zip(ks, vs)) end function Trie{K,V}(kv) where {K,V} t = Trie{K,V}() for (k,v) in kv t[k] = v
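Editor's note on the record above (id 65, `Base.keys(::Trie)`): the method accumulates every stored key, using `_empty_prefix`/`_concat` so it works for both `String` and `Vector` keys. A sketch using the `Trie(["A", "ABC", "ABCD", "BCE"])` constructor quoted in the record's context; result order is not specified, hence the `sort`.

```julia
using DataStructures

t = Trie(["A", "ABC", "ABCD", "BCE"])
@assert sort(keys(t)) == ["A", "ABC", "ABCD", "BCE"]
```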
154
165
DataStructures.jl
66
function find_prefixes(t::Trie, str::T) where {T} prefixes = T[] it = partial_path(t, str) idx = 0 for t in it if t.is_key push!(prefixes, str[firstindex(str):idx]) end idx = nextind(str, idx) end return prefixes end
function find_prefixes(t::Trie, str::T) where {T} prefixes = T[] it = partial_path(t, str) idx = 0 for t in it if t.is_key push!(prefixes, str[firstindex(str):idx]) end idx = nextind(str, idx) end return prefixes end
[ 154, 165 ]
function find_prefixes(t::Trie, str::T) where {T} prefixes = T[] it = partial_path(t, str) idx = 0 for t in it if t.is_key push!(prefixes, str[firstindex(str):idx]) end idx = nextind(str, idx) end return prefixes end
function find_prefixes(t::Trie, str::T) where {T} prefixes = T[] it = partial_path(t, str) idx = 0 for t in it if t.is_key push!(prefixes, str[firstindex(str):idx]) end idx = nextind(str, idx) end return prefixes end
find_prefixes
154
165
src/trie.jl
#FILE: DataStructures.jl/test/test_trie.jl ##CHUNK 1 t[[1,2,3,4]] = 1 t[[1,2]] = 2 @test haskey(t, [1,2]) @test get(t, [1,2], nothing) == 2 st = subtrie(t, [1,2,3]) @test keys(st) == [[4]] @test st[[4]] == 1 @test find_prefixes(t, [1,2,3,5]) == [[1,2]] @test find_prefixes(t, 1:3) == [1:2] end end # @testset Trie #FILE: DataStructures.jl/src/fenwick.jl ##CHUNK 1 5 ``` """ function prefixsum(ft::FenwickTree{T}, ind::Integer) where T sum = zero(T) ind < 1 && return sum i = ind n = ft.n @boundscheck 1 <= i <= n || throw(ArgumentError("$i should be in between 1 and $n")) @inbounds while i > 0 sum += ft.bi_tree[i] i -= i&(-i) end sum end Base.getindex(ft::FenwickTree{T}, ind::Integer) where T = prefixsum(ft, ind) #FILE: DataStructures.jl/src/deprecations.jl ##CHUNK 1 # 0.18 deprecations. Remove before releasing 0.19 @deprecate path(t::Trie, str::AbstractString) partial_path(t::Trie, str::AbstractString) @deprecate find_root find_root! @deprecate top first @deprecate reverse_iter Iterators.reverse # Deprecations from #700 Base.@deprecate_binding DisjointSets DisjointSet Base.@deprecate_binding IntDisjointSets IntDisjointSet @deprecate DisjointSets(xs...) DisjointSet(xs) # Enqueue and dequeue deprecations #CURRENT FILE: DataStructures.jl/src/trie.jl ##CHUNK 1 if i == 0 return it.t, (it.t, firstindex(it.str)) elseif i > lastindex(it.str) || !(it.str[i] in keys(t.children)) return nothing else t = t.children[it.str[i]] return (t, (t, nextind(it.str, i))) end end partial_path(t::Trie, str) = TrieIterator(t, str) Base.IteratorSize(::Type{TrieIterator}) = Base.SizeUnknown() """ find_prefixes(t::Trie, str) Find all keys from the `Trie` that are prefix of the given string # Examples ```julia-repl ##CHUNK 2 node != nothing && node.is_key end function Base.get(t::Trie, key, notfound) node = subtrie(t, key) if node != nothing && node.is_key return node.value end return notfound end _concat(prefix::String, char::Char) = string(prefix, char) _concat(prefix::Vector{T}, char::T) where {T} = vcat(prefix, char) _empty_prefix(::Trie{Char,V}) where {V} = "" _empty_prefix(::Trie{K,V}) where {K,V} = K[] function Base.keys(t::Trie{K,V}, prefix=_empty_prefix(t), found=Vector{typeof(prefix)}()) where {K,V} ##CHUNK 3 if t.is_key push!(found, prefix) end for (char,child) in t.children keys(child, _concat(prefix, char), found) end return found end function keys_with_prefix(t::Trie, prefix) st = subtrie(t, prefix) st != nothing ? keys(st,prefix) : [] end # The state of a TrieIterator is a pair (t::Trie, i::Int), # where t is the Trie which was the output of the previous iteration # and i is the index of the current character of the string. # The indexing is potentially confusing; # see the comments and implementation below for details. 
struct TrieIterator ##CHUNK 4 function Base.getindex(t::Trie, key) node = subtrie(t, key) if node != nothing && node.is_key return node.value end throw(KeyError("key not found: $key")) end function subtrie(t::Trie, prefix) node = t for char in prefix node = get(node.children, char, nothing) isnothing(node) && return nothing end return node end function Base.haskey(t::Trie, key) node = subtrie(t, key) ##CHUNK 5 _concat(prefix::String, char::Char) = string(prefix, char) _concat(prefix::Vector{T}, char::T) where {T} = vcat(prefix, char) _empty_prefix(::Trie{Char,V}) where {V} = "" _empty_prefix(::Trie{K,V}) where {K,V} = K[] function Base.keys(t::Trie{K,V}, prefix=_empty_prefix(t), found=Vector{typeof(prefix)}()) where {K,V} if t.is_key push!(found, prefix) end for (char,child) in t.children keys(child, _concat(prefix, char), found) end return found end function keys_with_prefix(t::Trie, prefix) ##CHUNK 6 t::Trie str end # At the start, there is no previous iteration, # so the first element of the state is undefined. # We use a "dummy value" of it.t to keep the type of the state stable. # The second element is 0 # since the root of the trie corresponds to a length 0 prefix of str. function Base.iterate(it::TrieIterator, (t, i) = (it.t, 0)) if i == 0 return it.t, (it.t, firstindex(it.str)) elseif i > lastindex(it.str) || !(it.str[i] in keys(t.children)) return nothing else t = t.children[it.str[i]] return (t, (t, nextind(it.str, i))) end end ##CHUNK 7 node = t for char in prefix node = get(node.children, char, nothing) isnothing(node) && return nothing end return node end function Base.haskey(t::Trie, key) node = subtrie(t, key) node != nothing && node.is_key end function Base.get(t::Trie, key, notfound) node = subtrie(t, key) if node != nothing && node.is_key return node.value end return notfound end
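Editor's note on the record above (id 66, `find_prefixes`): the sketch below replays the docstring and test examples quoted in the record's context (string keys and integer-vector keys).

```julia
using DataStructures

t = Trie(["A", "ABC", "ABCD", "BCE"])
@assert find_prefixes(t, "ABCDE") == ["A", "ABC", "ABCD"]

t2 = Trie{Int,Int}()          # keys are sequences of Int, as in the quoted test file
t2[[1, 2, 3, 4]] = 1
t2[[1, 2]] = 2
@assert find_prefixes(t2, [1, 2, 3, 5]) == [[1, 2]]
```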
143
160
DataStructures.jl
67
function is_minmax_heap(A::AbstractVector) for i in 1:length(A) if on_minlevel(i) # check that A[i] < children A[i] # and grandchildren A[i] for j in children_and_grandchildren(length(A), i) A[i] ≤ A[j] || return false end else # max layer for j in children_and_grandchildren(length(A), i) A[i] ≥ A[j] || return false end end end return true end
function is_minmax_heap(A::AbstractVector) for i in 1:length(A) if on_minlevel(i) # check that A[i] < children A[i] # and grandchildren A[i] for j in children_and_grandchildren(length(A), i) A[i] ≤ A[j] || return false end else # max layer for j in children_and_grandchildren(length(A), i) A[i] ≥ A[j] || return false end end end return true end
[ 143, 160 ]
function is_minmax_heap(A::AbstractVector) for i in 1:length(A) if on_minlevel(i) # check that A[i] < children A[i] # and grandchildren A[i] for j in children_and_grandchildren(length(A), i) A[i] ≤ A[j] || return false end else # max layer for j in children_and_grandchildren(length(A), i) A[i] ≥ A[j] || return false end end end return true end
function is_minmax_heap(A::AbstractVector) for i in 1:length(A) if on_minlevel(i) # check that A[i] < children A[i] # and grandchildren A[i] for j in children_and_grandchildren(length(A), i) A[i] ≤ A[j] || return false end else # max layer for j in children_and_grandchildren(length(A), i) A[i] ≥ A[j] || return false end end end return true end
is_minmax_heap
143
160
src/heaps/minmax_heap.jl
#CURRENT FILE: DataStructures.jl/src/heaps/minmax_heap.jl ##CHUNK 1 @inline isgrandchild(j, i) = j > rchild(i) @inline hasgrandparent(i) = i ≥ 4 """ children_and_grandchildren(maxlen, i) Return the indices of all children and grandchildren of position `i`. """ function children_and_grandchildren(maxlen::T, i::T) where {T <: Integer} left, right = children(i) _children_and_grandchildren = (left, children(left)..., right, children(right)...) return Iterators.filter(<=(maxlen), _children_and_grandchildren) end """ is_minmax_heap(h::AbstractVector) -> Bool Return `true` if `A` is a min-max heap. A min-max heap is a heap where the minimum element is the root and the maximum ##CHUNK 2 end Base.@propagate_inbounds function _minmax_heap_trickle_down!(A::AbstractVector, i::Integer) if on_minlevel(i) _minmax_heap_trickle_down!(A, i, Forward) else _minmax_heap_trickle_down!(A, i, Reverse) end end Base.@propagate_inbounds function _minmax_heap_trickle_down!(A::AbstractVector, i::Integer, o::Ordering, x=A[i]) if haschildren(i, A) # get the index of the extremum (min or max) descendant extremum = o === Forward ? minimum : maximum _, m = extremum((A[j], j) for j in children_and_grandchildren(length(A), i)) if isgrandchild(m, i) if lt(o, A[m], A[i]) A[i] = A[m] ##CHUNK 3 Base.@propagate_inbounds function _minmax_heap_bubble_up!(A::AbstractVector, i::Integer, o::Ordering, x=A[i]) if hasgrandparent(i) gparent = hparent(hparent(i)) if lt(o, x, A[gparent]) A[i] = A[gparent] A[gparent] = x _minmax_heap_bubble_up!(A, gparent, o) end end end Base.@propagate_inbounds function _minmax_heap_trickle_down!(A::AbstractVector, i::Integer) if on_minlevel(i) _minmax_heap_trickle_down!(A, i, Forward) else _minmax_heap_trickle_down!(A, i, Reverse) end end ##CHUNK 4 left, right = children(i) _children_and_grandchildren = (left, children(left)..., right, children(right)...) return Iterators.filter(<=(maxlen), _children_and_grandchildren) end """ is_minmax_heap(h::AbstractVector) -> Bool Return `true` if `A` is a min-max heap. A min-max heap is a heap where the minimum element is the root and the maximum element is a child of the root. """ ################################################ # # interfaces # ################################################ Base.length(h::BinaryMinMaxHeap) = length(h.valtree) ##CHUNK 5 Base.@propagate_inbounds function _minmax_heap_trickle_down!(A::AbstractVector, i::Integer, o::Ordering, x=A[i]) if haschildren(i, A) # get the index of the extremum (min or max) descendant extremum = o === Forward ? 
minimum : maximum _, m = extremum((A[j], j) for j in children_and_grandchildren(length(A), i)) if isgrandchild(m, i) if lt(o, A[m], A[i]) A[i] = A[m] A[m] = x if lt(o, A[hparent(m)], A[m]) t = A[m] A[m] = A[hparent(m)] A[hparent(m)] = t end _minmax_heap_trickle_down!(A, m, o) end else if lt(o, A[m], A[i]) ##CHUNK 6 tmp = A[i] A[i] = A[hparent(i)] A[hparent(i)] = tmp _minmax_heap_bubble_up!(A, hparent(i), Forward) else # bubble up max _minmax_heap_bubble_up!(A, i, Reverse) end end end Base.@propagate_inbounds function _minmax_heap_bubble_up!(A::AbstractVector, i::Integer, o::Ordering, x=A[i]) if hasgrandparent(i) gparent = hparent(hparent(i)) if lt(o, x, A[gparent]) A[i] = A[gparent] A[gparent] = x _minmax_heap_bubble_up!(A, gparent, o) end end ##CHUNK 7 _minmax_heap_bubble_up!(A, hparent(i), Reverse) else # bubble up min _minmax_heap_bubble_up!(A, i, Forward) end else # max level if i > 1 && A[i] < A[hparent(i)] # swap to parent and bubble up min tmp = A[i] A[i] = A[hparent(i)] A[hparent(i)] = tmp _minmax_heap_bubble_up!(A, hparent(i), Forward) else # bubble up max _minmax_heap_bubble_up!(A, i, Reverse) end end end ##CHUNK 8 # # core implementation # ################################################ function _make_binary_minmax_heap(xs) valtree = copy(xs) for i in length(xs):-1:1 @inbounds _minmax_heap_trickle_down!(valtree, i) end return valtree end Base.@propagate_inbounds function _minmax_heap_bubble_up!(A::AbstractVector, i::Integer) if on_minlevel(i) if i > 1 && A[i] > A[hparent(i)] # swap to parent and bubble up max tmp = A[i] A[i] = A[hparent(i)] A[hparent(i)] = tmp ##CHUNK 9 return valtree end Base.@propagate_inbounds function _minmax_heap_bubble_up!(A::AbstractVector, i::Integer) if on_minlevel(i) if i > 1 && A[i] > A[hparent(i)] # swap to parent and bubble up max tmp = A[i] A[i] = A[hparent(i)] A[hparent(i)] = tmp _minmax_heap_bubble_up!(A, hparent(i), Reverse) else # bubble up min _minmax_heap_bubble_up!(A, i, Forward) end else # max level if i > 1 && A[i] < A[hparent(i)] # swap to parent and bubble up min ##CHUNK 10 A[m] = x if lt(o, A[hparent(m)], A[m]) t = A[m] A[m] = A[hparent(m)] A[hparent(m)] = t end _minmax_heap_trickle_down!(A, m, o) end else if lt(o, A[m], A[i]) A[i] = A[m] A[m] = x end end end end ################################################ # # utilities
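Editor's note on the record above (id 67, `is_minmax_heap`): the checker visits every index and compares it against its children and grandchildren, with `≤` on min levels and `≥` on max levels. A sketch on hand-built arrays; the qualified name is used here as a precaution, since the record does not confirm whether the helper is exported.

```julia
using DataStructures

# level 0 (min): 1; level 1 (max): 9, 8; level 2 (min): 2, 3, 4, 5
@assert DataStructures.is_minmax_heap([1, 9, 8, 2, 3, 4, 5])
@assert !DataStructures.is_minmax_heap([3, 1, 2])   # root 3 exceeds its descendant 1
```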
187
197
DataStructures.jl
68
function popmin!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x = valtree[1] y = pop!(valtree) if !isempty(valtree) @inbounds valtree[1] = y @inbounds _minmax_heap_trickle_down!(valtree, 1) end return x end
function popmin!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x = valtree[1] y = pop!(valtree) if !isempty(valtree) @inbounds valtree[1] = y @inbounds _minmax_heap_trickle_down!(valtree, 1) end return x end
[ 187, 197 ]
function popmin!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x = valtree[1] y = pop!(valtree) if !isempty(valtree) @inbounds valtree[1] = y @inbounds _minmax_heap_trickle_down!(valtree, 1) end return x end
function popmin!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x = valtree[1] y = pop!(valtree) if !isempty(valtree) @inbounds valtree[1] = y @inbounds _minmax_heap_trickle_down!(valtree, 1) end return x end
popmin!
187
197
src/heaps/minmax_heap.jl
#CURRENT FILE: DataStructures.jl/src/heaps/minmax_heap.jl ##CHUNK 1 Remove up to the `k` smallest values from the heap. """ @inline function popmin!(h::BinaryMinMaxHeap, k::Integer) return [popmin!(h) for _ in 1:min(length(h), k)] end """ popmax!(h::BinaryMinMaxHeap) -> max Remove the maximum value from the heap. """ function popmax!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x, i = maximum(((valtree[j], j) for j in 1:min(length(valtree), 3))) y = pop!(valtree) if !isempty(valtree) && i <= length(valtree) @inbounds valtree[i] = y @inbounds _minmax_heap_trickle_down!(valtree, i) ##CHUNK 2 Remove the maximum value from the heap. """ function popmax!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x, i = maximum(((valtree[j], j) for j in 1:min(length(valtree), 3))) y = pop!(valtree) if !isempty(valtree) && i <= length(valtree) @inbounds valtree[i] = y @inbounds _minmax_heap_trickle_down!(valtree, i) end return x end """ popmax!(h::BinaryMinMaxHeap, k::Integer) -> vals Remove up to the `k` largest values from the heap. """ @inline function popmax!(h::BinaryMinMaxHeap, k::Integer) ##CHUNK 3 function BinaryMinMaxHeap{T}(xs::AbstractVector{T}) where {T} valtree = _make_binary_minmax_heap(xs) new{T}(valtree) end end BinaryMinMaxHeap(xs::AbstractVector{T}) where T = BinaryMinMaxHeap{T}(xs) ################################################ # # core implementation # ################################################ function _make_binary_minmax_heap(xs) valtree = copy(xs) for i in length(xs):-1:1 @inbounds _minmax_heap_trickle_down!(valtree, i) end ##CHUNK 4 """ first(h::BinaryMinMaxHeap) Get the first (minimum) of the heap. """ @inline Base.first(h::BinaryMinMaxHeap) = minimum(h) @inline function Base.minimum(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(h) || throw(ArgumentError("heap must be non-empty")) return @inbounds h.valtree[1] end @inline function Base.maximum(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(h) || throw(ArgumentError("heap must be non-empty")) return @inbounds maximum(@views(valtree[1:min(end, 3)])) end Base.empty!(h::BinaryMinMaxHeap) = (empty!(h.valtree); h) ##CHUNK 5 # # core implementation # ################################################ function _make_binary_minmax_heap(xs) valtree = copy(xs) for i in length(xs):-1:1 @inbounds _minmax_heap_trickle_down!(valtree, i) end return valtree end Base.@propagate_inbounds function _minmax_heap_bubble_up!(A::AbstractVector, i::Integer) if on_minlevel(i) if i > 1 && A[i] > A[hparent(i)] # swap to parent and bubble up max tmp = A[i] A[i] = A[hparent(i)] A[hparent(i)] = tmp ##CHUNK 6 return [popmax!(h) for _ in 1:min(length(h), k)] end function Base.push!(h::BinaryMinMaxHeap, v) valtree = h.valtree push!(valtree, v) @inbounds _minmax_heap_bubble_up!(valtree, length(valtree)) end """ first(h::BinaryMinMaxHeap) Get the first (minimum) of the heap. """ @inline Base.first(h::BinaryMinMaxHeap) = minimum(h) @inline function Base.minimum(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(h) || throw(ArgumentError("heap must be non-empty")) ##CHUNK 7 end return x end """ popmax!(h::BinaryMinMaxHeap, k::Integer) -> vals Remove up to the `k` largest values from the heap. 
""" @inline function popmax!(h::BinaryMinMaxHeap, k::Integer) return [popmax!(h) for _ in 1:min(length(h), k)] end function Base.push!(h::BinaryMinMaxHeap, v) valtree = h.valtree push!(valtree, v) @inbounds _minmax_heap_bubble_up!(valtree, length(valtree)) end ##CHUNK 8 return @inbounds h.valtree[1] end @inline function Base.maximum(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(h) || throw(ArgumentError("heap must be non-empty")) return @inbounds maximum(@views(valtree[1:min(end, 3)])) end Base.empty!(h::BinaryMinMaxHeap) = (empty!(h.valtree); h) """ popall!(h::BinaryMinMaxHeap, ::Ordering = Forward) Remove and return all the elements of `h` according to the given ordering. Default is `Forward` (smallest to largest). """ popall!(h::BinaryMinMaxHeap) = popall!(h, Forward) ##CHUNK 9 """ pop!(h::BinaryMinMaxHeap) = popmin!(h) """ @inline Base.pop!(h::BinaryMinMaxHeap) = popmin!(h) function Base.sizehint!(h::BinaryMinMaxHeap, s::Integer) sizehint!(h.valtree, s) return h end """ popmin!(h::BinaryMinMaxHeap) -> min Remove the minimum value from the heap. """ """ popmin!(h::BinaryMinMaxHeap, k::Integer) -> vals ##CHUNK 10 ################################################ # # interfaces # ################################################ Base.length(h::BinaryMinMaxHeap) = length(h.valtree) Base.isempty(h::BinaryMinMaxHeap) = isempty(h.valtree) """ pop!(h::BinaryMinMaxHeap) = popmin!(h) """ @inline Base.pop!(h::BinaryMinMaxHeap) = popmin!(h) function Base.sizehint!(h::BinaryMinMaxHeap, s::Integer) sizehint!(h.valtree, s) return h end
214
224
DataStructures.jl
69
function popmax!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x, i = maximum(((valtree[j], j) for j in 1:min(length(valtree), 3))) y = pop!(valtree) if !isempty(valtree) && i <= length(valtree) @inbounds valtree[i] = y @inbounds _minmax_heap_trickle_down!(valtree, i) end return x end
function popmax!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x, i = maximum(((valtree[j], j) for j in 1:min(length(valtree), 3))) y = pop!(valtree) if !isempty(valtree) && i <= length(valtree) @inbounds valtree[i] = y @inbounds _minmax_heap_trickle_down!(valtree, i) end return x end
[ 214, 224 ]
function popmax!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x, i = maximum(((valtree[j], j) for j in 1:min(length(valtree), 3))) y = pop!(valtree) if !isempty(valtree) && i <= length(valtree) @inbounds valtree[i] = y @inbounds _minmax_heap_trickle_down!(valtree, i) end return x end
function popmax!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x, i = maximum(((valtree[j], j) for j in 1:min(length(valtree), 3))) y = pop!(valtree) if !isempty(valtree) && i <= length(valtree) @inbounds valtree[i] = y @inbounds _minmax_heap_trickle_down!(valtree, i) end return x end
popmax!
214
224
src/heaps/minmax_heap.jl
#CURRENT FILE: DataStructures.jl/src/heaps/minmax_heap.jl ##CHUNK 1 y = pop!(valtree) if !isempty(valtree) @inbounds valtree[1] = y @inbounds _minmax_heap_trickle_down!(valtree, 1) end return x end """ popmin!(h::BinaryMinMaxHeap, k::Integer) -> vals Remove up to the `k` smallest values from the heap. """ @inline function popmin!(h::BinaryMinMaxHeap, k::Integer) return [popmin!(h) for _ in 1:min(length(h), k)] end """ popmax!(h::BinaryMinMaxHeap) -> max ##CHUNK 2 # # core implementation # ################################################ function _make_binary_minmax_heap(xs) valtree = copy(xs) for i in length(xs):-1:1 @inbounds _minmax_heap_trickle_down!(valtree, i) end return valtree end Base.@propagate_inbounds function _minmax_heap_bubble_up!(A::AbstractVector, i::Integer) if on_minlevel(i) if i > 1 && A[i] > A[hparent(i)] # swap to parent and bubble up max tmp = A[i] A[i] = A[hparent(i)] A[hparent(i)] = tmp ##CHUNK 3 function BinaryMinMaxHeap{T}(xs::AbstractVector{T}) where {T} valtree = _make_binary_minmax_heap(xs) new{T}(valtree) end end BinaryMinMaxHeap(xs::AbstractVector{T}) where T = BinaryMinMaxHeap{T}(xs) ################################################ # # core implementation # ################################################ function _make_binary_minmax_heap(xs) valtree = copy(xs) for i in length(xs):-1:1 @inbounds _minmax_heap_trickle_down!(valtree, i) end ##CHUNK 4 """ popmin!(h::BinaryMinMaxHeap) -> min Remove the minimum value from the heap. """ function popmin!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x = valtree[1] y = pop!(valtree) if !isempty(valtree) @inbounds valtree[1] = y @inbounds _minmax_heap_trickle_down!(valtree, 1) end return x end """ ##CHUNK 5 Remove the maximum value from the heap. """ """ popmax!(h::BinaryMinMaxHeap, k::Integer) -> vals Remove up to the `k` largest values from the heap. """ @inline function popmax!(h::BinaryMinMaxHeap, k::Integer) return [popmax!(h) for _ in 1:min(length(h), k)] end function Base.push!(h::BinaryMinMaxHeap, v) valtree = h.valtree push!(valtree, v) @inbounds _minmax_heap_bubble_up!(valtree, length(valtree)) end ##CHUNK 6 """ pop!(h::BinaryMinMaxHeap) = popmin!(h) """ @inline Base.pop!(h::BinaryMinMaxHeap) = popmin!(h) function Base.sizehint!(h::BinaryMinMaxHeap, s::Integer) sizehint!(h.valtree, s) return h end """ popmin!(h::BinaryMinMaxHeap) -> min Remove the minimum value from the heap. """ function popmin!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x = valtree[1] ##CHUNK 7 """ first(h::BinaryMinMaxHeap) Get the first (minimum) of the heap. """ @inline Base.first(h::BinaryMinMaxHeap) = minimum(h) @inline function Base.minimum(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(h) || throw(ArgumentError("heap must be non-empty")) return @inbounds h.valtree[1] end @inline function Base.maximum(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(h) || throw(ArgumentError("heap must be non-empty")) return @inbounds maximum(@views(valtree[1:min(end, 3)])) end Base.empty!(h::BinaryMinMaxHeap) = (empty!(h.valtree); h) ##CHUNK 8 return [popmax!(h) for _ in 1:min(length(h), k)] end function Base.push!(h::BinaryMinMaxHeap, v) valtree = h.valtree push!(valtree, v) @inbounds _minmax_heap_bubble_up!(valtree, length(valtree)) end """ first(h::BinaryMinMaxHeap) Get the first (minimum) of the heap. 
""" @inline Base.first(h::BinaryMinMaxHeap) = minimum(h) @inline function Base.minimum(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(h) || throw(ArgumentError("heap must be non-empty")) ##CHUNK 9 return @inbounds h.valtree[1] end @inline function Base.maximum(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(h) || throw(ArgumentError("heap must be non-empty")) return @inbounds maximum(@views(valtree[1:min(end, 3)])) end Base.empty!(h::BinaryMinMaxHeap) = (empty!(h.valtree); h) """ popall!(h::BinaryMinMaxHeap, ::Ordering = Forward) Remove and return all the elements of `h` according to the given ordering. Default is `Forward` (smallest to largest). """ popall!(h::BinaryMinMaxHeap) = popall!(h, Forward) ##CHUNK 10 ################################################ # # minmax heap type and constructors # ################################################ mutable struct BinaryMinMaxHeap{T} <: AbstractMinMaxHeap{T} valtree::Vector{T} BinaryMinMaxHeap{T}() where {T} = new{T}(Vector{T}())
103
130
DataStructures.jl
70
function _binary_heap_pop!(ord::Ordering, nodes::Vector{MutableBinaryHeapNode{T}}, nodemap::Vector{Int}, nd_id::Int=1) where T # extract node rt = nodes[nd_id] v = rt.value @inbounds nodemap[rt.handle] = 0 # if node-to-remove is at end, we can just pop it # the same applies to 1-element heaps that are empty after removing the last element if nd_id == lastindex(nodes) pop!(nodes) else # move the last node to the position of the node-to-remove @inbounds nodes[nd_id] = new_rt = nodes[end] pop!(nodes) @inbounds nodemap[new_rt.handle] = nd_id if length(nodes) > 1 if Base.lt(ord, new_rt.value, v) _heap_bubble_up!(ord, nodes, nodemap, nd_id) else _heap_bubble_down!(ord, nodes, nodemap, nd_id) end end end return v end
function _binary_heap_pop!(ord::Ordering, nodes::Vector{MutableBinaryHeapNode{T}}, nodemap::Vector{Int}, nd_id::Int=1) where T # extract node rt = nodes[nd_id] v = rt.value @inbounds nodemap[rt.handle] = 0 # if node-to-remove is at end, we can just pop it # the same applies to 1-element heaps that are empty after removing the last element if nd_id == lastindex(nodes) pop!(nodes) else # move the last node to the position of the node-to-remove @inbounds nodes[nd_id] = new_rt = nodes[end] pop!(nodes) @inbounds nodemap[new_rt.handle] = nd_id if length(nodes) > 1 if Base.lt(ord, new_rt.value, v) _heap_bubble_up!(ord, nodes, nodemap, nd_id) else _heap_bubble_down!(ord, nodes, nodemap, nd_id) end end end return v end
[ 103, 130 ]
function _binary_heap_pop!(ord::Ordering, nodes::Vector{MutableBinaryHeapNode{T}}, nodemap::Vector{Int}, nd_id::Int=1) where T # extract node rt = nodes[nd_id] v = rt.value @inbounds nodemap[rt.handle] = 0 # if node-to-remove is at end, we can just pop it # the same applies to 1-element heaps that are empty after removing the last element if nd_id == lastindex(nodes) pop!(nodes) else # move the last node to the position of the node-to-remove @inbounds nodes[nd_id] = new_rt = nodes[end] pop!(nodes) @inbounds nodemap[new_rt.handle] = nd_id if length(nodes) > 1 if Base.lt(ord, new_rt.value, v) _heap_bubble_up!(ord, nodes, nodemap, nd_id) else _heap_bubble_down!(ord, nodes, nodemap, nd_id) end end end return v end
function _binary_heap_pop!(ord::Ordering, nodes::Vector{MutableBinaryHeapNode{T}}, nodemap::Vector{Int}, nd_id::Int=1) where T # extract node rt = nodes[nd_id] v = rt.value @inbounds nodemap[rt.handle] = 0 # if node-to-remove is at end, we can just pop it # the same applies to 1-element heaps that are empty after removing the last element if nd_id == lastindex(nodes) pop!(nodes) else # move the last node to the position of the node-to-remove @inbounds nodes[nd_id] = new_rt = nodes[end] pop!(nodes) @inbounds nodemap[new_rt.handle] = nd_id if length(nodes) > 1 if Base.lt(ord, new_rt.value, v) _heap_bubble_up!(ord, nodes, nodemap, nd_id) else _heap_bubble_down!(ord, nodes, nodemap, nd_id) end end end return v end
_binary_heap_pop!
103
130
src/heaps/mutable_binary_heap.jl
#FILE: DataStructures.jl/src/heaps/minmax_heap.jl ##CHUNK 1 """ popmin!(h::BinaryMinMaxHeap) -> min Remove the minimum value from the heap. """ function popmin!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x = valtree[1] y = pop!(valtree) if !isempty(valtree) @inbounds valtree[1] = y @inbounds _minmax_heap_trickle_down!(valtree, 1) end return x end """ ##CHUNK 2 Remove the maximum value from the heap. """ function popmax!(h::BinaryMinMaxHeap) valtree = h.valtree !isempty(valtree) || throw(ArgumentError("heap must be non-empty")) @inbounds x, i = maximum(((valtree[j], j) for j in 1:min(length(valtree), 3))) y = pop!(valtree) if !isempty(valtree) && i <= length(valtree) @inbounds valtree[i] = y @inbounds _minmax_heap_trickle_down!(valtree, i) end return x end """ popmax!(h::BinaryMinMaxHeap, k::Integer) -> vals Remove up to the `k` largest values from the heap. """ #FILE: DataStructures.jl/src/heaps/arrays_as_heaps.jl ##CHUNK 1 # Binary heap indexing heapleft(i::Integer) = 2i heapright(i::Integer) = 2i + 1 heapparent(i::Integer) = div(i, 2) # Binary min-heap percolate down. function percolate_down!(xs::AbstractArray, i::Integer, x=xs[i], o::Ordering=Forward, len::Integer=length(xs)) @inbounds while (l = heapleft(i)) <= len r = heapright(i) j = r > len || lt(o, xs[l], xs[r]) ? l : r lt(o, xs[j], x) || break xs[i] = xs[j] i = j end xs[i] = x end percolate_down!(xs::AbstractArray, i::Integer, o::Ordering, len::Integer=length(xs)) = percolate_down!(xs, i, xs[i], o, len) #CURRENT FILE: DataStructures.jl/src/heaps/mutable_binary_heap.jl ##CHUNK 1 end function _heap_bubble_down!(ord::Ordering, nodes::Vector{MutableBinaryHeapNode{T}}, nodemap::Vector{Int}, nd_id::Int) where T @inbounds nd = nodes[nd_id] v::T = nd.value n = length(nodes) last_parent = n >> 1 swapped = true i = nd_id while swapped && i <= last_parent il = i << 1 if il < n # contains both left and right children ir = il + 1 ##CHUNK 2 nodes[nd_id] = MutableBinaryHeapNode(x, i) if Base.lt(ordering, x, v0) _heap_bubble_up!(ordering, nodes, nodemap, nd_id) else _heap_bubble_down!(ordering, nodes, nodemap, nd_id) end end """ delete!{T}(h::MutableBinaryHeap{T}, i::Int) Deletes the element with handle `i` from heap `h` . """ function Base.delete!(h::MutableBinaryHeap{T}, i::Int) where T nd_id = h.node_map[i] _binary_heap_pop!(h.ordering, h.nodes, h.node_map, nd_id) return h end Base.setindex!(h::MutableBinaryHeap, v, i::Int) = update!(h, i, v) ##CHUNK 3 # # interfaces # ################################################# Base.length(h::MutableBinaryHeap) = length(h.nodes) Base.isempty(h::MutableBinaryHeap) = isempty(h.nodes) function Base.push!(h::MutableBinaryHeap{T}, v) where T nodes = h.nodes nodemap = h.node_map i = length(nodemap) + 1 nd_id = length(nodes) + 1 push!(nodes, MutableBinaryHeapNode(convert(T, v), i)) push!(nodemap, nd_id) _heap_bubble_up!(h.ordering, nodes, nodemap, nd_id) return i end ##CHUNK 4 This is equivalent to `h[i]=v`. 
""" function update!(h::MutableBinaryHeap{T}, i::Int, v) where T nodes = h.nodes nodemap = h.node_map ordering = h.ordering nd_id = nodemap[i] v0 = nodes[nd_id].value x = convert(T, v) nodes[nd_id] = MutableBinaryHeapNode(x, i) if Base.lt(ordering, x, v0) _heap_bubble_up!(ordering, nodes, nodemap, nd_id) else _heap_bubble_down!(ordering, nodes, nodemap, nd_id) end end """ delete!{T}(h::MutableBinaryHeap{T}, i::Int) ##CHUNK 5 el = h.nodes[1] return el.value, el.handle end Base.pop!(h::MutableBinaryHeap{T}) where {T} = _binary_heap_pop!(h.ordering, h.nodes, h.node_map) """ update!{T}(h::MutableBinaryHeap{T}, i::Int, v::T) Replace the element at index `i` in heap `h` with `v`. This is equivalent to `h[i]=v`. """ function update!(h::MutableBinaryHeap{T}, i::Int, v) where T nodes = h.nodes nodemap = h.node_map ordering = h.ordering nd_id = nodemap[i] v0 = nodes[nd_id].value x = convert(T, v) ##CHUNK 6 else swapped = false end end end if i != nd_id @inbounds nodes[i] = nd @inbounds nodemap[nd.handle] = i end end function _make_mutable_binary_heap(ord::Ordering, ty::Type{T}, values) where T # make a static binary index tree from a list of values n = length(values) nodes = Vector{MutableBinaryHeapNode{T}}(undef, n) nodemap = Vector{Int}(undef, n) ##CHUNK 7 # ################################################# function _heap_bubble_up!(ord::Ordering, nodes::Vector{MutableBinaryHeapNode{T}}, nodemap::Vector{Int}, nd_id::Int) where T @inbounds nd = nodes[nd_id] v::T = nd.value swapped = true # whether swap happens at last step i = nd_id while swapped && i > 1 # nd is not root p = i >> 1 @inbounds nd_p = nodes[p] if Base.lt(ord, v, nd_p.value) # move parent downward @inbounds nodes[i] = nd_p @inbounds nodemap[nd_p.handle] = i
132
150
DataStructures.jl
71
function _make_mutable_binary_heap(ord::Ordering, ty::Type{T}, values) where T # make a static binary index tree from a list of values n = length(values) nodes = Vector{MutableBinaryHeapNode{T}}(undef, n) nodemap = Vector{Int}(undef, n) i::Int = 0 for v in values i += 1 @inbounds nodes[i] = MutableBinaryHeapNode{T}(v, i) @inbounds nodemap[i] = i end for i = 1 : n _heap_bubble_up!(ord, nodes, nodemap, i) end return nodes, nodemap end
function _make_mutable_binary_heap(ord::Ordering, ty::Type{T}, values) where T # make a static binary index tree from a list of values n = length(values) nodes = Vector{MutableBinaryHeapNode{T}}(undef, n) nodemap = Vector{Int}(undef, n) i::Int = 0 for v in values i += 1 @inbounds nodes[i] = MutableBinaryHeapNode{T}(v, i) @inbounds nodemap[i] = i end for i = 1 : n _heap_bubble_up!(ord, nodes, nodemap, i) end return nodes, nodemap end
[ 132, 150 ]
function _make_mutable_binary_heap(ord::Ordering, ty::Type{T}, values) where T # make a static binary index tree from a list of values n = length(values) nodes = Vector{MutableBinaryHeapNode{T}}(undef, n) nodemap = Vector{Int}(undef, n) i::Int = 0 for v in values i += 1 @inbounds nodes[i] = MutableBinaryHeapNode{T}(v, i) @inbounds nodemap[i] = i end for i = 1 : n _heap_bubble_up!(ord, nodes, nodemap, i) end return nodes, nodemap end
function _make_mutable_binary_heap(ord::Ordering, ty::Type{T}, values) where T # make a static binary index tree from a list of values n = length(values) nodes = Vector{MutableBinaryHeapNode{T}}(undef, n) nodemap = Vector{Int}(undef, n) i::Int = 0 for v in values i += 1 @inbounds nodes[i] = MutableBinaryHeapNode{T}(v, i) @inbounds nodemap[i] = i end for i = 1 : n _heap_bubble_up!(ord, nodes, nodemap, i) end return nodes, nodemap end
_make_mutable_binary_heap
132
150
src/heaps/mutable_binary_heap.jl
#FILE: DataStructures.jl/test/test_mutable_binheap.jl ##CHUNK 1 # Test of binary heaps # auxiliary functions function heap_values(h::MutableBinaryHeap{VT,O}) where {VT,O} n = length(h) nodes = h.nodes @assert length(nodes) == n vs = Vector{VT}(undef, n) for i = 1 : n vs[i] = nodes[i].value end vs end function list_values(h::MutableBinaryHeap{VT,O}) where {VT,O} n = length(h) nodes = h.nodes nodemap = h.node_map vs = Vector{VT}() #CURRENT FILE: DataStructures.jl/src/heaps/mutable_binary_heap.jl ##CHUNK 1 # interfaces # ################################################# Base.length(h::MutableBinaryHeap) = length(h.nodes) Base.isempty(h::MutableBinaryHeap) = isempty(h.nodes) function Base.push!(h::MutableBinaryHeap{T}, v) where T nodes = h.nodes nodemap = h.node_map i = length(nodemap) + 1 nd_id = length(nodes) + 1 push!(nodes, MutableBinaryHeapNode(convert(T, v), i)) push!(nodemap, nd_id) _heap_bubble_up!(h.ordering, nodes, nodemap, nd_id) return i end function Base.empty!(h::MutableBinaryHeap) ##CHUNK 2 if length(nodes) > 1 if Base.lt(ord, new_rt.value, v) _heap_bubble_up!(ord, nodes, nodemap, nd_id) else _heap_bubble_down!(ord, nodes, nodemap, nd_id) end end end return v end ################################################# # # Binary Heap type and constructors # ################################################# mutable struct MutableBinaryHeap{T, O <: Base.Ordering} <: AbstractMutableHeap{T, Int} ##CHUNK 3 """ function update!(h::MutableBinaryHeap{T}, i::Int, v) where T nodes = h.nodes nodemap = h.node_map ordering = h.ordering nd_id = nodemap[i] v0 = nodes[nd_id].value x = convert(T, v) nodes[nd_id] = MutableBinaryHeapNode(x, i) if Base.lt(ordering, x, v0) _heap_bubble_up!(ordering, nodes, nodemap, nd_id) else _heap_bubble_down!(ordering, nodes, nodemap, nd_id) end end """ delete!{T}(h::MutableBinaryHeap{T}, i::Int) ##CHUNK 4 nodemap = h.node_map i = length(nodemap) + 1 nd_id = length(nodes) + 1 push!(nodes, MutableBinaryHeapNode(convert(T, v), i)) push!(nodemap, nd_id) _heap_bubble_up!(h.ordering, nodes, nodemap, nd_id) return i end function Base.empty!(h::MutableBinaryHeap) empty!(h.nodes) empty!(h.node_map) return h end function Base.sizehint!(h::MutableBinaryHeap, s::Integer) sizehint!(h.nodes, s) sizehint!(h.node_map, s) return h end ##CHUNK 5 return el.value, el.handle end Base.pop!(h::MutableBinaryHeap{T}) where {T} = _binary_heap_pop!(h.ordering, h.nodes, h.node_map) """ update!{T}(h::MutableBinaryHeap{T}, i::Int, v::T) Replace the element at index `i` in heap `h` with `v`. This is equivalent to `h[i]=v`. 
""" function update!(h::MutableBinaryHeap{T}, i::Int, v) where T nodes = h.nodes nodemap = h.node_map ordering = h.ordering nd_id = nodemap[i] v0 = nodes[nd_id].value x = convert(T, v) nodes[nd_id] = MutableBinaryHeapNode(x, i) ##CHUNK 6 end function _heap_bubble_down!(ord::Ordering, nodes::Vector{MutableBinaryHeapNode{T}}, nodemap::Vector{Int}, nd_id::Int) where T @inbounds nd = nodes[nd_id] v::T = nd.value n = length(nodes) last_parent = n >> 1 swapped = true i = nd_id while swapped && i <= last_parent il = i << 1 if il < n # contains both left and right children ir = il + 1 ##CHUNK 7 else swapped = false end end end if i != nd_id @inbounds nodes[i] = nd @inbounds nodemap[nd.handle] = i end end function _binary_heap_pop!(ord::Ordering, nodes::Vector{MutableBinaryHeapNode{T}}, nodemap::Vector{Int}, nd_id::Int=1) where T # extract node rt = nodes[nd_id] v = rt.value @inbounds nodemap[rt.handle] = 0 ##CHUNK 8 ################################################# # # Binary Heap type and constructors # ################################################# mutable struct MutableBinaryHeap{T, O <: Base.Ordering} <: AbstractMutableHeap{T, Int} ordering::O nodes::Vector{MutableBinaryHeapNode{T}} node_map::Vector{Int} function MutableBinaryHeap{T}(ordering::Base.Ordering) where T nodes = Vector{MutableBinaryHeapNode{T}}() node_map = Vector{Int}() new{T, typeof(ordering)}(ordering, nodes, node_map) end ##CHUNK 9 end function _binary_heap_pop!(ord::Ordering, nodes::Vector{MutableBinaryHeapNode{T}}, nodemap::Vector{Int}, nd_id::Int=1) where T # extract node rt = nodes[nd_id] v = rt.value @inbounds nodemap[rt.handle] = 0 # if node-to-remove is at end, we can just pop it # the same applies to 1-element heaps that are empty after removing the last element if nd_id == lastindex(nodes) pop!(nodes) else # move the last node to the position of the node-to-remove @inbounds nodes[nd_id] = new_rt = nodes[end] pop!(nodes) @inbounds nodemap[new_rt.handle] = nd_id
187
197
Distributions.jl
72
function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end
function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end
[ 187, 197 ]
function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end
function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end
mean
187
197
src/censored.jl
#CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function var(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) ##CHUNK 2 log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) ##CHUNK 3 log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) ##CHUNK 4 d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) ##CHUNK 5 end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) ##CHUNK 6 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + 
xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function var(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end function var(d::RightCensored) upper = d.upper ##CHUNK 7 # ) / P_{x ~ d₀}(l ≤ x ≤ u), # where H[τ] is the entropy of τ. function entropy(d::LeftCensored) d0 = d.uncensored lower = d.lower log_prob_lower_inc = logcdf(d0, lower) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound ##CHUNK 8 log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end function var(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end function var(d::Censored) ##CHUNK 9 d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end # this expectation also uses the following relation: # 𝔼_{x ~ τ}[-log d(x)] = H[τ] - log P_{x ~ d₀}(l ≤ x ≤ u) # + (P_{x ~ d₀}(x = l) (log P_{x ~ d₀}(x = l) - log P_{x ~ d₀}(x ≤ l)) + # P_{x ~ d₀}(x = u) (log P_{x ~ d₀}(x = u) - log P_{x ~ d₀}(x ≥ u)) ##CHUNK 10 xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else
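A numerical check of the mean identity used in the `mean(d::Censored)` record above, written as a sketch that assumes a continuous uncensored distribution (so the noninclusive CDF coincides with `cdf`) and uses only the exported `censored`/`truncated` constructors:

using Distributions

d0, l, u = Normal(0.3, 1.0), -1.0, 2.0
p_lower = cdf(d0, l)             # P(x < l): mass collapsed onto the lower bound
p_upper = ccdf(d0, u)            # P(x > u): mass collapsed onto the upper bound
p_mid   = 1 - p_lower - p_upper  # P(l ≤ x ≤ u)
μ = l * p_lower + u * p_upper + p_mid * mean(truncated(d0, l, u))
μ ≈ mean(censored(d0, l, u))     # true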
199
209
Distributions.jl
73
function var(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end
function var(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end
[ 199, 209 ]
function var(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end
function var(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end
var
199
209
src/censored.jl
#CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end function var(d::Censored) ##CHUNK 2 log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) ##CHUNK 3 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::RightCensored) upper = d.upper ##CHUNK 4 # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper ##CHUNK 5 d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end # this expectation also uses the following relation: # 𝔼_{x ~ τ}[-log d(x)] = H[τ] - log P_{x ~ d₀}(l ≤ x ≤ u) # + (P_{x ~ d₀}(x = l) (log P_{x ~ d₀}(x = l) - log P_{x ~ d₀}(x ≤ l)) + # P_{x ~ d₀}(x = u) (log P_{x ~ d₀}(x = u) - 
log P_{x ~ d₀}(x ≥ u)) ##CHUNK 6 log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) ##CHUNK 7 end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) ##CHUNK 8 # ) / P_{x ~ d₀}(l ≤ x ≤ u), # where H[τ] is the entropy of τ. function entropy(d::LeftCensored) d0 = d.uncensored lower = d.lower log_prob_lower_inc = logcdf(d0, lower) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound ##CHUNK 9 #### Statistics quantile(d::Censored, p::Real) = _clamp(quantile(d.uncensored, p), d.lower, d.upper) median(d::Censored) = _clamp(median(d.uncensored), d.lower, d.upper) # the expectations use the following relation: # 𝔼_{x ~ d}[h(x)] = P_{x ~ d₀}(x < l) h(l) + P_{x ~ d₀}(x > u) h(u) # + P_{x ~ d₀}(l ≤ x ≤ u) 𝔼_{x ~ τ}[h(x)], # where d₀ is the uncensored distribution, d is d₀ censored to [l, u], # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored) ##CHUNK 10 xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else
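The same kind of check for the lower-bound-only case handled by `var(d::LeftCensored)`; the sketch assumes the keyword forms `censored(d0; lower=...)` and `truncated(d0; lower=...)` and a continuous `d0`:

using Distributions

d0, l = Normal(0.3, 1.0), -1.0
dtrunc = truncated(d0; lower = l)
p_l = cdf(d0, l)                  # mass collapsed onto the lower bound
p_mid = 1 - p_l                   # mass left in the interior
μ_int = mean(dtrunc)
μ = l * p_l + p_mid * μ_int
v = p_l * abs2(l - μ) + p_mid * (var(dtrunc) + abs2(μ_int - μ))
v ≈ var(censored(d0; lower = l))  # true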
210
220
Distributions.jl
74
function var(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end
function var(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end
[ 210, 220 ]
function var(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end
function var(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end
var
210
220
src/censored.jl
#CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end function var(d::Censored) ##CHUNK 2 log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) ##CHUNK 3 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::LeftCensored) lower = d.lower ##CHUNK 4 # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper ##CHUNK 5 end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) ##CHUNK 6 d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) 
log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end # this expectation also uses the following relation: # 𝔼_{x ~ τ}[-log d(x)] = H[τ] - log P_{x ~ d₀}(l ≤ x ≤ u) # + (P_{x ~ d₀}(x = l) (log P_{x ~ d₀}(x = l) - log P_{x ~ d₀}(x ≤ l)) + # P_{x ~ d₀}(x = u) (log P_{x ~ d₀}(x = u) - log P_{x ~ d₀}(x ≥ u)) ##CHUNK 7 log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) ##CHUNK 8 xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else ##CHUNK 9 #### Statistics quantile(d::Censored, p::Real) = _clamp(quantile(d.uncensored, p), d.lower, d.upper) median(d::Censored) = _clamp(median(d.uncensored), d.lower, d.upper) # the expectations use the following relation: # 𝔼_{x ~ d}[h(x)] = P_{x ~ d₀}(x < l) h(l) + P_{x ~ d₀}(x > u) h(u) # + P_{x ~ d₀}(l ≤ x ≤ u) 𝔼_{x ~ τ}[h(x)], # where d₀ is the uncensored distribution, d is d₀ censored to [l, u], # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored) ##CHUNK 10 xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end #### Evaluation function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower
221
236
Distributions.jl
75
function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end
function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end
[ 221, 236 ]
function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end
function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end
var
221
236
src/censored.jl
#CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::LeftCensored) lower = d.lower ##CHUNK 2 log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end function var(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end ##CHUNK 3 log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end function var(d::RightCensored) ##CHUNK 4 # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper ##CHUNK 5 lower = d.lower log_prob_lower_inc = logcdf(d0, lower) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) ##CHUNK 6 if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = 
logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) ##CHUNK 7 xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end #### Evaluation function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower ##CHUNK 8 log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) ##CHUNK 9 if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end ##CHUNK 10 #### Statistics quantile(d::Censored, p::Real) = _clamp(quantile(d.uncensored, p), d.lower, d.upper) median(d::Censored) = _clamp(median(d.uncensored), d.lower, d.upper) # the expectations use the following relation: # 𝔼_{x ~ d}[h(x)] = P_{x ~ d₀}(x < l) h(l) + P_{x ~ d₀}(x > u) h(u) # + P_{x ~ d₀}(l ≤ x ≤ u) 𝔼_{x ~ τ}[h(x)], # where d₀ is the uncensored distribution, d is d₀ censored to [l, u], # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored)
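For the interval-censored `var(d::Censored)` record, the decomposition can be checked the same way: the boundary masses weight the squared distances to the overall mean, and the truncated part contributes its variance shifted by the mean difference. A sketch under the same continuous-`d0` assumption:

using Distributions

d0, l, u = Normal(0.3, 1.0), -1.0, 2.0
dtrunc = truncated(d0, l, u)
p_l, p_u = cdf(d0, l), ccdf(d0, u)
p_mid = 1 - p_l - p_u
μ_int = mean(dtrunc)
μ = l * p_l + u * p_u + p_mid * μ_int
v = p_l * abs2(l - μ) + p_u * abs2(u - μ) + p_mid * (var(dtrunc) + abs2(μ_int - μ))
v ≈ var(censored(d0, l, u))       # true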
245
262
Distributions.jl
76
function entropy(d::LeftCensored) d0 = d.uncensored lower = d.lower log_prob_lower_inc = logcdf(d0, lower) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end
function entropy(d::LeftCensored) d0 = d.uncensored lower = d.lower log_prob_lower_inc = logcdf(d0, lower) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end
[ 245, 262 ]
function entropy(d::LeftCensored) d0 = d.uncensored lower = d.lower log_prob_lower_inc = logcdf(d0, lower) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end
function entropy(d::LeftCensored) d0 = d.uncensored lower = d.lower log_prob_lower_inc = logcdf(d0, lower) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end
entropy
245
262
src/censored.jl
#CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) ##CHUNK 2 log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end #### Evaluation function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper ##CHUNK 3 return entropy_interval + entropy_bound end function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end ##CHUNK 4 logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end #### Evaluation ##CHUNK 5 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::LeftCensored) lower = d.lower ##CHUNK 6 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper 
log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + ##CHUNK 7 # P_{x ~ d₀}(x = u) (log P_{x ~ d₀}(x = u) - log P_{x ~ d₀}(x ≥ u)) # ) / P_{x ~ d₀}(l ≤ x ≤ u), # where H[τ] is the entropy of τ. function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu ##CHUNK 8 function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end # this expectation also uses the following relation: # 𝔼_{x ~ τ}[-log d(x)] = H[τ] - log P_{x ~ d₀}(l ≤ x ≤ u) # + (P_{x ~ d₀}(x = l) (log P_{x ~ d₀}(x = l) - log P_{x ~ d₀}(x ≤ l)) + ##CHUNK 9 log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end function var(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end ##CHUNK 10 # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper
263
280
Distributions.jl
77
function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end
function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end
[ 263, 280 ]
function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end
function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end
entropy
263
280
src/censored.jl
#CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) ##CHUNK 2 return entropy_interval + entropy_bound end function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end ##CHUNK 3 log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end #### Evaluation function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper ##CHUNK 4 logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end #### Evaluation ##CHUNK 5 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + ##CHUNK 6 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) 
log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::LeftCensored) lower = d.lower ##CHUNK 7 log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end function var(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end ##CHUNK 8 # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper ##CHUNK 9 function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end # this expectation also uses the following relation: # 𝔼_{x ~ τ}[-log d(x)] = H[τ] - log P_{x ~ d₀}(l ≤ x ≤ u) # + (P_{x ~ d₀}(x = l) (log P_{x ~ d₀}(x = l) - log P_{x ~ d₀}(x ≤ l)) + ##CHUNK 10 # P_{x ~ d₀}(x = u) (log P_{x ~ d₀}(x = u) - log P_{x ~ d₀}(x ≥ u)) # ) / P_{x ~ d₀}(l ≤ x ≤ u), # where H[τ] is the entropy of τ. function entropy(d::LeftCensored) d0 = d.uncensored lower = d.lower log_prob_lower_inc = logcdf(d0, lower) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl
281
304
Distributions.jl
78
function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end
function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end
[ 281, 304 ]
function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end
function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end
entropy
281
304
src/censored.jl
#CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 return entropy_interval + entropy_bound end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end ##CHUNK 2 log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) ##CHUNK 3 else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end #### Evaluation function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) ##CHUNK 4 # P_{x ~ d₀}(x = u) (log P_{x ~ d₀}(x = u) - log P_{x ~ d₀}(x ≥ u)) # ) / P_{x ~ d₀}(l ≤ x ≤ u), # where H[τ] is the entropy of τ. 
function entropy(d::LeftCensored) d0 = d.uncensored lower = d.lower log_prob_lower_inc = logcdf(d0, lower) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl ##CHUNK 5 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::LeftCensored) lower = d.lower ##CHUNK 6 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval) return v end function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + ##CHUNK 7 function var(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(μ_interval, log_prob_interval)) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = (xexpy(abs2(lower - μ), log_prob_lower) + xexpy(abs2(upper - μ), log_prob_upper) + xexpy(v_interval, log_prob_interval)) return v end # this expectation also uses the following relation: # 𝔼_{x ~ τ}[-log d(x)] = H[τ] - log P_{x ~ d₀}(l ≤ x ≤ u) # + (P_{x ~ d₀}(x = l) (log P_{x ~ d₀}(x = l) - log P_{x ~ d₀}(x ≤ l)) + ##CHUNK 8 # and τ is d₀ truncated to [l, u] function mean(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) μ = xexpy(lower, log_prob_lower) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::RightCensored) upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper ##CHUNK 9 log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ 
end function var(d::LeftCensored) lower = d.lower log_prob_lower = _logcdf_noninclusive(d.uncensored, lower) log_prob_interval = log1mexp(log_prob_lower) dtrunc = _to_truncated(d) μ_interval = mean(dtrunc) μ = xexpy(lower, log_prob_lower) + xexpy(μ_interval, log_prob_interval) v_interval = var(dtrunc) + abs2(μ_interval - μ) v = xexpy(abs2(lower - μ), log_prob_lower) + xexpy(v_interval, log_prob_interval) return v end function var(d::RightCensored) ##CHUNK 10 log_prob_upper = upper === nothing ? zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower xi == upper && return log_prob_upper return logzero end end function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else one(result)
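The entropy method recorded above splits the calculation into the two boundary atoms and the truncated interior. A minimal sketch of how that can be sanity-checked numerically, assuming the public `censored` constructor exported by Distributions.jl and the convention that boundary atoms contribute their log probability mass:

using Distributions, Random, Statistics

Random.seed!(1)
d = censored(Normal(0.0, 1.0), -1.0, 2.0)   # interval-censored standard normal
x = rand(d, 10^6)                           # draws include values exactly at -1.0 and 2.0
mc_entropy = -mean(logpdf.(Ref(d), x))      # Monte Carlo estimate of E[-log f(X)]
entropy(d), mc_entropy                      # the two should agree to a few decimal places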
309
327
Distributions.jl
79
function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end
function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end
[ 309, 327 ]
function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end
function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end
pdf
309
327
src/censored.jl
#FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 end function logpdf(d::Truncated, x::Real) result = logpdf(d.untruncated, x) - d.logtp return _in_closed_interval(x, d.lower, d.upper) ? result : oftype(result, -Inf) end function cdf(d::Truncated, x::Real) result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) ##CHUNK 2 function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result #FILE: Distributions.jl/src/quantilealgs.jl ##CHUNK 1 return T(minimum(d)) elseif p == 1 return T(maximum(d)) else return T(NaN) end end function cquantile_newton(d::ContinuousUnivariateDistribution, p::Real, xs::Real=mode(d), tol::Real=1e-12) x = xs + (ccdf(d, xs)-p) / pdf(d, xs) T = typeof(x) if 0 < p < 1 x0 = T(xs) while abs(x-x0) > max(abs(x),abs(x0)) * tol x0 = x x = x0 + (ccdf(d, x0)-p) / pdf(d, x0) end return x elseif p == 1 return T(minimum(d)) #CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end function loglikelihood(d::Censored, x::AbstractArray{<:Real}) ##CHUNK 2 d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? 
zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower xi == upper && return log_prob_upper return logzero end end function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower ##CHUNK 3 function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) ##CHUNK 4 upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else ##CHUNK 5 zero(result) elseif upper === nothing || x < upper result else one(result) end end function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end ##CHUNK 6 zero(result) end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end #### Sampling ##CHUNK 7 oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower
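The pdf method recorded above returns the ordinary density in the open interval and the probability mass of the corresponding atom at each bound. A minimal sketch of that boundary behaviour, assuming the public `censored` constructor:

using Distributions

d0 = Normal()
d  = censored(d0, -1.0, 1.0)

pdf(d, 0.0)  ≈ pdf(d0, 0.0)          # interior point: ordinary density
pdf(d, -1.0) ≈ cdf(d0, -1.0)         # lower bound: point mass P(X ≤ -1)
pdf(d, 1.0)  ≈ ccdf(d0, 1.0)         # upper bound (continuous case): point mass P(X ≥ 1)
pdf(d, 2.0)  == 0.0                  # outside the support

# For a discrete base distribution the upper atom also absorbs the pmf at the bound.
dp = censored(Poisson(3.0); upper = 5)
pdf(dp, 5) ≈ ccdf(Poisson(3.0), 5) + pdf(Poisson(3.0), 5)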
329
347
Distributions.jl
80
function logpdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end
function logpdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end
[ 329, 347 ]
function logpdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end
function logpdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end
logpdf
329
347
src/censored.jl
#FILE: Distributions.jl/src/quantilealgs.jl ##CHUNK 1 x0 = x x = x0 + exp(lp - logpdf(d,x0) + log1mexp(min(logcdf(d,x0)-lp,0))) end end return x elseif lp == -Inf return T(minimum(d)) elseif lp == 0 return T(maximum(d)) else return T(NaN) end end function invlogccdf_newton(d::ContinuousUnivariateDistribution, lp::Real, xs::Real=mode(d), tol::Real=1e-12) T = typeof(lp - logpdf(d,xs)) if -Inf < lp < 0 x0 = T(xs) if lp < logccdf(d,x0) x = x0 + exp(lp - logpdf(d,x0) + logexpm1(max(logccdf(d,x0)-lp,0))) #FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 end function logpdf(d::Truncated, x::Real) result = logpdf(d.untruncated, x) - d.logtp return _in_closed_interval(x, d.lower, d.upper) ? result : oftype(result, -Inf) end function cdf(d::Truncated, x::Real) result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) #FILE: Distributions.jl/src/univariate/continuous/kumaraswamy.jl ##CHUNK 1 function logpdf(d::Kumaraswamy, x::Real) a, b = params(d) _x = clamp(x, 0, 1) # Ensures we can still get a value when outside the support y = log(a) + log(b) + xlogy(a - 1, _x) + xlog1py(b - 1, -_x^a) return x < 0 || x > 1 ? oftype(y, -Inf) : y end function ccdf(d::Kumaraswamy, x::Real) a, b = params(d) y = (1 - clamp(x, 0, 1)^a)^b return x < 0 ? one(y) : (x > 1 ? zero(y) : y) end cdf(d::Kumaraswamy, x::Real) = 1 - ccdf(d, x) function logccdf(d::Kumaraswamy, x::Real) a, b = params(d) y = b * log1p(-clamp(x, 0, 1)^a) return x < 0 ? zero(y) : (x > 1 ? oftype(y, -Inf) : y) #CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function loglikelihood(d::Censored, x::AbstractArray{<:Real}) ##CHUNK 2 else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower ##CHUNK 3 d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? 
zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower xi == upper && return log_prob_upper return logzero end end function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower ##CHUNK 4 _logcdf_noninclusive(d::UnivariateDistribution, x) = logcdf(d, x) function _logcdf_noninclusive(d::DiscreteUnivariateDistribution, x) return logsubexp(logcdf(d, x), logpdf(d, x)) end _ccdf_inclusive(d::UnivariateDistribution, x) = ccdf(d, x) _ccdf_inclusive(d::DiscreteUnivariateDistribution, x) = ccdf(d, x) + pdf(d, x) _logccdf_inclusive(d::UnivariateDistribution, x) = logccdf(d, x) function _logccdf_inclusive(d::DiscreteUnivariateDistribution, x) return logaddexp(logccdf(d, x), logpdf(d, x)) end # like xlogx but for input on log scale, safe when x == -Inf function xexpx(x::Real) result = x * exp(x) return x == -Inf ? zero(result) : result end # x * exp(y) with correct limit for y == -Inf ##CHUNK 5 dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end #### Evaluation function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) ##CHUNK 6 function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) ##CHUNK 7 zero(result) end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end #### Sampling
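The log-scale method above mirrors `pdf`, swapping in `logcdf`, `logccdf`, and `logaddexp` at the bounds. A small consistency check under the same assumptions as the previous sketch:

using Distributions

d0 = Normal()
d  = censored(d0, -1.0, 1.0)

logpdf(d, 0.25) ≈ log(pdf(d, 0.25))   # interior: plain log density
logpdf(d, -1.0) ≈ logcdf(d0, -1.0)    # lower atom on the log scale
logpdf(d, 1.0)  ≈ logccdf(d0, 1.0)    # upper atom (continuous case)
logpdf(d, 3.0)  == -Inf               # outside the support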
349
363
Distributions.jl
81
function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower xi == upper && return log_prob_upper return logzero end end
function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower xi == upper && return log_prob_upper return logzero end end
[ 349, 363 ]
function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower xi == upper && return log_prob_upper return logzero end end
function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower xi == upper && return log_prob_upper return logzero end end
loglikelihood
349
363
src/censored.jl
#CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end function cdf(d::Censored, x::Real) ##CHUNK 2 else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) ##CHUNK 3 lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored ##CHUNK 4 else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) ##CHUNK 5 log_prob_lower = logsubexp(log_prob_lower_inc, logpl) xlogx_pl = xexpx(logpl) else log_prob_lower = log_prob_lower_inc xlogx_pl = 0 end log_prob_interval = log1mexp(log_prob_lower) entropy_bound = -xexpx(log_prob_lower_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl return entropy_interval + entropy_bound end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) ##CHUNK 6 dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pl + xlogx_pu return entropy_interval + entropy_bound end #### Evaluation function pdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? 
one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) ##CHUNK 7 function entropy(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower_inc = logcdf(d0, lower) log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpl = logpdf(d0, lower) logpu = logpdf(d0, upper) log_prob_lower = logsubexp(log_prob_lower_inc, logpl) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pl = xexpx(logpl) xlogx_pu = xexpx(logpu) else log_prob_lower = log_prob_lower_inc log_prob_upper_inc = log_prob_upper xlogx_pl = xlogx_pu = 0 end log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) entropy_bound = -(xexpx(log_prob_lower_inc) + xexpx(log_prob_upper_inc)) ##CHUNK 8 upper = d.upper log_prob_upper = logccdf(d.uncensored, upper) log_prob_interval = log1mexp(log_prob_upper) μ = xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval) return μ end function mean(d::Censored) d0 = d.uncensored lower = d.lower upper = d.upper log_prob_lower = _logcdf_noninclusive(d0, lower) log_prob_upper = logccdf(d0, upper) log_prob_interval = log1mexp(logaddexp(log_prob_lower, log_prob_upper)) μ = (xexpy(lower, log_prob_lower) + xexpy(upper, log_prob_upper) + xexpy(mean(_to_truncated(d)), log_prob_interval)) return μ end function var(d::LeftCensored) lower = d.lower ##CHUNK 9 else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else one(result) end ##CHUNK 10 return entropy_interval + entropy_bound end function entropy(d::RightCensored) d0 = d.uncensored upper = d.upper log_prob_upper = logccdf(d0, upper) if value_support(typeof(d0)) === Discrete logpu = logpdf(d0, upper) log_prob_upper_inc = logaddexp(log_prob_upper, logpu) xlogx_pu = xexpx(logpu) else log_prob_upper_inc = log_prob_upper xlogx_pu = 0 end log_prob_interval = log1mexp(log_prob_upper) entropy_bound = -xexpx(log_prob_upper_inc) dtrunc = _to_truncated(d) entropy_interval = xexpy(entropy(dtrunc), log_prob_interval) - xexpx(log_prob_interval) + xlogx_pu return entropy_interval + entropy_bound end
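The `loglikelihood` method above hoists the two boundary log-probabilities out of the loop, but it is equivalent to summing `logpdf` over the data. A short sketch:

using Distributions, Random

Random.seed!(2)
d = censored(Normal(), -1.0, 1.0)
x = rand(d, 1000)                             # clamped draws land exactly on ±1.0
loglikelihood(d, x) ≈ sum(logpdf.(Ref(d), x))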
365
376
Distributions.jl
82
function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else one(result) end end
function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else one(result) end end
[ 365, 376 ]
function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else one(result) end end
function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else one(result) end end
cdf
365
376
src/censored.jl
#FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end ##CHUNK 2 function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result ##CHUNK 3 result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end ##CHUNK 4 end function logpdf(d::Truncated, x::Real) result = logpdf(d.untruncated, x) - d.logtp return _in_closed_interval(x, d.lower, d.upper) ? result : oftype(result, -Inf) end function cdf(d::Truncated, x::Real) result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. 
NaN issues with `Binomial` return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) #CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 return logzero end end function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end function ccdf(d::Censored, x::Real) lower = d.lower ##CHUNK 2 oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end ##CHUNK 3 upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else ##CHUNK 4 function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end #### Sampling rand(rng::AbstractRNG, d::Censored) = _clamp(rand(rng, d.uncensored), d.lower, d.upper) ##CHUNK 5 lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored ##CHUNK 6 lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower xi == upper && return log_prob_upper return logzero end end function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower
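The `cdf` method above delegates to the uncensored cdf inside the censoring interval and clamps outside it; the jump at the lower bound comes for free because `cdf(d0, lower)` already includes the atom's mass. A minimal sketch:

using Distributions

d0 = Normal()
d  = censored(d0, -1.0, 1.0)

cdf(d, -1.5) == 0.0             # below the lower bound
cdf(d, -1.0) ≈ cdf(d0, -1.0)    # lower atom already included in cdf(d0, lower)
cdf(d, 0.3)  ≈ cdf(d0, 0.3)     # interior
cdf(d, 1.0)  == 1.0             # at (and above) the upper bound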
378
389
Distributions.jl
83
function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end
function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end
[ 378, 389 ]
function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end
function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end
logcdf
378
389
src/censored.jl
#FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result ##CHUNK 2 return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end ##CHUNK 3 result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end ##CHUNK 4 end function logpdf(d::Truncated, x::Real) result = logpdf(d.untruncated, x) - d.logtp return _in_closed_interval(x, d.lower, d.upper) ? result : oftype(result, -Inf) end function cdf(d::Truncated, x::Real) result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. 
NaN issues with `Binomial` return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) #CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 return logzero end end function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else one(result) end end function ccdf(d::Censored, x::Real) lower = d.lower ##CHUNK 2 upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else ##CHUNK 3 elseif upper === nothing || x < upper result else one(result) end end function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end ##CHUNK 4 function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end #### Sampling rand(rng::AbstractRNG, d::Censored) = _clamp(rand(rng, d.uncensored), d.lower, d.upper) ##CHUNK 5 lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored ##CHUNK 6 lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored
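The `logcdf` method above has the same branch structure as `cdf`, with the clamping expressed as `-Inf` and `0.0` on the log scale:

using Distributions

d = censored(Normal(), -1.0, 1.0)

logcdf(d, -2.0) == -Inf
logcdf(d, 0.5)  ≈ log(cdf(d, 0.5))
logcdf(d, 1.0)  == 0.0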
391
402
Distributions.jl
84
function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end
function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end
[ 391, 402 ]
function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end
function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end
ccdf
391
402
src/censored.jl
#FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result ##CHUNK 2 return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end ##CHUNK 3 result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end ##CHUNK 4 end function logpdf(d::Truncated, x::Real) result = logpdf(d.untruncated, x) - d.logtp return _in_closed_interval(x, d.lower, d.upper) ? result : oftype(result, -Inf) end function cdf(d::Truncated, x::Real) result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) #FILE: Distributions.jl/test/censored.jl ##CHUNK 1 # Testing censored distributions module TestCensored using Distributions, Test using Distributions: Censored function _as_mixture(d::Censored) d0 = d.uncensored dtrunc = if d0 isa DiscreteUniform || d0 isa Poisson truncated( d0, d.lower === nothing ? -Inf : floor(d.lower) + 1, d.upper === nothing ? Inf : ceil(d.upper) - 1, ) elseif d0 isa ContinuousDistribution truncated( d0, d.lower === nothing ? -Inf : nextfloat(float(d.lower)), d.upper === nothing ? 
Inf : prevfloat(float(d.upper)), #CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 return logzero end end function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else one(result) end end function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper ##CHUNK 2 elseif upper === nothing || x < upper result else one(result) end end function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end ##CHUNK 3 result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else ##CHUNK 4 lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored ##CHUNK 5 function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end #### Sampling rand(rng::AbstractRNG, d::Censored) = _clamp(rand(rng, d.uncensored), d.lower, d.upper)
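The `ccdf` method above mirrors `cdf`: one below the interval, the uncensored ccdf inside it, and zero at and above the upper bound, where the remaining mass sits in the upper atom. A short sketch:

using Distributions

d0 = Normal()
d  = censored(d0, -1.0, 1.0)

ccdf(d, -1.5) == 1.0
ccdf(d, 0.0)  ≈ ccdf(d0, 0.0)
ccdf(d, 1.0)  == 0.0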
404
415
Distributions.jl
85
function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end
function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end
[ 404, 415 ]
function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end
function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end
logccdf
404
415
src/censored.jl
#FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result ##CHUNK 2 end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result end end ## random number generation function rand(rng::AbstractRNG, d::Truncated) d0 = d.untruncated tp = d.tp lower = d.lower upper = d.upper ##CHUNK 3 return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end ##CHUNK 4 result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end #CURRENT FILE: Distributions.jl/src/censored.jl ##CHUNK 1 return logzero end end function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else one(result) end end function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper ##CHUNK 2 elseif upper === nothing || x < upper result else one(result) end end function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end ##CHUNK 3 result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) ##CHUNK 4 lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? 
one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored ##CHUNK 5 lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored ##CHUNK 6 else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx))
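Because the censored `cdf` is defined as P(Y ≤ x) and `ccdf` as P(Y > x), the two always sum to one, which gives a quick cross-check of the log-scale methods recorded above:

using Distributions

d = censored(Normal(), -1.0, 1.0)

logccdf(d, -2.0) == 0.0
logccdf(d, 1.0)  == -Inf
exp(logcdf(d, 0.5)) + exp(logccdf(d, 0.5)) ≈ 1.0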
156
172
Distributions.jl
86
function _rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, allocate::Bool, ) where {N} if allocate @inbounds for i in eachindex(x) x[i] = rand(rng, s) end else @inbounds for xi in x rand!(rng, s, xi) end end return x end
function _rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, allocate::Bool, ) where {N} if allocate @inbounds for i in eachindex(x) x[i] = rand(rng, s) end else @inbounds for xi in x rand!(rng, s, xi) end end return x end
[ 156, 172 ]
function _rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, allocate::Bool, ) where {N} if allocate @inbounds for i in eachindex(x) x[i] = rand(rng, s) end else @inbounds for xi in x rand!(rng, s, xi) end end return x end
function _rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, allocate::Bool, ) where {N} if allocate @inbounds for i in eachindex(x) x[i] = rand(rng, s) end else @inbounds for xi in x rand!(rng, s, xi) end end return x end
_rand!
156
172
src/genericrand.jl
#FILE: Distributions.jl/src/samplers/multinomial.jl ##CHUNK 1 function _rand!(rng::AbstractRNG, s::MultinomialSampler, x::AbstractVector{<:Real}) n = s.n k = length(s) if n^2 > k multinom_rand!(rng, n, s.prob, x) else # Use an alias table fill!(x, zero(eltype(x))) a = s.alias for i = 1:n x[rand(rng, a)] += 1 end end return x end length(s::MultinomialSampler) = length(s.prob) #CURRENT FILE: Distributions.jl/src/genericrand.jl ##CHUNK 1 function _rand!( rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}, x::AbstractArray{<:Real}, ) @inbounds for xi in eachvariate(x, variate_form(typeof(s))) rand!(rng, s, xi) end return x end Base.@propagate_inbounds function rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, ) where {N} sz = size(s) allocate = !all(isassigned(x, i) && size(@inbounds x[i]) == sz for i in eachindex(x)) return rand!(rng, s, x, allocate) end ##CHUNK 2 Base.@propagate_inbounds function rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, ) where {N} sz = size(s) allocate = !all(isassigned(x, i) && size(@inbounds x[i]) == sz for i in eachindex(x)) return rand!(rng, s, x, allocate) end Base.@propagate_inbounds function rand!( s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, allocate::Bool, ) where {N} return rand!(default_rng(), s, x, allocate) end @inline function rand!( rng::AbstractRNG, ##CHUNK 3 Base.@propagate_inbounds function rand!( s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, allocate::Bool, ) where {N} return rand!(default_rng(), s, x, allocate) end @inline function rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, allocate::Bool, ) where {N} @boundscheck begin if !allocate sz = size(s) all(size(xi) == sz for xi in x) || throw(DimensionMismatch("inconsistent array dimensions")) end ##CHUNK 4 s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, allocate::Bool, ) where {N} @boundscheck begin if !allocate sz = size(s) all(size(xi) == sz for xi in x) || throw(DimensionMismatch("inconsistent array dimensions")) end end # the function barrier fixes performance issues if `sampler(s)` is type unstable return _rand!(rng, sampler(s), x, allocate) end """ sampler(d::Distribution) -> Sampleable sampler(s::Sampleable) -> s ##CHUNK 5 throw(DimensionMismatch( "number of dimensions of `x` ($M) must be greater than number of dimensions of `s` ($N)" )) ntuple(i -> size(x, i), Val(N)) == size(s) || throw(DimensionMismatch("inconsistent array dimensions")) end # the function barrier fixes performance issues if `sampler(s)` is type unstable return _rand!(rng, sampler(s), x) end function _rand!( rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}, x::AbstractArray{<:Real}, ) @inbounds for xi in eachvariate(x, variate_form(typeof(s))) rand!(rng, s, xi) end return x end ##CHUNK 6 end """ rand!([rng::AbstractRNG,] s::Sampleable, A::AbstractArray) Generate one or multiple samples from `s` to a pre-allocated array `A`. `A` should be in the form as specified above. The rules are summarized as below: - When `s` is univariate, `A` can be an array of arbitrary shape. Each element of `A` will be overridden by one sample. - When `s` is multivariate, `A` can be a vector to store one sample, or a matrix with each column for a sample. 
- When `s` is matrix-variate, `A` can be a matrix to store one sample, or an array of matrices with each element for a sample matrix. """ function rand! end Base.@propagate_inbounds rand!(s::Sampleable, X::AbstractArray) = rand!(default_rng(), s, X) Base.@propagate_inbounds function rand!(rng::AbstractRNG, s::Sampleable, X::AbstractArray) return _rand!(rng, s, X) end ##CHUNK 7 - When `s` is multivariate, `A` can be a vector to store one sample, or a matrix with each column for a sample. - When `s` is matrix-variate, `A` can be a matrix to store one sample, or an array of matrices with each element for a sample matrix. """ function rand! end Base.@propagate_inbounds rand!(s::Sampleable, X::AbstractArray) = rand!(default_rng(), s, X) Base.@propagate_inbounds function rand!(rng::AbstractRNG, s::Sampleable, X::AbstractArray) return _rand!(rng, s, X) end # default definitions for arraylike variates @inline function rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}, ) where {N} @boundscheck begin size(x) == size(s) || throw(DimensionMismatch("inconsistent array dimensions")) end ##CHUNK 8 out = Array{float(eltype(s))}(undef, dims) return @inbounds rand!(rng, sampler(s), out) end function rand( rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous}, dims::Dims, ) sz = size(s) ax = map(Base.OneTo, dims) out = [Array{float(eltype(s))}(undef, sz) for _ in Iterators.product(ax...)] return @inbounds rand!(rng, sampler(s), out, false) end """ rand!([rng::AbstractRNG,] s::Sampleable, A::AbstractArray) Generate one or multiple samples from `s` to a pre-allocated array `A`. `A` should be in the form as specified above. The rules are summarized as below: - When `s` is univariate, `A` can be an array of arbitrary shape. Each element of `A` will be overridden by one sample. ##CHUNK 9 # default definitions for arraylike variates @inline function rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}, ) where {N} @boundscheck begin size(x) == size(s) || throw(DimensionMismatch("inconsistent array dimensions")) end return _rand!(rng, s, x) end @inline function rand!( rng::AbstractRNG, s::Sampleable{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,M}, ) where {N,M} @boundscheck begin M > N ||
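The `_rand!` method in this record fills a pre-allocated array of arrays, one inner array per variate, allocating each element only when needed. A minimal usage sketch of the public `rand!` entry point that dispatches to it, assuming Distributions.jl and Random are loaded; the `MvNormal` and container sizes are illustrative choices, not taken from the record:

using Distributions, Random

d = MvNormal(zeros(2), [1.0 0.3; 0.3 1.0])   # an ArrayLikeVariate{1} sampleable

x = [zeros(2) for _ in 1:3]                  # pre-allocated: three length-2 vectors
rand!(d, x)                                  # each inner vector is overwritten with one sample

y = Vector{Vector{Float64}}(undef, 3)        # unassigned elements trigger the `allocate = true` path
rand!(d, y)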
54
66
Distributions.jl
87
function cov(d::MatrixDistribution)
    M = length(d)
    V = zeros(partype(d), M, M)
    iter = CartesianIndices(size(d))
    for el1 = 1:M
        for el2 = 1:el1
            i, j = Tuple(iter[el1])
            k, l = Tuple(iter[el2])
            V[el1, el2] = cov(d, i, j, k, l)
        end
    end
    return V + tril(V, -1)'
end

function cov(d::MatrixDistribution)
    M = length(d)
    V = zeros(partype(d), M, M)
    iter = CartesianIndices(size(d))
    for el1 = 1:M
        for el2 = 1:el1
            i, j = Tuple(iter[el1])
            k, l = Tuple(iter[el2])
            V[el1, el2] = cov(d, i, j, k, l)
        end
    end
    return V + tril(V, -1)'
end

[ 54, 66 ]

function cov(d::MatrixDistribution)
    M = length(d)
    V = zeros(partype(d), M, M)
    iter = CartesianIndices(size(d))
    for el1 = 1:M
        for el2 = 1:el1
            i, j = Tuple(iter[el1])
            k, l = Tuple(iter[el2])
            V[el1, el2] = cov(d, i, j, k, l)
        end
    end
    return V + tril(V, -1)'
end

function cov(d::MatrixDistribution)
    M = length(d)
    V = zeros(partype(d), M, M)
    iter = CartesianIndices(size(d))
    for el1 = 1:M
        for el2 = 1:el1
            i, j = Tuple(iter[el1])
            k, l = Tuple(iter[el2])
            V[el1, el2] = cov(d, i, j, k, l)
        end
    end
    return V + tril(V, -1)'
end
cov
54
66
src/matrixvariates.jl
#FILE: Distributions.jl/src/multivariates.jl ##CHUNK 1 cov(d::MultivariateDistribution) """ cor(d::MultivariateDistribution) Computes the correlation matrix for distribution `d`. """ function cor(d::MultivariateDistribution) C = cov(d) n = size(C, 1) @assert size(C, 2) == n R = Matrix{eltype(C)}(undef, n, n) for j = 1:n for i = 1:j-1 @inbounds R[i, j] = R[j, i] end R[j, j] = 1.0 for i = j+1:n @inbounds R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j]) #FILE: Distributions.jl/src/matrix/matrixnormal.jl ##CHUNK 1 return Normal(μ, σ) end function _multivariate(d::MatrixNormal) n, p = size(d) all([n, p] .> 1) && throw(ArgumentError("Row or col dim of `MatrixNormal` must be 1 to coerce to `MvNormal`")) return vec(d) end function _rand_params(::Type{MatrixNormal}, elty, n::Int, p::Int) M = randn(elty, n, p) U = (X = 2 .* rand(elty, n, n) .- 1; X * X') V = (Y = 2 .* rand(elty, p, p) .- 1; Y * Y') return M, U, V end #FILE: Distributions.jl/src/mixtures/mixturemodel.jl ##CHUNK 1 return v end function var(d::MultivariateMixture) return diag(cov(d)) end function cov(d::MultivariateMixture) K = ncomponents(d) p = probs(d) m = zeros(length(d)) md = zeros(length(d)) V = zeros(length(d),length(d)) for i = 1:K pi = p[i] if pi > 0.0 c = component(d, i) axpy!(pi, mean(c), m) axpy!(pi, cov(c), V) #FILE: Distributions.jl/src/matrix/wishart.jl ##CHUNK 1 # # A is a lower triangular matrix, with # # A(i, j) ~ sqrt of Chisq(df - i + 1) when i == j # ~ Normal() when i > j # T = eltype(A) z = zero(T) axes1 = axes(A, 1) @inbounds for (j, jdx) in enumerate(axes(A, 2)), (i, idx) in enumerate(axes1) A[idx, jdx] = if i < j z elseif i > j randn(rng, T) else rand(rng, Chi(df - i + 1)) end end return A end #FILE: Distributions.jl/src/matrix/matrixfdist.jl ##CHUNK 1 n1, n2, B = params(d) n2 > p + 1 || throw(ArgumentError("mean only defined for df2 > dim + 1")) return (n1 / (n2 - p - 1)) * Matrix(B) end @inline partype(d::MatrixFDist{T}) where {T <: Real} = T # Konno (1988 JJSS) Corollary 2.4.i function cov(d::MatrixFDist, i::Integer, j::Integer, k::Integer, l::Integer) p = size(d, 1) n1, n2, PDB = params(d) n2 > p + 3 || throw(ArgumentError("cov only defined for df2 > dim + 3")) n = n1 + n2 B = Matrix(PDB) n1*(n - p - 1)*inv((n2 - p)*(n2 - p - 1)*(n2 - p - 3))*(2inv(n2 - p - 1)*B[i,j]*B[k,l] + B[j,l]*B[i,k] + B[i,l]*B[k,j]) end function var(d::MatrixFDist, i::Integer, j::Integer) p = size(d, 1) n1, n2, PDB = params(d) #FILE: Distributions.jl/src/multivariate/mvnormal.jl ##CHUNK 1 end function fit_mle(D::Type{FullNormal}, x::AbstractMatrix{Float64}, w::AbstractVector) m = size(x, 1) n = size(x, 2) length(w) == n || throw(DimensionMismatch("Inconsistent argument dimensions")) inv_sw = inv(sum(w)) mu = BLAS.gemv('N', inv_sw, x, w) z = Matrix{Float64}(undef, m, n) for j = 1:n cj = sqrt(w[j]) for i = 1:m @inbounds z[i,j] = (x[i,j] - mu[i]) * cj end end C = BLAS.syrk('U', 'N', inv_sw, z) LinearAlgebra.copytri!(C, 'U') MvNormal(mu, PDMat(C)) ##CHUNK 2 end function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}) m = size(x, 1) n = size(x, 2) mu = vec(mean(x, dims=2)) va = zeros(Float64, m) for j = 1:n for i = 1:m @inbounds va[i] += abs2(x[i,j] - mu[i]) end end lmul!(inv(n), va) MvNormal(mu, PDiagMat(va)) end function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}, w::AbstractVector) m = size(x, 1) n = size(x, 2) #FILE: Distributions.jl/test/reshaped.jl ##CHUNK 1 V = rand_posdef_mat(rand(4, 4)) M = randn(5, 4) d = MatrixNormal(M, U, V) for v in (vec(d), reshape(d, length(d)), reshape(d, (length(d),))) @test v isa 
MvNormal @test mean(v) == vec(M) @test cov(v) == kron(V, U) end end end #CURRENT FILE: Distributions.jl/src/matrixvariates.jl ##CHUNK 1 """ var(d::MatrixDistribution) Compute the matrix of element-wise variances for distribution `d`. """ var(d::MatrixDistribution) = ((n, p) = size(d); [var(d, i, j) for i in 1:n, j in 1:p]) """ cov(d::MatrixDistribution) Compute the covariance matrix for `vec(X)`, where `X` is a random matrix with distribution `d`. """ cov(d::MatrixDistribution, ::Val{true}) = cov(d) """ cov(d::MatrixDistribution, flattened = Val(false)) Compute the 4-dimensional array whose `(i, j, k, l)` element is `cov(X[i,j], X[k, l])`. """ ##CHUNK 2 distribution of inv(X), where X is a random matrix with distribution `d`. """ Base.inv(d::MatrixDistribution) """ mean(d::MatrixDistribution) Return the mean matrix of `d`. """ mean(d::MatrixDistribution) """ var(d::MatrixDistribution) Compute the matrix of element-wise variances for distribution `d`. """ var(d::MatrixDistribution) = ((n, p) = size(d); [var(d, i, j) for i in 1:n, j in 1:p]) """ cov(d::MatrixDistribution)
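The `cov(d::MatrixDistribution)` method above assembles the covariance of `vec(X)` from element-wise covariances, so an `n×p` matrix distribution yields an `np×np` matrix. A small sketch, assuming Distributions.jl and LinearAlgebra are available; the `MatrixNormal` parameters are illustrative, and the `kron` identity is the one checked in the reshaped.jl test chunk quoted above:

using Distributions, LinearAlgebra

M = zeros(2, 3)
U = [1.0 0.0; 0.0 2.0]                        # 2×2 row covariance
V = [3.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0]   # 3×3 column covariance
d = MatrixNormal(M, U, V)

C = cov(d)                          # 6×6: covariance of vec(X)
size(C) == (length(d), length(d))
C ≈ kron(V, U)                      # for MatrixNormal, cov(vec(X)) = V ⊗ U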
98
115
Distributions.jl
88
function cor(d::MultivariateDistribution)
    C = cov(d)
    n = size(C, 1)
    @assert size(C, 2) == n
    R = Matrix{eltype(C)}(undef, n, n)
    for j = 1:n
        for i = 1:j-1
            @inbounds R[i, j] = R[j, i]
        end
        R[j, j] = 1.0
        for i = j+1:n
            @inbounds R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j])
        end
    end
    return R
end

function cor(d::MultivariateDistribution)
    C = cov(d)
    n = size(C, 1)
    @assert size(C, 2) == n
    R = Matrix{eltype(C)}(undef, n, n)
    for j = 1:n
        for i = 1:j-1
            @inbounds R[i, j] = R[j, i]
        end
        R[j, j] = 1.0
        for i = j+1:n
            @inbounds R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j])
        end
    end
    return R
end

[ 98, 115 ]

function cor(d::MultivariateDistribution)
    C = cov(d)
    n = size(C, 1)
    @assert size(C, 2) == n
    R = Matrix{eltype(C)}(undef, n, n)
    for j = 1:n
        for i = 1:j-1
            @inbounds R[i, j] = R[j, i]
        end
        R[j, j] = 1.0
        for i = j+1:n
            @inbounds R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j])
        end
    end
    return R
end

function cor(d::MultivariateDistribution)
    C = cov(d)
    n = size(C, 1)
    @assert size(C, 2) == n
    R = Matrix{eltype(C)}(undef, n, n)
    for j = 1:n
        for i = 1:j-1
            @inbounds R[i, j] = R[j, i]
        end
        R[j, j] = 1.0
        for i = j+1:n
            @inbounds R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j])
        end
    end
    return R
end
cor
98
115
src/multivariates.jl
#FILE: Distributions.jl/src/multivariate/multinomial.jl ##CHUNK 1 @inbounds p_i = p[i] v[i] = n * p_i * (1 - p_i) end v end function cov(d::Multinomial{T}) where T<:Real p = probs(d) k = length(p) n = ntrials(d) C = Matrix{T}(undef, k, k) for j = 1:k pj = p[j] for i = 1:j-1 @inbounds C[i,j] = - n * p[i] * pj end @inbounds C[j,j] = n * pj * (1-pj) end #FILE: Distributions.jl/src/multivariate/dirichletmultinomial.jl ##CHUNK 1 function var(d::DirichletMultinomial{T}) where T <: Real v = fill(d.n * (d.n + d.α0) / (1 + d.α0), length(d)) p = d.α / d.α0 for i in eachindex(v) @inbounds v[i] *= p[i] * (1 - p[i]) end v end function cov(d::DirichletMultinomial{<:Real}) v = var(d) c = d.α * d.α' lmul!(-d.n * (d.n + d.α0) / (d.α0^2 * (1 + d.α0)), c) for (i, vi) in zip(diagind(c), v) @inbounds c[i] = vi end c end # Evaluation #FILE: Distributions.jl/src/multivariate/dirichlet.jl ##CHUNK 1 αi * (α0 - αi) * c end return v end function cov(d::Dirichlet) α = d.alpha α0 = d.alpha0 c = inv(α0^2 * (α0 + 1)) T = typeof(zero(eltype(α))^2 * c) k = length(α) C = Matrix{T}(undef, k, k) for j = 1:k αj = α[j] αjc = αj * c for i in 1:(j-1) @inbounds C[i,j] = C[j,i] end @inbounds C[j,j] = (α0 - αj) * αjc #FILE: Distributions.jl/src/multivariate/mvnormal.jl ##CHUNK 1 end function fit_mle(D::Type{FullNormal}, x::AbstractMatrix{Float64}, w::AbstractVector) m = size(x, 1) n = size(x, 2) length(w) == n || throw(DimensionMismatch("Inconsistent argument dimensions")) inv_sw = inv(sum(w)) mu = BLAS.gemv('N', inv_sw, x, w) z = Matrix{Float64}(undef, m, n) for j = 1:n cj = sqrt(w[j]) for i = 1:m @inbounds z[i,j] = (x[i,j] - mu[i]) * cj end end C = BLAS.syrk('U', 'N', inv_sw, z) LinearAlgebra.copytri!(C, 'U') MvNormal(mu, PDMat(C)) ##CHUNK 2 @inbounds va[i] += abs2(x[i,j] - mu[i]) end end lmul!(inv(n), va) MvNormal(mu, PDiagMat(va)) end function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}, w::AbstractVector) m = size(x, 1) n = size(x, 2) length(w) == n || throw(DimensionMismatch("Inconsistent argument dimensions")) inv_sw = inv(sum(w)) mu = BLAS.gemv('N', inv_sw, x, w) va = zeros(Float64, m) for j = 1:n @inbounds wj = w[j] for i = 1:m @inbounds va[i] += abs2(x[i,j] - mu[i]) * wj ##CHUNK 3 end end lmul!(inv_sw, va) MvNormal(mu, PDiagMat(va)) end function fit_mle(D::Type{IsoNormal}, x::AbstractMatrix{Float64}) m = size(x, 1) n = size(x, 2) mu = vec(mean(x, dims=2)) va = 0. for j = 1:n va_j = 0. 
for i = 1:m @inbounds va_j += abs2(x[i,j] - mu[i]) end va += va_j end MvNormal(mu, ScalMat(m, va / (m * n))) ##CHUNK 4 end function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}) m = size(x, 1) n = size(x, 2) mu = vec(mean(x, dims=2)) va = zeros(Float64, m) for j = 1:n for i = 1:m @inbounds va[i] += abs2(x[i,j] - mu[i]) end end lmul!(inv(n), va) MvNormal(mu, PDiagMat(va)) end function fit_mle(D::Type{DiagNormal}, x::AbstractMatrix{Float64}, w::AbstractVector) m = size(x, 1) n = size(x, 2) #FILE: Distributions.jl/src/matrix/wishart.jl ##CHUNK 1 # # A is a lower triangular matrix, with # # A(i, j) ~ sqrt of Chisq(df - i + 1) when i == j # ~ Normal() when i > j # T = eltype(A) z = zero(T) axes1 = axes(A, 1) @inbounds for (j, jdx) in enumerate(axes(A, 2)), (i, idx) in enumerate(axes1) A[idx, jdx] = if i < j z elseif i > j randn(rng, T) else rand(rng, Chi(df - i + 1)) end end return A end #FILE: Distributions.jl/src/matrix/matrixfdist.jl ##CHUNK 1 n1, n2, B = params(d) n2 > p + 1 || throw(ArgumentError("mean only defined for df2 > dim + 1")) return (n1 / (n2 - p - 1)) * Matrix(B) end @inline partype(d::MatrixFDist{T}) where {T <: Real} = T # Konno (1988 JJSS) Corollary 2.4.i function cov(d::MatrixFDist, i::Integer, j::Integer, k::Integer, l::Integer) p = size(d, 1) n1, n2, PDB = params(d) n2 > p + 3 || throw(ArgumentError("cov only defined for df2 > dim + 3")) n = n1 + n2 B = Matrix(PDB) n1*(n - p - 1)*inv((n2 - p)*(n2 - p - 1)*(n2 - p - 3))*(2inv(n2 - p - 1)*B[i,j]*B[k,l] + B[j,l]*B[i,k] + B[i,l]*B[k,j]) end function var(d::MatrixFDist, i::Integer, j::Integer) p = size(d, 1) n1, n2, PDB = params(d) #FILE: Distributions.jl/src/samplers/binomial.jl ##CHUNK 1 # compute probability vector of a Binomial distribution function binompvec(n::Int, p::Float64) pv = Vector{Float64}(undef, n+1) if p == 0.0 fill!(pv, 0.0) pv[1] = 1.0 elseif p == 1.0 fill!(pv, 0.0) pv[n+1] = 1.0 else q = 1.0 - p a = p / q @inbounds pv[1] = pk = q ^ n for k = 1:n @inbounds pv[k+1] = (pk *= ((n - k + 1) / k) * a) end end return pv end #CURRENT FILE: Distributions.jl/src/multivariates.jl
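The generic `cor` above rescales `cov(d)` by the square roots of its diagonal entries. A short sketch, assuming Distributions.jl is loaded; the `MvNormal` parameters are made up for illustration:

using Distributions

d = MvNormal([0.0, 1.0], [2.0 0.6; 0.6 1.0])

R = cor(d)
R[1, 1] ≈ 1.0
R[1, 2] ≈ 0.6 / sqrt(2.0 * 1.0)   # C[i, j] / sqrt(C[i, i] * C[j, j])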
177
187
Distributions.jl
89
function __logpdf(
    d::ProductDistribution{N,M},
    x::AbstractArray{<:Real,N},
) where {N,M}
    # we use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
    # to compute `sum(logpdf.(d.dists, eachvariate))`
    @inbounds broadcasted = Broadcast.broadcasted(
        logpdf,
        d.dists,
        eachvariate(x, ArrayLikeVariate{M}),
    )
    return sum(Broadcast.instantiate(broadcasted))
end

function __logpdf(
    d::ProductDistribution{N,M},
    x::AbstractArray{<:Real,N},
) where {N,M}
    # we use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
    # to compute `sum(logpdf.(d.dists, eachvariate))`
    @inbounds broadcasted = Broadcast.broadcasted(
        logpdf,
        d.dists,
        eachvariate(x, ArrayLikeVariate{M}),
    )
    return sum(Broadcast.instantiate(broadcasted))
end

[ 177, 187 ]

function __logpdf(
    d::ProductDistribution{N,M},
    x::AbstractArray{<:Real,N},
) where {N,M}
    # we use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
    # to compute `sum(logpdf.(d.dists, eachvariate))`
    @inbounds broadcasted = Broadcast.broadcasted(
        logpdf,
        d.dists,
        eachvariate(x, ArrayLikeVariate{M}),
    )
    return sum(Broadcast.instantiate(broadcasted))
end

function __logpdf(
    d::ProductDistribution{N,M},
    x::AbstractArray{<:Real,N},
) where {N,M}
    # we use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
    # to compute `sum(logpdf.(d.dists, eachvariate))`
    @inbounds broadcasted = Broadcast.broadcasted(
        logpdf,
        d.dists,
        eachvariate(x, ArrayLikeVariate{M}),
    )
    return sum(Broadcast.instantiate(broadcasted))
end
__logpdf
177
187
src/product.jl
#FILE: Distributions.jl/src/common.jl ##CHUNK 1 """ @inline function logpdf!( out::AbstractArray{<:Real}, d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,M}, ) where {N,M} @boundscheck begin M > N || throw(DimensionMismatch( "number of dimensions of the variates ($M) must be greater than the dimension of the distribution ($N)" )) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) length(out) == prod(i -> size(x, i), (N + 1):M) || throw(DimensionMismatch("inconsistent array dimensions")) end return _logpdf!(out, d, x) end # default definition ##CHUNK 2 loglikelihood(d::Distribution{ArrayLikeVariate{N}}, x) where {N} The log-likelihood of distribution `d` with respect to all variate(s) contained in `x`. Here, `x` can be any output of `rand(d, dims...)` and `rand!(d, x)`. For instance, `x` can be - an array of dimension `N` with `size(x) == size(d)`, - an array of dimension `N + 1` with `size(x)[1:N] == size(d)`, or - an array of arrays `xi` of dimension `N` with `size(xi) == size(d)`. """ Base.@propagate_inbounds @inline function loglikelihood( d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,M}, ) where {N,M} if M == N return logpdf(d, x) else @boundscheck begin M > N || throw(DimensionMismatch( "number of dimensions of the variates ($M) must be greater than or equal to the dimension of the distribution ($N)" ##CHUNK 3 Base.@propagate_inbounds @inline function loglikelihood( d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,M}, ) where {N,M} if M == N return logpdf(d, x) else @boundscheck begin M > N || throw(DimensionMismatch( "number of dimensions of the variates ($M) must be greater than or equal to the dimension of the distribution ($N)" )) ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end return @inbounds sum(Base.Fix1(logpdf, d), eachvariate(x, ArrayLikeVariate{N})) end end Base.@propagate_inbounds function loglikelihood( d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:AbstractArray{<:Real,N}}, ) where {N} #CURRENT FILE: Distributions.jl/src/product.jl ##CHUNK 1 function __logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real,N}) where {N} # we use pairwise summation (https://github.com/JuliaLang/julia/pull/31020) # without allocations to compute `sum(logpdf.(d.dists, x))` broadcasted = Broadcast.broadcasted(logpdf, d.dists, x) return sum(Broadcast.instantiate(broadcasted)) end # more efficient implementation of `_rand!` for `Fill` array of univariate distributions function _rand!( rng::AbstractRNG, d::FillArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N}, ) where {N} return @inbounds rand!(rng, sampler(first(d.dists)), x) end # more efficient implementation of `_logpdf` for `Fill` array of univariate distributions # we have to fix a method ambiguity function _logpdf( d::FillArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N} ##CHUNK 2 d::ProductDistribution{N,M,<:Fill}, x::AbstractArray{<:Real,N}, ) where {N,M} return __logpdf(d, x) end function _logpdf( d::ProductDistribution{2,M,<:Fill}, x::AbstractMatrix{<:Real}, ) where {M} return __logpdf(d, x) end function __logpdf( d::ProductDistribution{N,M,<:Fill}, x::AbstractArray{<:Real,N}, ) where {N,M} return @inbounds loglikelihood(first(d.dists), x) end """ product_distribution(dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}},N}) ##CHUNK 3 end return x end # `_logpdf` for arrays of univariate distributions # we have to fix a method 
ambiguity function _logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real,N}) where {N} return __logpdf(d, x) end _logpdf(d::MatrixOfUnivariateDistribution, x::AbstractMatrix{<:Real}) = __logpdf(d, x) function __logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real,N}) where {N} # we use pairwise summation (https://github.com/JuliaLang/julia/pull/31020) # without allocations to compute `sum(logpdf.(d.dists, x))` broadcasted = Broadcast.broadcasted(logpdf, d.dists, x) return sum(Broadcast.instantiate(broadcasted)) end # more efficient implementation of `_rand!` for `Fill` array of univariate distributions function _rand!( rng::AbstractRNG, ##CHUNK 4 d::ProductDistribution{N,M,<:Fill}, A::AbstractArray{<:Real,N}, ) where {N,M} @inbounds rand!(rng, sampler(first(d.dists)), A) return A end # more efficient implementation of `_logpdf` for `Fill` arrays of distributions # we have to fix a method ambiguity function _logpdf( d::ProductDistribution{N,M,<:Fill}, x::AbstractArray{<:Real,N}, ) where {N,M} return __logpdf(d, x) end function _logpdf( d::ProductDistribution{2,M,<:Fill}, x::AbstractMatrix{<:Real}, ) where {M} return __logpdf(d, x) ##CHUNK 5 # `_rand! for arrays of distributions function _rand!( rng::AbstractRNG, d::ProductDistribution{N,M}, A::AbstractArray{<:Real,N}, ) where {N,M} @inbounds for (di, Ai) in zip(d.dists, eachvariate(A, ArrayLikeVariate{M})) rand!(rng, di, Ai) end return A end # `_logpdf` for arrays of distributions # we have to fix some method ambiguities _logpdf(d::ProductDistribution{N}, x::AbstractArray{<:Real,N}) where {N} = __logpdf(d, x) _logpdf(d::ProductDistribution{2}, x::AbstractMatrix{<:Real}) = __logpdf(d, x) # more efficient implementation of `_rand!` for `Fill` arrays of distributions function _rand!( rng::AbstractRNG, ##CHUNK 6 end function __logpdf( d::ProductDistribution{N,M,<:Fill}, x::AbstractArray{<:Real,N}, ) where {N,M} return @inbounds loglikelihood(first(d.dists), x) end """ product_distribution(dists::AbstractArray{<:Distribution{<:ArrayLikeVariate{M}},N}) Create a distribution of `M + N`-dimensional arrays as a product distribution of independent `M`-dimensional distributions by stacking them. The function falls back to constructing a [`ProductDistribution`](@ref) distribution but specialized methods can be defined. """ function product_distribution(dists::AbstractArray{<:Distribution{<:ArrayLikeVariate}}) return ProductDistribution(dists) end ##CHUNK 7 cov(d::ProductDistribution{2}, ::Val{false}) = reshape(cov(d), size(d)..., size(d)...) # `_rand!` for arrays of univariate distributions function _rand!( rng::AbstractRNG, d::ArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N}, ) where {N} @inbounds for (i, di) in zip(eachindex(x), d.dists) x[i] = rand(rng, di) end return x end # `_logpdf` for arrays of univariate distributions # we have to fix a method ambiguity function _logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real,N}) where {N} return __logpdf(d, x) end _logpdf(d::MatrixOfUnivariateDistribution, x::AbstractMatrix{<:Real}) = __logpdf(d, x)
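The `__logpdf` above sums the per-component log-densities with pairwise summation. A usage sketch through the public API, assuming Distributions.jl is loaded; note that `product_distribution` may return a specialized type for homogeneous inputs, so two different univariate distributions are used here, and the additivity of the log-density holds either way:

using Distributions

d = product_distribution([Normal(0.0, 1.0), Gamma(2.0, 3.0)])

x = [0.5, 1.0]
logpdf(d, x) ≈ logpdf(Normal(0.0, 1.0), x[1]) + logpdf(Gamma(2.0, 3.0), x[2])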
3
35
Distributions.jl
90
function quantile_bisect(d::ContinuousUnivariateDistribution, p::Real, lx::T, rx::T) where {T<:Real}
    rx < lx && throw(ArgumentError("empty bracketing interval [$lx, $rx]"))
    # In some special cases, e.g. #1501, `rx == lx`
    # If the distribution is degenerate the check below can fail, hence we skip it
    if rx == lx
        # Returns `lx` of the same type as `(lx + rx) / 2`
        # For specific types such as `Float64` it is more performant than `oftype((lx + rx) / 2, lx)`
        return middle(lx)
    end
    # base tolerance on types to support e.g. `Float32` (avoids an infinite loop)
    # ≈ 3.7e-11 for Float64
    # ≈ 2.4e-5 for Float32
    tol = cbrt(eps(float(T)))^2
    # find quantile using bisect algorithm
    cl = cdf(d, lx)
    cr = cdf(d, rx)
    cl <= p <= cr || throw(ArgumentError("[$lx, $rx] is not a valid bracketing interval for `quantile(d, $p)`"))
    while rx - lx > tol
        m = (lx + rx)/2
        c = cdf(d, m)
        if p > c
            cl = c
            lx = m
        else
            cr = c
            rx = m
        end
    end
    return (lx + rx)/2
end

function quantile_bisect(d::ContinuousUnivariateDistribution, p::Real, lx::T, rx::T) where {T<:Real}
    rx < lx && throw(ArgumentError("empty bracketing interval [$lx, $rx]"))
    # In some special cases, e.g. #1501, `rx == lx`
    # If the distribution is degenerate the check below can fail, hence we skip it
    if rx == lx
        # Returns `lx` of the same type as `(lx + rx) / 2`
        # For specific types such as `Float64` it is more performant than `oftype((lx + rx) / 2, lx)`
        return middle(lx)
    end
    # base tolerance on types to support e.g. `Float32` (avoids an infinite loop)
    # ≈ 3.7e-11 for Float64
    # ≈ 2.4e-5 for Float32
    tol = cbrt(eps(float(T)))^2
    # find quantile using bisect algorithm
    cl = cdf(d, lx)
    cr = cdf(d, rx)
    cl <= p <= cr || throw(ArgumentError("[$lx, $rx] is not a valid bracketing interval for `quantile(d, $p)`"))
    while rx - lx > tol
        m = (lx + rx)/2
        c = cdf(d, m)
        if p > c
            cl = c
            lx = m
        else
            cr = c
            rx = m
        end
    end
    return (lx + rx)/2
end

[ 3, 35 ]

function quantile_bisect(d::ContinuousUnivariateDistribution, p::Real, lx::T, rx::T) where {T<:Real}
    rx < lx && throw(ArgumentError("empty bracketing interval [$lx, $rx]"))
    # In some special cases, e.g. #1501, `rx == lx`
    # If the distribution is degenerate the check below can fail, hence we skip it
    if rx == lx
        # Returns `lx` of the same type as `(lx + rx) / 2`
        # For specific types such as `Float64` it is more performant than `oftype((lx + rx) / 2, lx)`
        return middle(lx)
    end
    # base tolerance on types to support e.g. `Float32` (avoids an infinite loop)
    # ≈ 3.7e-11 for Float64
    # ≈ 2.4e-5 for Float32
    tol = cbrt(eps(float(T)))^2
    # find quantile using bisect algorithm
    cl = cdf(d, lx)
    cr = cdf(d, rx)
    cl <= p <= cr || throw(ArgumentError("[$lx, $rx] is not a valid bracketing interval for `quantile(d, $p)`"))
    while rx - lx > tol
        m = (lx + rx)/2
        c = cdf(d, m)
        if p > c
            cl = c
            lx = m
        else
            cr = c
            rx = m
        end
    end
    return (lx + rx)/2
end

function quantile_bisect(d::ContinuousUnivariateDistribution, p::Real, lx::T, rx::T) where {T<:Real}
    rx < lx && throw(ArgumentError("empty bracketing interval [$lx, $rx]"))
    # In some special cases, e.g. #1501, `rx == lx`
    # If the distribution is degenerate the check below can fail, hence we skip it
    if rx == lx
        # Returns `lx` of the same type as `(lx + rx) / 2`
        # For specific types such as `Float64` it is more performant than `oftype((lx + rx) / 2, lx)`
        return middle(lx)
    end
    # base tolerance on types to support e.g. `Float32` (avoids an infinite loop)
    # ≈ 3.7e-11 for Float64
    # ≈ 2.4e-5 for Float32
    tol = cbrt(eps(float(T)))^2
    # find quantile using bisect algorithm
    cl = cdf(d, lx)
    cr = cdf(d, rx)
    cl <= p <= cr || throw(ArgumentError("[$lx, $rx] is not a valid bracketing interval for `quantile(d, $p)`"))
    while rx - lx > tol
        m = (lx + rx)/2
        c = cdf(d, m)
        if p > c
            cl = c
            lx = m
        else
            cr = c
            rx = m
        end
    end
    return (lx + rx)/2
end
quantile_bisect
3
35
src/quantilealgs.jl
#FILE: Distributions.jl/src/univariate/continuous/triangular.jl ##CHUNK 1 # Handle x == c separately to avoid `NaN` if `c == a` or `c == b` oftype(x - a, 2) / (b - a) end return insupport(d, x) ? res : zero(res) end logpdf(d::TriangularDist, x::Real) = log(pdf(d, x)) function cdf(d::TriangularDist, x::Real) a, b, c = params(d) if x < c res = (x - a)^2 / ((b - a) * (c - a)) return x < a ? zero(res) : res else res = 1 - (b - x)^2 / ((b - a) * (b - c)) return x ≥ b ? one(res) : res end end function quantile(d::TriangularDist, p::Real) (a, b, c) = params(d) ##CHUNK 2 function mgf(d::TriangularDist, t::Real) a, b, c = params(d) # In principle, only two branches (degenerate + non-degenerate case) are needed # But writing out all four cases will avoid unnecessary computations if a < c if c < b # Case: a < c < b return exp(c * t) * ((c - a) * _phi2((a - c) * t) + (b - c) * _phi2((b - c) * t)) / (b - a) else # Case: a < c = b return exp(c * t) * _phi2((a - c) * t) end elseif c < b # Case: a = c < b return exp(c * t) * _phi2((b - c) * t) else # Case: a = c = b return exp(c * t) end end #FILE: Distributions.jl/src/multivariate/jointorderstatistics.jl ##CHUNK 1 issorted(x) && return lp return oftype(lp, -Inf) end i = first(ranks) xᵢ = first(x) if i > 1 # _marginalize_range(d.dist, 0, i, -Inf, xᵢ, T) lp += (i - 1) * logcdf(d.dist, xᵢ) - loggamma(T(i)) end for (j, xⱼ) in Iterators.drop(zip(ranks, x), 1) xⱼ < xᵢ && return oftype(lp, -Inf) lp += _marginalize_range(d.dist, i, j, xᵢ, xⱼ, T) i = j xᵢ = xⱼ end if i < n # _marginalize_range(d.dist, i, n + 1, xᵢ, Inf, T) lp += (n - i) * logccdf(d.dist, xᵢ) - loggamma(T(n - i + 1)) end return lp end ##CHUNK 2 # given ∏ₖf(xₖ), marginalize all xₖ for i < k < j function _marginalize_range(dist, i, j, xᵢ, xⱼ, T) k = j - i - 1 k == 0 && return zero(T) return k * T(logdiffcdf(dist, xⱼ, xᵢ)) - loggamma(T(k + 1)) end function _rand!(rng::AbstractRNG, d::JointOrderStatistics, x::AbstractVector{<:Real}) n = d.n if n == length(d.ranks) # ranks == 1:n # direct method, slower than inversion method for large `n` and distributions with # fast quantile function or that use inversion sampling rand!(rng, d.dist, x) sort!(x) else # use exponential generation method with inversion, where for gaps in the ranks, we # use the fact that the sum Y of k IID variables xₘ ~ Exp(1) is Y ~ Gamma(k, 1). # Lurie, D., and H. O. Hartley. "Machine-generation of order statistics for Monte # Carlo computations." The American Statistician 26.1 (1972): 26-27. # this is slow if length(d.ranks) is close to n and quantile for d.dist is expensive, #FILE: Distributions.jl/test/testutils.jl ##CHUNK 1 # q01 and q99 are 0.01 and 0.99 quantiles, and count the numbers of samples # falling into each bin. For each bin, we will compute a confidence interval # of the number, and see whether the actual number is in this interval. # # If the distribution has a bounded range, it also checks whether # the samples are all within this range. # # By setting a small q, we ensure that failure of the tests rarely # happen in practice. 
# verbose && println("test_samples on $(typeof(s))") n > 1 || error("The number of samples must be greater than 1.") nbins > 1 || error("The number of bins must be greater than 1.") 0.0 < q < 0.1 || error("The value of q must be within the open interval (0.0, 0.1).") # determine the range of values to examine vmin = minimum(distr) vmax = maximum(distr) #FILE: Distributions.jl/test/pdfnorm.jl ##CHUNK 1 using Test, Distributions, SpecialFunctions using QuadGK # `numeric_norm` is a helper function to compute numerically the squared L2 # norms of the distributions. These methods aren't very robust because can't # deal with divergent norms, or discrete distributions with infinite support. numeric_norm(d::ContinuousUnivariateDistribution) = quadgk(x -> pdf(d, x) ^ 2, support(d).lb, support(d).ub)[1] function numeric_norm(d::DiscreteUnivariateDistribution) # When the distribution has infinite support, sum up to an arbitrary large # value. upper = isfinite(maximum(d)) ? round(Int, maximum(d)) : 100 return sum(pdf(d, k) ^ 2 for k in round(Int, minimum(d)):upper) end @testset "pdf L2 norm" begin # Test error on a non implemented norm. @test_throws MethodError pdfsquaredL2norm(Gumbel()) #FILE: Distributions.jl/test/univariate/orderstatistic.jl ##CHUNK 1 @test logccdf(d, maximum(d)) ≈ -Inf @test logccdf(d, minimum(d) - 1) ≈ zero(T) q = cdf(d, x) if dist isa DiscreteDistribution # for discrete distributions, tiny numerical error can cause the wrong # integer value to be returned. q -= sqrt(eps(T)) end xq = @inferred(T, quantile(d, q)) xqc = @inferred(T, cquantile(d, 1 - q)) @test xq ≈ xqc @test isapprox(xq, T(x); atol=1e-4) || (dist isa DiscreteDistribution && xq < x) end end end @testset "rand" begin @testset for T in [Float32, Float64], dist in [Uniform(T(-2), T(1)), Normal(T(1), T(2))] #FILE: Distributions.jl/src/samplers/binomial.jl ##CHUNK 1 c::Float64 λL::Float64 λR::Float64 end BinomialTPESampler() = BinomialTPESampler(false, 0, 0., 0., 0., 0., 0, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.) function BinomialTPESampler(n::Int, prob::Float64) if prob <= 0.5 comp = false r = prob q = 1.0 - prob else comp = true r = 1.0 - prob q = prob end #FILE: Distributions.jl/src/truncated/normal.jl ##CHUNK 1 u = rand(rng) if u < exp(-0.5 * (r - a)^2) && r < ub return r end end elseif ub < 0 && ub - lb > 2.0 / (-ub + sqrt(ub^2 + 4.0)) * exp((ub^2 + ub * sqrt(ub^2 + 4.0)) / 4.0) a = (-ub + sqrt(ub^2 + 4.0)) / 2.0 while true r = rand(rng, Exponential(1.0 / a)) - ub u = rand(rng) if u < exp(-0.5 * (r - a)^2) && r < -lb return -r end end else while true r = lb + rand(rng) * (ub - lb) u = rand(rng) if lb > 0 rho = exp((lb^2 - r^2) * 0.5) #FILE: Distributions.jl/src/univariate/orderstatistic.jl ##CHUNK 1 @eval begin function $f(d::OrderStatistic, p::Real) # since cdf is Fᵢₙ(x) = Uᵢₙ(Fₓ(x)), and Uᵢₙ is invertible and increasing, we # have Fₓ(x) = Uᵢₙ⁻¹(Fᵢₙ(x)). then quantile function is # Qᵢₙ(p) = inf{x: p ≤ Fᵢₙ(x)} = inf{x: Uᵢₙ⁻¹(p) ≤ Fₓ(x)} = Qₓ(Uᵢₙ⁻¹(p)) b = _uniform_orderstatistic(d) return quantile(d.dist, $f(b, p)) end end end function rand(rng::AbstractRNG, d::OrderStatistic) # inverse transform sampling. Since quantile function is Qₓ(Uᵢₙ⁻¹(p)), we draw a random # variable from Uᵢₙ and pass it through the quantile function of `d.dist` T = eltype(d.dist) b = _uniform_orderstatistic(d) return T(quantile(d.dist, rand(rng, b))) end #CURRENT FILE: Distributions.jl/src/quantilealgs.jl
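`quantile_bisect` is an internal helper rather than exported API; it requires `cdf(d, lx) <= p <= cdf(d, rx)` and halves the bracket until it is narrower than the type-based tolerance. A hedged sketch of calling it directly with the four-argument signature shown above, assuming Distributions.jl is loaded:

using Distributions

d = Normal()
p = 0.975

q = Distributions.quantile_bisect(d, p, -10.0, 10.0)   # bracket satisfies cdf(d, -10) <= p <= cdf(d, 10)
q ≈ quantile(d, p)                                     # ≈ 1.96 for the standard normal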
25
48
Distributions.jl
91
function _use_multline_show(d::Distribution, pnames)
    # decide whether to use one-line or multi-line format
    #
    # Criteria: if total number of values is greater than 8, or
    # there are params that are neither numbers, tuples, or vectors,
    # we use multi-line format
    #
    namevals = _NameVal[]
    multline = false
    tlen = 0
    for (i, p) in enumerate(pnames)
        pv = getfield(d, p)
        if !(isa(pv, Number) || isa(pv, NTuple) || isa(pv, AbstractVector))
            multline = true
        else
            tlen += length(pv)
        end
        push!(namevals, (p, pv))
    end
    if tlen > 8
        multline = true
    end
    return (multline, namevals)
end

function _use_multline_show(d::Distribution, pnames)
    # decide whether to use one-line or multi-line format
    #
    # Criteria: if total number of values is greater than 8, or
    # there are params that are neither numbers, tuples, or vectors,
    # we use multi-line format
    #
    namevals = _NameVal[]
    multline = false
    tlen = 0
    for (i, p) in enumerate(pnames)
        pv = getfield(d, p)
        if !(isa(pv, Number) || isa(pv, NTuple) || isa(pv, AbstractVector))
            multline = true
        else
            tlen += length(pv)
        end
        push!(namevals, (p, pv))
    end
    if tlen > 8
        multline = true
    end
    return (multline, namevals)
end

[ 25, 48 ]

function _use_multline_show(d::Distribution, pnames)
    # decide whether to use one-line or multi-line format
    #
    # Criteria: if total number of values is greater than 8, or
    # there are params that are neither numbers, tuples, or vectors,
    # we use multi-line format
    #
    namevals = _NameVal[]
    multline = false
    tlen = 0
    for (i, p) in enumerate(pnames)
        pv = getfield(d, p)
        if !(isa(pv, Number) || isa(pv, NTuple) || isa(pv, AbstractVector))
            multline = true
        else
            tlen += length(pv)
        end
        push!(namevals, (p, pv))
    end
    if tlen > 8
        multline = true
    end
    return (multline, namevals)
end

function _use_multline_show(d::Distribution, pnames)
    # decide whether to use one-line or multi-line format
    #
    # Criteria: if total number of values is greater than 8, or
    # there are params that are neither numbers, tuples, or vectors,
    # we use multi-line format
    #
    namevals = _NameVal[]
    multline = false
    tlen = 0
    for (i, p) in enumerate(pnames)
        pv = getfield(d, p)
        if !(isa(pv, Number) || isa(pv, NTuple) || isa(pv, AbstractVector))
            multline = true
        else
            tlen += length(pv)
        end
        push!(namevals, (p, pv))
    end
    if tlen > 8
        multline = true
    end
    return (multline, namevals)
end
_use_multline_show
25
48
src/show.jl
#FILE: Distributions.jl/src/Distributions.jl ##CHUNK 1 modes, # mode(s) of distribution as vector moment, # moments of distribution nsamples, # get the number of samples contained in an array ncategories, # the number of categories in a Categorical distribution ncomponents, # the number of components in a mixture model ntrials, # the number of trials being performed in the experiment params, # get the tuple of parameters params!, # provide storage space to calculate the tuple of parameters for a multivariate distribution like mvlognormal partype, # returns a type large enough to hold all of a distribution's parameters' element types pdf, # probability density function (ContinuousDistribution) pdfsquaredL2norm, # squared L2 norm of the probability density function probs, # Get the vector of probabilities probval, # The pdf/pmf value for a uniform distribution product_distribution, # product of univariate distributions quantile, # inverse of cdf (defined for p in (0,1)) qqbuild, # build a paired quantiles data structure for qqplots rate, # get the rate parameter sampler, # create a Sampler object for efficient samples scale, # get the scale parameter scale!, # provide storage for the scale parameter (used in multivariate distribution mvlognormal) #FILE: Distributions.jl/test/matrixvariates.jl ##CHUNK 1 # ============================================================================= # 6. main method # ============================================================================= function test_matrixvariate(dist::Type{<:MatrixDistribution}, n::Integer, p::Integer, M::Integer) test_distr(dist, n, p, M) test_against_univariate(dist) test_against_multivariate(dist, n, p) test_against_stan(dist) test_special(dist) nothing end # ============================================================================= # 7. run unit tests for matrix-variate distributions # ============================================================================= ##CHUNK 2 matrixvariates = [(MatrixNormal, 2, 4, 10^5), (Wishart, 2, 2, 10^6), (InverseWishart, 2, 2, 10^6), (MatrixTDist, 2, 4, 10^5), (MatrixBeta, 3, 3, 10^5), (MatrixFDist, 3, 3, 10^5), (LKJ, 3, 3, 10^5)] for distribution in matrixvariates dist, n, p, M = distribution println(" testing $(dist)") @testset "$(dist)" begin test_matrixvariate(dist, n, p, M) end end end #FILE: Distributions.jl/src/genericfit.jl ##CHUNK 1 # generic functions for distribution fitting function suffstats(dt::Type{D}, xs...) where D<:Distribution argtypes = tuple(D, map(typeof, xs)...) error("suffstats is not implemented for $argtypes.") end """ fit_mle(D, x) Fit a distribution of type `D` to a given data set `x`. - For univariate distribution, x can be an array of arbitrary size. - For multivariate distribution, x should be a matrix, where each column is a sample. """ fit_mle(D, x) """ fit_mle(D, x, w) #FILE: Distributions.jl/src/matrixvariates.jl ##CHUNK 1 Compute the 4-dimensional array whose `(i, j, k, l)` element is `cov(X[i,j], X[k, l])`. """ function cov(d::MatrixDistribution, ::Val{false}) n, p = size(d) [cov(d, i, j, k, l) for i in 1:n, j in 1:p, k in 1:n, l in 1:p] end # pdf & logpdf # TODO: Remove or restrict - this causes many ambiguity errors... 
_logpdf(d::MatrixDistribution, X::AbstractMatrix{<:Real}) = logkernel(d, X) + d.logc0 # for testing is_univariate(d::MatrixDistribution) = size(d) == (1, 1) check_univariate(d::MatrixDistribution) = is_univariate(d) || throw(ArgumentError("not 1 x 1")) ##### Specific distributions ##### for fname in ["wishart.jl", "inversewishart.jl", "matrixnormal.jl", #CURRENT FILE: Distributions.jl/src/show.jl ##CHUNK 1 end const _NameVal = Tuple{Symbol,Any} function _use_multline_show(d::Distribution) _use_multline_show(d, fieldnames(typeof(d))) end function show_oneline(io::IO, d::Distribution, namevals) print(io, distrname(d)) np = length(namevals) print(io, '(') for (i, nv) in enumerate(namevals) (p, pv) = nv print(io, p) print(io, '=') show(io, pv) if i < np print(io, ", ") ##CHUNK 2 print(io, distrname(d)) np = length(namevals) print(io, '(') for (i, nv) in enumerate(namevals) (p, pv) = nv print(io, p) print(io, '=') show(io, pv) if i < np print(io, ", ") end end print(io, ')') end function show_multline(io::IO, d::Distribution, namevals; newline=true) print(io, distrname(d)) println(io, "(") for (p, pv) in namevals print(io, p) ##CHUNK 3 # the name of a distribution # # Generally, this should be just the type name, e.g. Normal. # Under certain circumstances, one may want to specialize # this function to provide a name that is easier to read, # especially when the type is parametric. # distrname(d::Distribution) = string(typeof(d)) show(io::IO, d::Distribution) = show(io, d, fieldnames(typeof(d))) # For some distributions, the fields may contain internal details, # which we don't want to show, this function allows one to # specify which fields to show. # function show(io::IO, d::Distribution, pnames) uml, namevals = _use_multline_show(d, pnames) uml ? show_multline(io, d, namevals) : show_oneline(io, d, namevals) ##CHUNK 4 show(io::IO, d::Distribution) = show(io, d, fieldnames(typeof(d))) # For some distributions, the fields may contain internal details, # which we don't want to show, this function allows one to # specify which fields to show. # function show(io::IO, d::Distribution, pnames) uml, namevals = _use_multline_show(d, pnames) uml ? show_multline(io, d, namevals) : show_oneline(io, d, namevals) end const _NameVal = Tuple{Symbol,Any} function _use_multline_show(d::Distribution) _use_multline_show(d, fieldnames(typeof(d))) end function show_oneline(io::IO, d::Distribution, namevals) ##CHUNK 5 end end print(io, ')') end function show_multline(io::IO, d::Distribution, namevals; newline=true) print(io, distrname(d)) println(io, "(") for (p, pv) in namevals print(io, p) print(io, ": ") println(io, pv) end newline ? println(io, ")") : print(io, ")") end
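`_use_multline_show` drives how `show` prints a distribution: scalar parameters totalling at most 8 values stay on one line, while non-scalar fields (or more than 8 values) switch to the multi-line layout. A sketch, assuming Distributions.jl and LinearAlgebra are loaded; the printed forms in the comments are indicative only:

using Distributions, LinearAlgebra

show(stdout, Normal())                                      # one-line, e.g. Normal{Float64}(μ=0.0, σ=1.0)
show(stdout, MvNormal(zeros(3), Matrix{Float64}(I, 3, 3)))  # the covariance field is a PDMat, so the multi-line branch is taken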
168
178
Distributions.jl
92
function cdf(d::Truncated, x::Real)
    result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1)
    # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial`
    return if d.lower !== nothing && x < d.lower
        zero(result)
    elseif d.upper !== nothing && x >= d.upper
        one(result)
    else
        result
    end
end

function cdf(d::Truncated, x::Real)
    result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1)
    # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial`
    return if d.lower !== nothing && x < d.lower
        zero(result)
    elseif d.upper !== nothing && x >= d.upper
        one(result)
    else
        result
    end
end

[ 168, 178 ]

function cdf(d::Truncated, x::Real)
    result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1)
    # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial`
    return if d.lower !== nothing && x < d.lower
        zero(result)
    elseif d.upper !== nothing && x >= d.upper
        one(result)
    else
        result
    end
end

function cdf(d::Truncated, x::Real)
    result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1)
    # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial`
    return if d.lower !== nothing && x < d.lower
        zero(result)
    elseif d.upper !== nothing && x >= d.upper
        one(result)
    else
        result
    end
end
cdf
168
178
src/truncate.jl
#FILE: Distributions.jl/src/censored.jl ##CHUNK 1 elseif upper === nothing || x < upper result else one(result) end end function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end ##CHUNK 2 result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) ##CHUNK 3 end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper result else oftype(result, -Inf) end end #### Sampling rand(rng::AbstractRNG, d::Censored) = _clamp(rand(rng, d.uncensored), d.lower, d.upper) ##CHUNK 4 lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored #FILE: Distributions.jl/test/censored.jl ##CHUNK 1 truncated( d0, d.lower === nothing ? -Inf : floor(d.lower) + 1, d.upper === nothing ? Inf : ceil(d.upper) - 1, ) elseif d0 isa ContinuousDistribution truncated( d0, d.lower === nothing ? -Inf : nextfloat(float(d.lower)), d.upper === nothing ? Inf : prevfloat(float(d.upper)), ) else error("truncation to open interval not implemented for $d0") end prob_lower = d.lower === nothing ? 0 : cdf(d0, d.lower) prob_upper = if d.upper === nothing 0 elseif d0 isa ContinuousDistribution ccdf(d0, d.upper) else #CURRENT FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result end ##CHUNK 2 return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end ##CHUNK 3 end function logpdf(d::Truncated, x::Real) result = logpdf(d.untruncated, x) - d.logtp return _in_closed_interval(x, d.lower, d.upper) ? 
result : oftype(result, -Inf) end function logcdf(d::Truncated, x::Real) result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end function ccdf(d::Truncated, x::Real) ##CHUNK 4 function quantile(d::Truncated, p::Real) x = quantile(d.untruncated, d.lcdf + p * d.tp) min_x, max_x = extrema(d) return clamp(x, oftype(x, min_x), oftype(x, max_x)) end function pdf(d::Truncated, x::Real) result = pdf(d.untruncated, x) / d.tp return _in_closed_interval(x, d.lower, d.upper) ? result : zero(result) end function logpdf(d::Truncated, x::Real) result = logpdf(d.untruncated, x) - d.logtp return _in_closed_interval(x, d.lower, d.upper) ? result : oftype(result, -Inf) end function logcdf(d::Truncated, x::Real) result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp ##CHUNK 5 function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result end end ## random number generation function rand(rng::AbstractRNG, d::Truncated) d0 = d.untruncated tp = d.tp lower = d.lower upper = d.upper if tp > 0.25
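A quick sketch of the clamping and the out-of-support special cases in the `cdf(d::Truncated, x)` method above, assuming Distributions.jl is loaded; the normal base distribution and the bounds are illustrative:

using Distributions

d0 = Normal()
d = truncated(d0, -1.0, 2.0)

cdf(d, -5.0) == 0.0      # x < lower  → zero(result)
cdf(d, 2.5)  == 1.0      # x >= upper → one(result)
cdf(d, 0.0) ≈ (cdf(d0, 0.0) - cdf(d0, -1.0)) / (cdf(d0, 2.0) - cdf(d0, -1.0))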
191
201
Distributions.jl
93
function ccdf(d::Truncated, x::Real)
    result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1)
    # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial`
    return if d.lower !== nothing && x <= d.lower
        one(result)
    elseif d.upper !== nothing && x > d.upper
        zero(result)
    else
        result
    end
end

function ccdf(d::Truncated, x::Real)
    result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1)
    # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial`
    return if d.lower !== nothing && x <= d.lower
        one(result)
    elseif d.upper !== nothing && x > d.upper
        zero(result)
    else
        result
    end
end

[ 191, 201 ]

function ccdf(d::Truncated, x::Real)
    result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1)
    # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial`
    return if d.lower !== nothing && x <= d.lower
        one(result)
    elseif d.upper !== nothing && x > d.upper
        zero(result)
    else
        result
    end
end

function ccdf(d::Truncated, x::Real)
    result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1)
    # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial`
    return if d.lower !== nothing && x <= d.lower
        one(result)
    elseif d.upper !== nothing && x > d.upper
        zero(result)
    else
        result
    end
end
ccdf
191
201
src/truncate.jl
#FILE: Distributions.jl/src/censored.jl ##CHUNK 1 elseif upper === nothing || x < upper result else one(result) end end function logcdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end ##CHUNK 2 lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored ##CHUNK 3 function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper ##CHUNK 4 result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) #FILE: Distributions.jl/test/censored.jl ##CHUNK 1 truncated( d0, d.lower === nothing ? -Inf : floor(d.lower) + 1, d.upper === nothing ? Inf : ceil(d.upper) - 1, ) elseif d0 isa ContinuousDistribution truncated( d0, d.lower === nothing ? -Inf : nextfloat(float(d.lower)), d.upper === nothing ? Inf : prevfloat(float(d.upper)), ) else error("truncation to open interval not implemented for $d0") end prob_lower = d.lower === nothing ? 0 : cdf(d0, d.lower) prob_upper = if d.upper === nothing 0 elseif d0 isa ContinuousDistribution ccdf(d0, d.upper) else #CURRENT FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 end function logpdf(d::Truncated, x::Real) result = logpdf(d.untruncated, x) - d.logtp return _in_closed_interval(x, d.lower, d.upper) ? result : oftype(result, -Inf) end function cdf(d::Truncated, x::Real) result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) ##CHUNK 2 function quantile(d::Truncated, p::Real) x = quantile(d.untruncated, d.lcdf + p * d.tp) min_x, max_x = extrema(d) return clamp(x, oftype(x, min_x), oftype(x, max_x)) end function pdf(d::Truncated, x::Real) result = pdf(d.untruncated, x) / d.tp return _in_closed_interval(x, d.lower, d.upper) ? result : zero(result) end function logpdf(d::Truncated, x::Real) result = logpdf(d.untruncated, x) - d.logtp return _in_closed_interval(x, d.lower, d.upper) ? result : oftype(result, -Inf) end function cdf(d::Truncated, x::Real) result = clamp((cdf(d.untruncated, x) - d.lcdf) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. 
NaN issues with `Binomial` ##CHUNK 3 return if d.lower !== nothing && x < d.lower zero(result) elseif d.upper !== nothing && x >= d.upper one(result) else result end end function logcdf(d::Truncated, x::Real) result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end ##CHUNK 4 result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result end ##CHUNK 5 function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result end end ## random number generation function rand(rng::AbstractRNG, d::Truncated) d0 = d.untruncated tp = d.tp lower = d.lower upper = d.upper if tp > 0.25
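The `ccdf` method mirrors `cdf` with the boundary comparisons flipped (`<=` at the lower bound, `>` at the upper bound). A short sketch under the same assumptions as the `cdf` example above:

using Distributions

d = truncated(Normal(), -1.0, 2.0)

ccdf(d, -1.0) == 1.0              # x <= lower → one(result)
ccdf(d, 2.5)  == 0.0              # x >  upper → zero(result)
cdf(d, 0.5) + ccdf(d, 0.5) ≈ 1.0  # inside the bracket the two unclamped branches sum to one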
426
463
Distributions.jl
94
function _pdf_fill_outside!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange)
    vl = vfirst = first(X)
    vr = vlast = last(X)
    n = vlast - vfirst + 1
    if islowerbounded(d)
        lb = minimum(d)
        if vl < lb
            vl = lb
        end
    end
    if isupperbounded(d)
        ub = maximum(d)
        if vr > ub
            vr = ub
        end
    end
    # fill left part
    if vl > vfirst
        for i = 1:(vl - vfirst)
            r[i] = 0.0
        end
    end
    # fill central part: with non-zero pdf
    fm1 = vfirst - 1
    for v = vl:vr
        r[v - fm1] = pdf(d, v)
    end
    # fill right part
    if vr < vlast
        for i = (vr-vfirst+2):n
            r[i] = 0.0
        end
    end
    return vl, vr, vfirst, vlast
end

function _pdf_fill_outside!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange)
    vl = vfirst = first(X)
    vr = vlast = last(X)
    n = vlast - vfirst + 1
    if islowerbounded(d)
        lb = minimum(d)
        if vl < lb
            vl = lb
        end
    end
    if isupperbounded(d)
        ub = maximum(d)
        if vr > ub
            vr = ub
        end
    end
    # fill left part
    if vl > vfirst
        for i = 1:(vl - vfirst)
            r[i] = 0.0
        end
    end
    # fill central part: with non-zero pdf
    fm1 = vfirst - 1
    for v = vl:vr
        r[v - fm1] = pdf(d, v)
    end
    # fill right part
    if vr < vlast
        for i = (vr-vfirst+2):n
            r[i] = 0.0
        end
    end
    return vl, vr, vfirst, vlast
end

[ 426, 463 ]

function _pdf_fill_outside!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange)
    vl = vfirst = first(X)
    vr = vlast = last(X)
    n = vlast - vfirst + 1
    if islowerbounded(d)
        lb = minimum(d)
        if vl < lb
            vl = lb
        end
    end
    if isupperbounded(d)
        ub = maximum(d)
        if vr > ub
            vr = ub
        end
    end
    # fill left part
    if vl > vfirst
        for i = 1:(vl - vfirst)
            r[i] = 0.0
        end
    end
    # fill central part: with non-zero pdf
    fm1 = vfirst - 1
    for v = vl:vr
        r[v - fm1] = pdf(d, v)
    end
    # fill right part
    if vr < vlast
        for i = (vr-vfirst+2):n
            r[i] = 0.0
        end
    end
    return vl, vr, vfirst, vlast
end

function _pdf_fill_outside!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange)
    vl = vfirst = first(X)
    vr = vlast = last(X)
    n = vlast - vfirst + 1
    if islowerbounded(d)
        lb = minimum(d)
        if vl < lb
            vl = lb
        end
    end
    if isupperbounded(d)
        ub = maximum(d)
        if vr > ub
            vr = ub
        end
    end
    # fill left part
    if vl > vfirst
        for i = 1:(vl - vfirst)
            r[i] = 0.0
        end
    end
    # fill central part: with non-zero pdf
    fm1 = vfirst - 1
    for v = vl:vr
        r[v - fm1] = pdf(d, v)
    end
    # fill right part
    if vr < vlast
        for i = (vr-vfirst+2):n
            r[i] = 0.0
        end
    end
    return vl, vr, vfirst, vlast
end
_pdf_fill_outside!
426
463
src/univariates.jl
#FILE: Distributions.jl/test/testutils.jl ##CHUNK 1 elseif islowerbounded(d) @test map(Base.Fix1(pdf, d), rmin-2:rmax) ≈ vcat(0.0, 0.0, p0) end end function test_evaluation(d::DiscreteUnivariateDistribution, vs::AbstractVector, testquan::Bool=true) nv = length(vs) p = Vector{Float64}(undef, nv) c = Vector{Float64}(undef, nv) cc = Vector{Float64}(undef, nv) lp = Vector{Float64}(undef, nv) lc = Vector{Float64}(undef, nv) lcc = Vector{Float64}(undef, nv) ci = 0. for (i, v) in enumerate(vs) p[i] = pdf(d, v) c[i] = cdf(d, v) cc[i] = ccdf(d, v) ##CHUNK 2 cc = Vector{Float64}(undef, nv) lp = Vector{Float64}(undef, nv) lc = Vector{Float64}(undef, nv) lcc = Vector{Float64}(undef, nv) ci = 0. for (i, v) in enumerate(vs) p[i] = pdf(d, v) c[i] = cdf(d, v) cc[i] = ccdf(d, v) lp[i] = logpdf(d, v) lc[i] = logcdf(d, v) lcc[i] = logccdf(d, v) @assert p[i] >= 0.0 @assert (i == 1 || c[i] >= c[i-1]) ci += p[i] @test ci ≈ c[i] @test isapprox(c[i] + cc[i], 1.0 , atol=1.0e-12) ##CHUNK 3 p = Vector{Float64}(undef, nv) c = Vector{Float64}(undef, nv) cc = Vector{Float64}(undef, nv) lp = Vector{Float64}(undef, nv) lc = Vector{Float64}(undef, nv) lcc = Vector{Float64}(undef, nv) for (i, v) in enumerate(vs) if !isa(d, StudentizedRange) p[i] = pdf(d, v) lp[i] = logpdf(d, v) @assert p[i] >= 0.0 end c[i] = cdf(d, v) cc[i] = ccdf(d, v) lc[i] = logcdf(d, v) lcc[i] = logccdf(d, v) @assert (i == 1 || c[i] >= c[i-1]) #FILE: Distributions.jl/src/multivariate/jointorderstatistics.jl ##CHUNK 1 issorted(x) && return lp return oftype(lp, -Inf) end i = first(ranks) xᵢ = first(x) if i > 1 # _marginalize_range(d.dist, 0, i, -Inf, xᵢ, T) lp += (i - 1) * logcdf(d.dist, xᵢ) - loggamma(T(i)) end for (j, xⱼ) in Iterators.drop(zip(ranks, x), 1) xⱼ < xᵢ && return oftype(lp, -Inf) lp += _marginalize_range(d.dist, i, j, xᵢ, xⱼ, T) i = j xᵢ = xⱼ end if i < n # _marginalize_range(d.dist, i, n + 1, xᵢ, Inf, T) lp += (n - i) * logccdf(d.dist, xᵢ) - loggamma(T(n - i + 1)) end return lp end #FILE: Distributions.jl/src/samplers/binomial.jl ##CHUNK 1 # compute probability vector of a Binomial distribution function binompvec(n::Int, p::Float64) pv = Vector{Float64}(undef, n+1) if p == 0.0 fill!(pv, 0.0) pv[1] = 1.0 elseif p == 1.0 fill!(pv, 0.0) pv[n+1] = 1.0 else q = 1.0 - p a = p / q @inbounds pv[1] = pk = q ^ n for k = 1:n @inbounds pv[k+1] = (pk *= ((n - k + 1) / k) * a) end end return pv end #FILE: Distributions.jl/src/censored.jl ##CHUNK 1 lower = d.lower upper = d.upper logpx = logpdf(d0, first(x)) log_prob_lower = lower === nothing ? zero(logpx) : oftype(logpx, logcdf(d0, lower)) log_prob_upper = upper === nothing ? 
zero(logpx) : oftype(logpx, _logccdf_inclusive(d0, upper)) logzero = oftype(logpx, -Inf) return sum(x) do xi _in_open_interval(xi, lower, upper) && return logpdf(d0, xi) xi == lower && return log_prob_lower xi == upper && return log_prob_upper return logzero end end function cdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = cdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) #CURRENT FILE: Distributions.jl/src/univariates.jl ##CHUNK 1 fm1 = vfirst - 1 for v = vl:vr r[v - fm1] = pdf(d, v) end return r end abstract type RecursiveProbabilityEvaluator end function _pdf!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange, rpe::RecursiveProbabilityEvaluator) vl,vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) # fill central part: with non-zero pdf if vl <= vr fm1 = vfirst - 1 r[vl - fm1] = pv = pdf(d, vl) for v = (vl+1):vr r[v - fm1] = pv = nextpdf(rpe, pv, v) end ##CHUNK 2 # gradlogpdf gradlogpdf(d::ContinuousUnivariateDistribution, x::Real) = throw(MethodError(gradlogpdf, (d, x))) function _pdf!(r::AbstractArray{<:Real}, d::DiscreteUnivariateDistribution, X::UnitRange) vl,vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) # fill central part: with non-zero pdf fm1 = vfirst - 1 for v = vl:vr r[v - fm1] = pdf(d, v) end return r end abstract type RecursiveProbabilityEvaluator end ##CHUNK 3 function _pdf!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange, rpe::RecursiveProbabilityEvaluator) vl,vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) # fill central part: with non-zero pdf if vl <= vr fm1 = vfirst - 1 r[vl - fm1] = pv = pdf(d, vl) for v = (vl+1):vr r[v - fm1] = pv = nextpdf(rpe, pv, v) end end return r end ### special definitions for distributions with integer-valued support function cdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) ##CHUNK 4 else c = 1 - sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) x >= maximum_d ? one(c) : c end return result end function integerunitrange_ccdf(d::DiscreteUnivariateDistribution, x::Integer) minimum_d, maximum_d = extrema(d) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") result = if isfinite(minimum_d) && !(isfinite(maximum_d) && x >= div(minimum_d + maximum_d, 2)) c = 1 - sum(Base.Fix1(pdf, d), minimum_d:(max(x, minimum_d))) x < minimum_d ? one(c) : c else c = sum(Base.Fix1(pdf, d), (min(x + 1, maximum_d)):maximum_d) x >= maximum_d ? zero(c) : c end
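As an illustration of the helper in this record (a sketch only; it assumes the internal, unexported Distributions._pdf_fill_outside! behaves exactly as captured above): for a range wider than the support, the out-of-support slots are zero-filled and the clamped bounds are returned.

using Distributions

d = Binomial(3, 0.5)                       # support is 0:3
r = Vector{Float64}(undef, 6)
vl, vr, vfirst, vlast = Distributions._pdf_fill_outside!(r, d, -1:4)
# vfirst = -1, vlast = 4; clamped to vl = 0, vr = 3
# r[1] and r[6] are set to 0.0 (outside the support); r[2:5] hold pdf(d, 0:3)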
479
492
Distributions.jl
95
function _pdf!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange, rpe::RecursiveProbabilityEvaluator)
    vl,vr, vfirst, vlast = _pdf_fill_outside!(r, d, X)

    # fill central part: with non-zero pdf
    if vl <= vr
        fm1 = vfirst - 1
        r[vl - fm1] = pv = pdf(d, vl)
        for v = (vl+1):vr
            r[v - fm1] = pv = nextpdf(rpe, pv, v)
        end
    end

    return r
end
[ 479, 492 ]
_pdf!
479
492
src/univariates.jl
#FILE: Distributions.jl/src/univariate/discrete/categorical.jl ##CHUNK 1 function _pdf!(r::AbstractArray{<:Real}, d::Categorical{T}, rgn::UnitRange) where {T<:Real} vfirst = round(Int, first(rgn)) vlast = round(Int, last(rgn)) vl = max(vfirst, 1) vr = min(vlast, ncategories(d)) p = probs(d) if vl > vfirst for i = 1:(vl - vfirst) r[i] = zero(T) end end fm1 = vfirst - 1 for v = vl:vr r[v - fm1] = p[v] end if vr < vlast for i = (vr - vfirst + 2):length(rgn) r[i] = zero(T) end ##CHUNK 2 cdf(d::Categorical, x::Real) = cdf_int(d, x) ccdf(d::Categorical, x::Real) = ccdf_int(d, x) cdf(d::Categorical, x::Int) = integerunitrange_cdf(d, x) ccdf(d::Categorical, x::Int) = integerunitrange_ccdf(d, x) function pdf(d::Categorical, x::Real) ps = probs(d) return insupport(d, x) ? ps[round(Int, x)] : zero(eltype(ps)) end function _pdf!(r::AbstractArray{<:Real}, d::Categorical{T}, rgn::UnitRange) where {T<:Real} vfirst = round(Int, first(rgn)) vlast = round(Int, last(rgn)) vl = max(vfirst, 1) vr = min(vlast, ncategories(d)) p = probs(d) if vl > vfirst for i = 1:(vl - vfirst) r[i] = zero(T) #FILE: Distributions.jl/src/mixtures/mixturemodel.jl ##CHUNK 1 end function _mixpdf!(r::AbstractArray, d::AbstractMixtureModel, x) K = ncomponents(d) p = probs(d) fill!(r, 0.0) t = Array{eltype(p)}(undef, size(r)) @inbounds for i in eachindex(p) pi = p[i] if pi > 0.0 if d isa UnivariateMixture t .= Base.Fix1(pdf, component(d, i)).(x) else pdf!(t, component(d, i), x) end axpy!(pi, t, r) end end return r end ##CHUNK 2 function cdf(d::UnivariateMixture, x::Real) p = probs(d) r = sum(pi * cdf(component(d, i), x) for (i, pi) in enumerate(p) if !iszero(pi)) return r end function _mixpdf1(d::AbstractMixtureModel, x) p = probs(d) return sum(pi * pdf(component(d, i), x) for (i, pi) in enumerate(p) if !iszero(pi)) end function _mixpdf!(r::AbstractArray, d::AbstractMixtureModel, x) K = ncomponents(d) p = probs(d) fill!(r, 0.0) t = Array{eltype(p)}(undef, size(r)) @inbounds for i in eachindex(p) pi = p[i] if pi > 0.0 #FILE: Distributions.jl/src/quantilealgs.jl ##CHUNK 1 return T(minimum(d)) elseif p == 1 return T(maximum(d)) else return T(NaN) end end function cquantile_newton(d::ContinuousUnivariateDistribution, p::Real, xs::Real=mode(d), tol::Real=1e-12) x = xs + (ccdf(d, xs)-p) / pdf(d, xs) T = typeof(x) if 0 < p < 1 x0 = T(xs) while abs(x-x0) > max(abs(x),abs(x0)) * tol x0 = x x = x0 + (ccdf(d, x0)-p) / pdf(d, x0) end return x elseif p == 1 return T(minimum(d)) #CURRENT FILE: Distributions.jl/src/univariates.jl ##CHUNK 1 fm1 = vfirst - 1 for v = vl:vr r[v - fm1] = pdf(d, v) end # fill right part if vr < vlast for i = (vr-vfirst+2):n r[i] = 0.0 end end return vl, vr, vfirst, vlast end function _pdf!(r::AbstractArray{<:Real}, d::DiscreteUnivariateDistribution, X::UnitRange) vl,vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) # fill central part: with non-zero pdf fm1 = vfirst - 1 for v = vl:vr ##CHUNK 2 end return vl, vr, vfirst, vlast end function _pdf!(r::AbstractArray{<:Real}, d::DiscreteUnivariateDistribution, X::UnitRange) vl,vr, vfirst, vlast = _pdf_fill_outside!(r, d, X) # fill central part: with non-zero pdf fm1 = vfirst - 1 for v = vl:vr r[v - fm1] = pdf(d, v) end return r end abstract type RecursiveProbabilityEvaluator end ### special definitions for distributions with integer-valued support ##CHUNK 3 end # fill left part if vl > vfirst for i = 1:(vl - vfirst) r[i] = 0.0 end end # fill central part: with non-zero pdf fm1 = vfirst - 1 for v = vl:vr r[v - fm1] = pdf(d, v) end # fill right part if vr < vlast for i = 
(vr-vfirst+2):n r[i] = 0.0 end ##CHUNK 4 lb = minimum(d) if vl < lb vl = lb end end if isupperbounded(d) ub = maximum(d) if vr > ub vr = ub end end # fill left part if vl > vfirst for i = 1:(vl - vfirst) r[i] = 0.0 end end # fill central part: with non-zero pdf ##CHUNK 5 # gradlogpdf gradlogpdf(d::ContinuousUnivariateDistribution, x::Real) = throw(MethodError(gradlogpdf, (d, x))) function _pdf_fill_outside!(r::AbstractArray, d::DiscreteUnivariateDistribution, X::UnitRange) vl = vfirst = first(X) vr = vlast = last(X) n = vlast - vfirst + 1 if islowerbounded(d) lb = minimum(d) if vl < lb vl = lb end end if isupperbounded(d) ub = maximum(d) if vr > ub vr = ub end
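A minimal sketch of how a RecursiveProbabilityEvaluator plugs into the _pdf! method above. The toy PoissonRPE type and its nextpdf method are hypothetical, and the call relies on internal, unexported API exactly as captured in this record; it is not the library's own evaluator.

using Distributions

# advance the Poisson pmf via p(v) = p(v-1) * λ / v
struct PoissonRPE <: Distributions.RecursiveProbabilityEvaluator
    λ::Float64
end
Distributions.nextpdf(rpe::PoissonRPE, pv, v) = pv * rpe.λ / v

d = Poisson(2.0)
r = zeros(6)
Distributions._pdf!(r, d, 0:5, PoissonRPE(2.0))   # r ≈ pdf.(d, 0:5)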
496
510
Distributions.jl
96
function cdf_int(d::DiscreteUnivariateDistribution, x::Real)
    # handle `NaN` and `±Inf` which can't be truncated to `Int`
    isfinite_x = isfinite(x)
    _x = isfinite_x ? x : zero(x)
    c = float(cdf(d, floor(Int, _x)))
    return if isfinite_x
        c
    elseif isnan(x)
        oftype(c, NaN)
    elseif x < 0
        zero(c)
    else
        one(c)
    end
end
[ 496, 510 ]
cdf_int
496
510
src/univariates.jl
#FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result ##CHUNK 2 result = logsubexp(logcdf(d.untruncated, x), d.loglcdf) - d.logtp return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper !== nothing && x >= d.upper zero(result) else result end end function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end #FILE: Distributions.jl/src/univariate/continuous/inversegaussian.jl ##CHUNK 1 # otherwise `NaN` is returned for `+Inf` return isinf(x) && x > 0 ? zero(z) : z end function logcdf(d::InverseGaussian, x::Real) μ, λ = params(d) y = max(x, 0) u = sqrt(λ / y) v = y / μ a = normlogcdf(u * (v - 1)) b = 2λ / μ + normlogcdf(-u * (v + 1)) z = logaddexp(a, b) # otherwise `NaN` is returned for `+Inf` return isinf(x) && x > 0 ? zero(z) : z end function logccdf(d::InverseGaussian, x::Real) μ, λ = params(d) #FILE: Distributions.jl/src/censored.jl ##CHUNK 1 result = logcdf(d.uncensored, x) return if d.lower !== nothing && x < d.lower oftype(result, -Inf) elseif d.upper === nothing || x < d.upper result else zero(result) end end function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) #CURRENT FILE: Distributions.jl/src/univariates.jl ##CHUNK 1 return r end ### special definitions for distributions with integer-valued support function ccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(ccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 one(c) else zero(c) end ##CHUNK 2 end function logcdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(logcdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 oftype(c, -Inf) else zero(c) end end function logccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` ##CHUNK 3 c = float(ccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 one(c) else zero(c) end end function logcdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? 
x : zero(x) c = float(logcdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) ##CHUNK 4 oftype(c, NaN) elseif x < 0 oftype(c, -Inf) else zero(c) end end function logccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(logccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else ##CHUNK 5 isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(logccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else oftype(c, -Inf) end end # implementation of the cdf for distributions whose support is a unitrange of integers # note: incorrect for discrete distributions whose support includes non-integer numbers function integerunitrange_cdf(d::DiscreteUnivariateDistribution, x::Integer) minimum_d, maximum_d = extrema(d) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") ##CHUNK 6 # fill central part: with non-zero pdf if vl <= vr fm1 = vfirst - 1 r[vl - fm1] = pv = pdf(d, vl) for v = (vl+1):vr r[v - fm1] = pv = nextpdf(rpe, pv, v) end end return r end ### special definitions for distributions with integer-valued support function ccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x)
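For context on cdf_int, a small usage sketch: the categorical.jl context chunk shown with the _pdf! record above wires cdf(d::Categorical, x::Real) = cdf_int(d, x), so non-integer arguments are floored and ±Inf/NaN are handled by the explicit branches.

using Distributions

d = Categorical([0.2, 0.3, 0.5])
cdf(d, 1.7)     # == cdf(d, 1) == 0.2  (1.7 is floored to 1)
cdf(d, -Inf)    # 0.0
cdf(d, Inf)     # 1.0
cdf(d, NaN)     # NaN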
512
526
Distributions.jl
97
function ccdf_int(d::DiscreteUnivariateDistribution, x::Real)
    # handle `NaN` and `±Inf` which can't be truncated to `Int`
    isfinite_x = isfinite(x)
    _x = isfinite_x ? x : zero(x)
    c = float(ccdf(d, floor(Int, _x)))
    return if isfinite_x
        c
    elseif isnan(x)
        oftype(c, NaN)
    elseif x < 0
        one(c)
    else
        zero(c)
    end
end
[ 512, 526 ]
ccdf_int
512
526
src/univariates.jl
#FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result #FILE: Distributions.jl/src/univariate/continuous/inversegaussian.jl ##CHUNK 1 # otherwise `NaN` is returned for `+Inf` return isinf(x) && x > 0 ? zero(z) : z end function logcdf(d::InverseGaussian, x::Real) μ, λ = params(d) y = max(x, 0) u = sqrt(λ / y) v = y / μ a = normlogcdf(u * (v - 1)) b = 2λ / μ + normlogcdf(-u * (v + 1)) z = logaddexp(a, b) # otherwise `NaN` is returned for `+Inf` return isinf(x) && x > 0 ? zero(z) : z end function logccdf(d::InverseGaussian, x::Real) μ, λ = params(d) #FILE: Distributions.jl/src/censored.jl ##CHUNK 1 function ccdf(d::Censored, x::Real) lower = d.lower upper = d.upper result = ccdf(d.uncensored, x) return if lower !== nothing && x < lower one(result) elseif upper === nothing || x < upper result else zero(result) end end function logccdf(d::Censored{<:Any,<:Any,T}, x::Real) where {T} lower = d.lower upper = d.upper result = logccdf(d.uncensored, x) return if lower !== nothing && x < lower zero(result) elseif upper === nothing || x < upper ##CHUNK 2 lower = d.lower upper = d.upper px = float(pdf(d0, x)) return if _in_open_interval(x, lower, upper) px elseif x == lower x == upper ? one(px) : oftype(px, cdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(px, ccdf(d0, x) + px) else oftype(px, ccdf(d0, x)) end else # not in support zero(px) end end function logpdf(d::Censored, x::Real) d0 = d.uncensored #CURRENT FILE: Distributions.jl/src/univariates.jl ##CHUNK 1 function logcdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(logcdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 oftype(c, -Inf) else zero(c) end end function logccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` ##CHUNK 2 return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else one(c) end end function logcdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(logcdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) ##CHUNK 3 return r end ### special definitions for distributions with integer-valued support function cdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(cdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else one(c) end end ##CHUNK 4 oftype(c, NaN) elseif x < 0 oftype(c, -Inf) else zero(c) end end function logccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? 
x : zero(x) c = float(logccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else ##CHUNK 5 isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(logccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else oftype(c, -Inf) end end # implementation of the cdf for distributions whose support is a unitrange of integers # note: incorrect for discrete distributions whose support includes non-integer numbers function integerunitrange_cdf(d::DiscreteUnivariateDistribution, x::Integer) minimum_d, maximum_d = extrema(d) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") ##CHUNK 6 # fill central part: with non-zero pdf if vl <= vr fm1 = vfirst - 1 r[vl - fm1] = pv = pdf(d, vl) for v = (vl+1):vr r[v - fm1] = pv = nextpdf(rpe, pv, v) end end return r end ### special definitions for distributions with integer-valued support function cdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(cdf(d, floor(Int, _x)))
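The same pattern holds for ccdf_int, with the limits swapped at ±Inf (again a sketch, assuming the Categorical wiring ccdf(d, x) = ccdf_int(d, x) quoted in the categorical.jl chunk above).

using Distributions

d = Categorical([0.2, 0.3, 0.5])
ccdf(d, 1.7)    # == ccdf(d, 1) == 0.8
ccdf(d, -Inf)   # 1.0
ccdf(d, Inf)    # 0.0
ccdf(d, NaN)    # NaN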
528
542
Distributions.jl
98
function logcdf_int(d::DiscreteUnivariateDistribution, x::Real)
    # handle `NaN` and `±Inf` which can't be truncated to `Int`
    isfinite_x = isfinite(x)
    _x = isfinite_x ? x : zero(x)
    c = float(logcdf(d, floor(Int, _x)))
    return if isfinite_x
        c
    elseif isnan(x)
        oftype(c, NaN)
    elseif x < 0
        oftype(c, -Inf)
    else
        zero(c)
    end
end
[ 528, 542 ]
logcdf_int
528
542
src/univariates.jl
#FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result #FILE: Distributions.jl/src/univariate/continuous/inversegaussian.jl ##CHUNK 1 # otherwise `NaN` is returned for `+Inf` return isinf(x) && x > 0 ? zero(z) : z end function logcdf(d::InverseGaussian, x::Real) μ, λ = params(d) y = max(x, 0) u = sqrt(λ / y) v = y / μ a = normlogcdf(u * (v - 1)) b = 2λ / μ + normlogcdf(-u * (v + 1)) z = logaddexp(a, b) # otherwise `NaN` is returned for `+Inf` return isinf(x) && x > 0 ? zero(z) : z end function logccdf(d::InverseGaussian, x::Real) μ, λ = params(d) #FILE: Distributions.jl/test/univariate_bounds.jl ##CHUNK 1 @test isnan(logccdf(d, NaN)) @test iszero(pdf(d, lb)) @test iszero(pdf(d, ub)) lb_lpdf = logpdf(d, lb) @test isinf(lb_lpdf) && lb_lpdf < 0 ub_lpdf = logpdf(d, ub) @test isinf(ub_lpdf) && ub_lpdf < 0 @test logpdf(d, -Inf) == -Inf @test logpdf(d, Inf) == -Inf end #FILE: Distributions.jl/src/censored.jl ##CHUNK 1 lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored #CURRENT FILE: Distributions.jl/src/univariates.jl ##CHUNK 1 function ccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(ccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 one(c) else zero(c) end end function logccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` ##CHUNK 2 return r end ### special definitions for distributions with integer-valued support function cdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(cdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else one(c) end end ##CHUNK 3 elseif x < 0 one(c) else zero(c) end end function logccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(logccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else ##CHUNK 4 return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else one(c) end end function ccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? 
x : zero(x) c = float(ccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) ##CHUNK 5 isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(logccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else oftype(c, -Inf) end end # implementation of the cdf for distributions whose support is a unitrange of integers # note: incorrect for discrete distributions whose support includes non-integer numbers function integerunitrange_cdf(d::DiscreteUnivariateDistribution, x::Integer) minimum_d, maximum_d = extrema(d) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") ##CHUNK 6 # fill central part: with non-zero pdf if vl <= vr fm1 = vfirst - 1 r[vl - fm1] = pv = pdf(d, vl) for v = (vl+1):vr r[v - fm1] = pv = nextpdf(rpe, pv, v) end end return r end ### special definitions for distributions with integer-valued support function cdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(cdf(d, floor(Int, _x)))
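A sketch of the log-scale variant; logcdf_int is internal and unexported, so it is called qualified here purely for illustration.

using Distributions

d = Categorical([0.2, 0.3, 0.5])
Distributions.logcdf_int(d, 1.0)    # ≈ log(0.2)
Distributions.logcdf_int(d, -Inf)   # -Inf
Distributions.logcdf_int(d, Inf)    # 0.0  (log of 1)
Distributions.logcdf_int(d, NaN)    # NaN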
544
558
Distributions.jl
99
function logccdf_int(d::DiscreteUnivariateDistribution, x::Real)
    # handle `NaN` and `±Inf` which can't be truncated to `Int`
    isfinite_x = isfinite(x)
    _x = isfinite_x ? x : zero(x)
    c = float(logccdf(d, floor(Int, _x)))
    return if isfinite_x
        c
    elseif isnan(x)
        oftype(c, NaN)
    elseif x < 0
        zero(c)
    else
        oftype(c, -Inf)
    end
end
[ 544, 558 ]
logccdf_int
544
558
src/univariates.jl
#FILE: Distributions.jl/src/truncate.jl ##CHUNK 1 function ccdf(d::Truncated, x::Real) result = clamp((d.ucdf - cdf(d.untruncated, x)) / d.tp, 0, 1) # Special cases for values outside of the support to avoid e.g. NaN issues with `Binomial` return if d.lower !== nothing && x <= d.lower one(result) elseif d.upper !== nothing && x > d.upper zero(result) else result end end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result ##CHUNK 2 end function logccdf(d::Truncated, x::Real) result = logsubexp(logccdf(d.untruncated, x), log1p(-d.ucdf)) - d.logtp return if d.lower !== nothing && x <= d.lower zero(result) elseif d.upper !== nothing && x > d.upper oftype(result, -Inf) else result end end ## random number generation function rand(rng::AbstractRNG, d::Truncated) d0 = d.untruncated tp = d.tp lower = d.lower upper = d.upper #FILE: Distributions.jl/src/univariate/continuous/inversegaussian.jl ##CHUNK 1 # otherwise `NaN` is returned for `+Inf` return isinf(x) && x > 0 ? zero(z) : z end function logcdf(d::InverseGaussian, x::Real) μ, λ = params(d) y = max(x, 0) u = sqrt(λ / y) v = y / μ a = normlogcdf(u * (v - 1)) b = 2λ / μ + normlogcdf(-u * (v + 1)) z = logaddexp(a, b) # otherwise `NaN` is returned for `+Inf` return isinf(x) && x > 0 ? zero(z) : z end function logccdf(d::InverseGaussian, x::Real) μ, λ = params(d) #FILE: Distributions.jl/src/censored.jl ##CHUNK 1 lower = d.lower upper = d.upper logpx = logpdf(d0, x) return if _in_open_interval(x, lower, upper) logpx elseif x == lower x == upper ? zero(logpx) : oftype(logpx, logcdf(d0, x)) elseif x == upper if value_support(typeof(d0)) === Discrete oftype(logpx, logaddexp(logccdf(d0, x), logpx)) else oftype(logpx, logccdf(d0, x)) end else # not in support oftype(logpx, -Inf) end end function loglikelihood(d::Censored, x::AbstractArray{<:Real}) d0 = d.uncensored #CURRENT FILE: Distributions.jl/src/univariates.jl ##CHUNK 1 function ccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(ccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 one(c) else zero(c) end end function logcdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) ##CHUNK 2 elseif x < 0 one(c) else zero(c) end end function logcdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(logcdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 oftype(c, -Inf) else zero(c) ##CHUNK 3 return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else one(c) end end function ccdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(ccdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) ##CHUNK 4 return r end ### special definitions for distributions with integer-valued support function cdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? 
x : zero(x) c = float(cdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 zero(c) else one(c) end end ##CHUNK 5 _x = isfinite_x ? x : zero(x) c = float(logcdf(d, floor(Int, _x))) return if isfinite_x c elseif isnan(x) oftype(c, NaN) elseif x < 0 oftype(c, -Inf) else zero(c) end end # implementation of the cdf for distributions whose support is a unitrange of integers # note: incorrect for discrete distributions whose support includes non-integer numbers function integerunitrange_cdf(d::DiscreteUnivariateDistribution, x::Integer) minimum_d, maximum_d = extrema(d) isfinite(minimum_d) || isfinite(maximum_d) || error("support is unbounded") ##CHUNK 6 # fill central part: with non-zero pdf if vl <= vr fm1 = vfirst - 1 r[vl - fm1] = pv = pdf(d, vl) for v = (vl+1):vr r[v - fm1] = pv = nextpdf(rpe, pv, v) end end return r end ### special definitions for distributions with integer-valued support function cdf_int(d::DiscreteUnivariateDistribution, x::Real) # handle `NaN` and `±Inf` which can't be truncated to `Int` isfinite_x = isfinite(x) _x = isfinite_x ? x : zero(x) c = float(cdf(d, floor(Int, _x)))
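And the mirror image for logccdf_int (same caveat: internal helper, shown only as a sketch).

using Distributions

d = Categorical([0.2, 0.3, 0.5])
Distributions.logccdf_int(d, 1.0)    # ≈ log(0.8)
Distributions.logccdf_int(d, -Inf)   # 0.0  (log of 1)
Distributions.logccdf_int(d, Inf)    # -Inf
Distributions.logccdf_int(d, NaN)    # NaN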