//! A priority queue implemented with a 4-ary heap.
//!
//! Insertion and popping the minimal element have `O(log n)` time complexity.
//! Checking the minimal element is `O(1)`. Keys of elements in the heap can
//! also be increased or decreased.
//!
//! # Example
//!
//! ```
//! use stud_rust_base::index_heap::{Indexing, IndexdMinHeap};
//!
//! #[derive(Copy, Clone, Eq, PartialEq, Debug, Ord, PartialOrd)]
//! pub struct State {
//!     pub distance: usize,
//!     pub node: usize,
//! }
//!
//! // The `Indexing` trait needs to be implemented as well, so we can find elements to decrease their key.
//! impl Indexing for State {
//!     fn as_index(&self) -> usize {
//!         self.node as usize
//!     }
//! }
//!
//! let mut heap = IndexdMinHeap::new(3);
//! heap.push(State { node: 0, distance: 42 });
//! heap.push(State { node: 1, distance: 23 });
//! heap.push(State { node: 2, distance: 50000 });
//! assert_eq!(heap.peek().cloned(), Some(State { node: 1, distance: 23 }));
//! heap.decrease_key(State { node: 0, distance: 1 });
//! assert_eq!(heap.pop(), Some(State { node: 0, distance: 1 }));
//! ```
/// Maps a heap element to a unique `usize` index.
///
/// The element type of an `IndexdMinHeap` has to implement this trait so the
/// heap can find an element's current slot when its key is updated.
pub trait Indexing {
    /// Maps this heap element to a unique `usize` index; for a heap created
    /// with `new(max_index)` it must lie in `[0, max_index)`.
    fn as_index(&self) -> usize;
}
50 /// A priority queue where the elements are IDs from 0 to id_count-1 where id_count is a number that is set in the constructor.
51 /// The elements are sorted ascending by the ordering defined by the `Ord` trait.
52 /// The interface mirros the standard library BinaryHeap (except for the reversed order).
53 /// Only the methods necessary for dijkstras algorithm are implemented.
54 /// In addition, `increase_key` and `decrease_key` methods are available.
56 pub struct IndexdMinHeap<T: Ord + Indexing> {
57 positions: Vec<usize>,
61 const TREE_ARITY: usize = 4;
62 const INVALID_POSITION: usize = std::usize::MAX;
64 impl<T: Ord + Indexing> IndexdMinHeap<T> {
65 /// Creates an empty `IndexdMinHeap` as a min-heap.
66 /// The indices (as defined by the `Indexing` trait) of all inserted elements
67 /// will have to be between in `[0, max_index)`
68 pub fn new(max_index: usize) -> IndexdMinHeap<T> {
70 positions: vec![INVALID_POSITION; max_index],
75 /// Returns the length of the binary heap.
76 pub fn len(&self) -> usize {
80 /// Checks if the binary heap is empty.
81 pub fn is_empty(&self) -> bool {
85 /// Checks if the heap already contains an element mapped to the given index
86 pub fn contains_index(&self, id: usize) -> bool {
87 self.positions[id] != INVALID_POSITION
90 /// Drops all items from the heap.
91 pub fn clear(&mut self) {
92 for element in &self.data {
93 self.positions[element.as_index()] = INVALID_POSITION;
98 /// Returns a reference to the smallest item in the heap, or None if it is empty.
99 pub fn peek(&self) -> Option<&T> {
103 /// Removes the greatest item from the binary heap and returns it, or None if it is empty.
104 pub fn pop(&mut self) -> Option<T> {
105 self.data.pop().map(|mut item| {
106 self.positions[item.as_index()] = INVALID_POSITION;
107 if !self.is_empty() {
108 self.positions[item.as_index()] = 0;
109 self.positions[self.data[0].as_index()] = INVALID_POSITION;
110 swap(&mut item, &mut self.data[0]);
111 self.move_down_in_tree(0);
117 /// Pushes an item onto the binary heap.
118 /// Panics if an element with the same index already exists.
119 pub fn push(&mut self, element: T) {
120 assert!(!self.contains_index(element.as_index()));
121 let insert_position = self.len();
122 self.positions[element.as_index()] = insert_position;
123 self.data.push(element);
124 self.move_up_in_tree(insert_position);
127 /// Updates the key of an element if the new key is smaller than the old key.
128 /// Does nothing if the new key is larger.
129 /// Panics if the element is not part of the queue.
130 pub fn decrease_key(&mut self, element: T) {
131 let position = self.positions[element.as_index()];
132 self.data[position] = element;
133 self.move_up_in_tree(position);
136 /// Updates the key of an element if the new key is larger than the old key.
137 /// Does nothing if the new key is smaller.
138 /// Panics if the element is not part of the queue.
139 pub fn increase_key(&mut self, element: T) {
140 let position = self.positions[element.as_index()];
141 self.data[position] = element;
142 self.move_down_in_tree(position);
145 fn move_up_in_tree(&mut self, position: usize) {
147 let mut position = position;
148 let mut hole = Hole::new(&mut self.data, position);
151 let parent = (position - 1) / TREE_ARITY;
153 if hole.get(parent) < hole.element() {
157 self.positions[hole.get(parent).as_index()] = position;
158 hole.move_to(parent);
162 self.positions[hole.element().as_index()] = position;
166 fn move_down_in_tree(&mut self, position: usize) {
168 let mut position = position;
169 let heap_size = self.len();
170 let mut hole = Hole::new(&mut self.data, position);
173 if let Some(smallest_child) = IndexdMinHeap::<T>::children_index_range(position, heap_size).min_by_key(|&child_index| hole.get(child_index)) {
174 if hole.get(smallest_child) >= hole.element() {
175 self.positions[hole.element().as_index()] = position;
176 return; // no child is smaller
179 self.positions[hole.get(smallest_child).as_index()] = position;
180 hole.move_to(smallest_child);
181 position = smallest_child;
183 self.positions[hole.element().as_index()] = position;
184 return; // no children at all
190 fn children_index_range(parent_index: usize, heap_size: usize) -> std::ops::Range<usize> {
191 let first_child = TREE_ARITY * parent_index + 1;
192 let last_child = min(TREE_ARITY * parent_index + TREE_ARITY + 1, heap_size);
193 first_child..last_child
// This is an optimization copied straight from the rust stdlib binary heap:
// it allows to avoid always swapping elements pairwise and rather
// move each element only once.

/// Hole represents a hole in a slice i.e. an index without valid value
/// (because it was moved from or duplicated).
/// In drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.
struct Hole<'a, T: 'a> {
    data: &'a mut [T],
    /// `elt` is always `Some` from new until drop.
    elt: Option<T>,
    pos: usize,
}

impl<'a, T> Hole<'a, T> {
    /// Create a new Hole at index `pos`.
    ///
    /// Unsafe because pos must be within the data slice.
    unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
        debug_assert!(pos < data.len());
        // SAFETY: the caller guarantees `pos` is in bounds; the value read
        // here is logically moved out and written back exactly once in drop.
        let elt = ptr::read(&data[pos]);
        Hole { data, elt: Some(elt), pos }
    }

    /// Returns a reference to the element removed.
    fn element(&self) -> &T {
        self.elt.as_ref().unwrap()
    }

    /// Returns a reference to the element at `index`.
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    unsafe fn get(&self, index: usize) -> &T {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        self.data.get_unchecked(index)
    }

    /// Move hole to new location.
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    unsafe fn move_to(&mut self, index: usize) {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        // Copy the occupant of `index` into the hole; `index` becomes the
        // new hole. Non-overlapping because index != pos.
        let index_ptr: *const _ = self.data.get_unchecked(index);
        let hole_ptr = self.data.get_unchecked_mut(self.pos);
        ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
        self.pos = index;
    }
}

impl<'a, T> Drop for Hole<'a, T> {
    fn drop(&mut self) {
        // Fill the hole again with the originally removed element.
        // SAFETY: `pos` is in bounds (invariant maintained by new/move_to),
        // and `elt` is still Some because it is only taken here.
        unsafe {
            let pos = self.pos;
            ptr::write(self.data.get_unchecked_mut(pos), self.elt.take().unwrap());
        }
    }
}