use super::Header;

use fnv::FnvHasher;
use http::header;
use http::method::Method;

use std::collections::VecDeque;
use std::hash::{Hash, Hasher};
use std::{cmp, mem, usize};

/// HPACK encoder table
#[derive(Debug)]
pub struct Table {
    // Mask used to map a hash to a bucket in `indices`; always
    // `indices.len() - 1` with a power-of-two length, or 0 when unallocated.
    mask: usize,
    // Robin Hood hash buckets pointing into `slots`, keyed by header-name hash.
    indices: Vec<Option<Pos>>,
    // The stored headers; the most recent insertion sits at the front.
    slots: VecDeque<Slot>,
    // Wrapping count of insertions, used to keep stored positions stable as
    // `slots` grows and shrinks.
    inserted: usize,
    // Size is in bytes
    size: usize,
    max_size: usize,
}

#[derive(Debug)]
pub enum Index {
    // The header is already fully indexed
    Indexed(usize, Header),

    // The name is indexed, but not the value
    Name(usize, Header),

    // The full header has been inserted into the table.
    Inserted(usize),

    // Only the value has been inserted (hpack table idx, slots idx)
    InsertedValue(usize, usize),

    // The header is not indexed by this table
    NotIndexed(Header),
}

#[derive(Debug)]
struct Slot {
    hash: HashValue,
    header: Header,
    // Wrapping position of the next (more recent) slot with the same header
    // name, if any; buckets point at the oldest slot in such a chain.
    next: Option<usize>,
}

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
struct Pos {
    index: usize,
    hash: HashValue,
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
struct HashValue(usize);

// Upper bound on the hash space used by `hash_header`.
const MAX_SIZE: usize = 1 << 16;
// The HPACK static table occupies indices 1-61 (RFC 7541, Appendix A), so the
// first dynamic table entry is at index 62.
const DYN_OFFSET: usize = 62;

macro_rules! probe_loop {
    ($probe_var: ident < $len: expr, $body: expr) => {
        debug_assert!($len > 0);
        loop {
            if $probe_var < $len {
                $body
                $probe_var += 1;
            } else {
                $probe_var = 0;
            }
        }
    };
}
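
// Illustrative only: the macro scans from a starting bucket and wraps back to
// index 0 at the end of `indices`; the body is responsible for `break`ing or
// `return`ing. A minimal sketch of a typical call site:
//
//     let mut probe = desired_pos(self.mask, hash);
//     probe_loop!(probe < self.indices.len(), {
//         if self.indices[probe].is_none() {
//             break; // found a vacant bucket
//         }
//     });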

impl Table {
    pub fn new(max_size: usize, capacity: usize) -> Table {
        if capacity == 0 {
            Table {
                mask: 0,
                indices: vec![],
                slots: VecDeque::new(),
                inserted: 0,
                size: 0,
                max_size,
            }
        } else {
            let capacity = cmp::max(to_raw_capacity(capacity).next_power_of_two(), 8);

            Table {
                mask: capacity.wrapping_sub(1),
                indices: vec![None; capacity],
                slots: VecDeque::with_capacity(usable_capacity(capacity)),
                inserted: 0,
                size: 0,
                max_size,
            }
        }
    }

    #[inline]
    pub fn capacity(&self) -> usize {
        usable_capacity(self.indices.len())
    }

    pub fn max_size(&self) -> usize {
        self.max_size
    }

    /// Gets the header stored in the table
    pub fn resolve<'a>(&'a self, index: &'a Index) -> &'a Header {
        use self::Index::*;

        match *index {
            Indexed(_, ref h) => h,
            Name(_, ref h) => h,
            Inserted(idx) => &self.slots[idx].header,
            InsertedValue(_, idx) => &self.slots[idx].header,
            NotIndexed(ref h) => h,
        }
    }

    pub fn resolve_idx(&self, index: &Index) -> usize {
        use self::Index::*;

        match *index {
            Indexed(idx, ..) => idx,
            Name(idx, ..) => idx,
            Inserted(idx) => idx + DYN_OFFSET,
            InsertedValue(_name_idx, slot_idx) => slot_idx + DYN_OFFSET,
            NotIndexed(_) => panic!("cannot resolve index"),
        }
    }

    /// Index the header in the HPACK table.
    pub fn index(&mut self, header: Header) -> Index {
        // Check the static table
        let statik = index_static(&header);

        // Don't index certain headers. This logic is borrowed from nghttp2.
        if header.skip_value_index() {
            // Right now, if this is true, the header name is always in the
            // static table. At some point in the future, this might not be
            // true and this logic will need to be updated.
            debug_assert!(statik.is_some(), "skip_value_index requires a static name");
            return Index::new(statik, header);
        }

        // If the header is already indexed by the static table, return that
        if let Some((n, true)) = statik {
            return Index::Indexed(n, header);
        }

        // Don't index headers larger than 75% of the table's max size.
        if header.len() * 4 > self.max_size * 3 {
            return Index::new(statik, header);
        }

        self.index_dynamic(header, statik)
    }

    fn index_dynamic(&mut self, header: Header, statik: Option<(usize, bool)>) -> Index {
        debug_assert!(self.assert_valid_state("one"));

        if header.len() + self.size < self.max_size || !header.is_sensitive() {
            // Only grow internal storage if needed
            self.reserve_one();
        }

        if self.indices.is_empty() {
            // If `indices` is non-empty, the load factor guarantees that at
            // least one bucket is vacant, so the probe loop below always
            // terminates. Only the empty case needs a check here.
            return Index::new(statik, header);
        }

        let hash = hash_header(&header);

        let desired = desired_pos(self.mask, hash);
        let mut probe = desired;
        let mut dist = 0;

        // Start at the ideal position, checking all slots
        probe_loop!(probe < self.indices.len(), {
            if let Some(pos) = self.indices[probe] {
                // The slot is already occupied, but check if it has a lower
                // displacement.
                let their_dist = probe_distance(self.mask, pos.hash, probe);

                let slot_idx = pos.index.wrapping_add(self.inserted);

                if their_dist < dist {
                    // Robin Hood: the occupant is closer to its ideal bucket
                    // than we are, so steal this bucket and displace it.
                    return self.index_vacant(header, hash, dist, probe, statik);
                } else if pos.hash == hash && self.slots[slot_idx].header.name() == header.name() {
                    // Matching name, check values
                    return self.index_occupied(header, hash, pos.index, statik.map(|(n, _)| n));
                }
            } else {
                return self.index_vacant(header, hash, dist, probe, statik);
            }

            dist += 1;
        });
    }
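
    // The Robin Hood rule above keeps probe sequences short: an occupant may
    // only be displaced by a prober that is *further* from its ideal bucket
    // than the occupant is. For example, a prober that has already walked 4
    // buckets evicts an occupant sitting 3 buckets from its own ideal
    // position (3 < 4), while a prober that has walked only 2 keeps probing.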

    fn index_occupied(
        &mut self,
        header: Header,
        hash: HashValue,
        mut index: usize,
        statik: Option<usize>,
    ) -> Index {
        debug_assert!(self.assert_valid_state("top"));

        // There already is a match for the given header name. Check if a value
        // matches. The header will also only be inserted if the table is not at
        // capacity.
        loop {
            // Compute the real index into the VecDeque
            let real_idx = index.wrapping_add(self.inserted);

            if self.slots[real_idx].header.value_eq(&header) {
                // We have a full match!
                return Index::Indexed(real_idx + DYN_OFFSET, header);
            }

            if let Some(next) = self.slots[real_idx].next {
                index = next;
                continue;
            }

            if header.is_sensitive() {
                // Should we assert this?
                // debug_assert!(statik.is_none());
                return Index::Name(real_idx + DYN_OFFSET, header);
            }

            self.update_size(header.len(), Some(index));

            // Insert the new header
            self.insert(header, hash);

            // Recompute the real index, as the insertion just shifted it.
            let new_real_idx = index.wrapping_add(self.inserted);

            // The previous node in the linked list may have gotten evicted
            // while making room for this header.
            if new_real_idx < self.slots.len() {
                let idx = 0usize.wrapping_sub(self.inserted);

                self.slots[new_real_idx].next = Some(idx);
            }

            debug_assert!(self.assert_valid_state("bottom"));

            // Even if the previous header was evicted, we can still reference
            // it when inserting the new one...
            return if let Some(n) = statik {
                // If name is in static table, use it instead
                Index::InsertedValue(n, 0)
            } else {
                Index::InsertedValue(real_idx + DYN_OFFSET, 0)
            };
        }
    }

    fn index_vacant(
        &mut self,
        header: Header,
        hash: HashValue,
        mut dist: usize,
        mut probe: usize,
        statik: Option<(usize, bool)>,
    ) -> Index {
        if header.is_sensitive() {
            return Index::new(statik, header);
        }

        debug_assert!(self.assert_valid_state("top"));
        debug_assert!(dist == 0 || self.indices[probe.wrapping_sub(1) & self.mask].is_some());

        // Pass `None` for `prev_idx` since there is no previous header in this
        // case. If evictions freed up buckets, walk the probe back toward the
        // ideal position.
        if self.update_size(header.len(), None) {
            while dist != 0 {
                let back = probe.wrapping_sub(1) & self.mask;

                if let Some(pos) = self.indices[back] {
                    let their_dist = probe_distance(self.mask, pos.hash, back);

                    if their_dist < (dist - 1) {
                        probe = back;
                        dist -= 1;
                    } else {
                        break;
                    }
                } else {
                    probe = back;
                    dist -= 1;
                }
            }
        }

        debug_assert!(self.assert_valid_state("after update"));

        self.insert(header, hash);

        let pos_idx = 0usize.wrapping_sub(self.inserted);

        let prev = mem::replace(
            &mut self.indices[probe],
            Some(Pos {
                index: pos_idx,
                hash,
            }),
        );

        if let Some(mut prev) = prev {
            // Shift forward
            let mut probe = probe + 1;

            probe_loop!(probe < self.indices.len(), {
                let pos = &mut self.indices[probe];

                prev = match mem::replace(pos, Some(prev)) {
                    Some(p) => p,
                    None => break,
                };
            });
        }

        debug_assert!(self.assert_valid_state("bottom"));

        if let Some((n, _)) = statik {
            Index::InsertedValue(n, 0)
        } else {
            Index::Inserted(0)
        }
    }

    fn insert(&mut self, header: Header, hash: HashValue) {
        self.inserted = self.inserted.wrapping_add(1);

        self.slots.push_front(Slot {
            hash,
            header,
            next: None,
        });
    }
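
    // A sketch of the wrapping position scheme (plain `usize` arithmetic): a
    // slot inserted when `inserted` becomes `k` is recorded as
    // `0usize.wrapping_sub(k)`, i.e. `-k` modulo 2^64 on 64-bit targets, and
    // its VecDeque position is recovered as `stored.wrapping_add(inserted)`:
    //
    //     // After 3 insertions, the first slot was stored as -1:
    //     let stored = 0usize.wrapping_sub(1);
    //     let inserted = 3;
    //     assert_eq!(stored.wrapping_add(inserted), 2); // 2 newer slots in front
    //
    // so stored positions stay valid as `push_front` shifts the deque.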

    pub fn resize(&mut self, size: usize) {
        self.max_size = size;

        if size == 0 {
            self.size = 0;

            for i in &mut self.indices {
                *i = None;
            }

            self.slots.clear();
            self.inserted = 0;
        } else {
            self.converge(None);
        }
    }

    fn update_size(&mut self, len: usize, prev_idx: Option<usize>) -> bool {
        self.size += len;
        self.converge(prev_idx)
    }

    // Evicts until the table fits within `max_size`; returns `true` if any
    // eviction occurred.
    fn converge(&mut self, prev_idx: Option<usize>) -> bool {
        let mut ret = false;

        while self.size > self.max_size {
            ret = true;
            self.evict(prev_idx);
        }

        ret
    }
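
    // This follows the HPACK entry-eviction rules (RFC 7541, Sections
    // 4.3-4.4): whenever the table size exceeds the maximum, entries are
    // evicted from the end of the dynamic table (the oldest, at the back of
    // `slots`) until the size is within bounds.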

    fn evict(&mut self, prev_idx: Option<usize>) {
        let pos_idx = (self.slots.len() - 1).wrapping_sub(self.inserted);

        debug_assert!(!self.slots.is_empty());
        debug_assert!(self.assert_valid_state("one"));

        // Remove the header
        let slot = self.slots.pop_back().unwrap();
        let mut probe = desired_pos(self.mask, slot.hash);

        // Update the size
        self.size -= slot.header.len();

        debug_assert_eq!(
            self.indices
                .iter()
                .filter_map(|p| *p)
                .filter(|p| p.index == pos_idx)
                .count(),
            1
        );

        // Find the associated position
        probe_loop!(probe < self.indices.len(), {
            debug_assert!(self.indices[probe].is_some());

            let mut pos = self.indices[probe].unwrap();

            if pos.index == pos_idx {
                if let Some(idx) = slot.next {
                    pos.index = idx;
                    self.indices[probe] = Some(pos);
                } else if Some(pos.index) == prev_idx {
                    pos.index = 0usize.wrapping_sub(self.inserted + 1);
                    self.indices[probe] = Some(pos);
                } else {
                    self.indices[probe] = None;
                    self.remove_phase_two(probe);
                }

                break;
            }
        });

        debug_assert!(self.assert_valid_state("two"));
    }

    // Shifts all indices that were displaced by the header that has just been
    // removed.
    fn remove_phase_two(&mut self, probe: usize) {
        let mut last_probe = probe;
        let mut probe = probe + 1;

        probe_loop!(probe < self.indices.len(), {
            if let Some(pos) = self.indices[probe] {
                if probe_distance(self.mask, pos.hash, probe) > 0 {
                    self.indices[last_probe] = self.indices[probe].take();
                } else {
                    break;
                }
            } else {
                break;
            }

            last_probe = probe;
        });

        debug_assert!(self.assert_valid_state("two"));
    }

    fn reserve_one(&mut self) {
        let len = self.slots.len();

        if len == self.capacity() {
            if len == 0 {
                let new_raw_cap = 8;
                self.mask = new_raw_cap - 1;
                self.indices = vec![None; new_raw_cap];
            } else {
                let raw_cap = self.indices.len();
                self.grow(raw_cap << 1);
            }
        }
    }

    #[inline]
    fn grow(&mut self, new_raw_cap: usize) {
        // This path can never be reached when handling the first allocation in
        // the map.

        debug_assert!(self.assert_valid_state("top"));

        // Find the first ideally placed element -- the start of a cluster.
        let mut first_ideal = 0;

        for (i, pos) in self.indices.iter().enumerate() {
            if let Some(pos) = *pos {
                if 0 == probe_distance(self.mask, pos.hash, i) {
                    first_ideal = i;
                    break;
                }
            }
        }

        // Visit the entries in an order where we can simply reinsert them
        // into self.indices without any bucket stealing.
        let old_indices = mem::replace(&mut self.indices, vec![None; new_raw_cap]);
        self.mask = new_raw_cap.wrapping_sub(1);

        for &pos in &old_indices[first_ideal..] {
            self.reinsert_entry_in_order(pos);
        }

        for &pos in &old_indices[..first_ideal] {
            self.reinsert_entry_in_order(pos);
        }

        debug_assert!(self.assert_valid_state("bottom"));
    }

    fn reinsert_entry_in_order(&mut self, pos: Option<Pos>) {
        if let Some(pos) = pos {
            // Find the first empty bucket and insert there
            let mut probe = desired_pos(self.mask, pos.hash);

            probe_loop!(probe < self.indices.len(), {
                if self.indices[probe].is_none() {
                    // Empty bucket, insert here
                    self.indices[probe] = Some(pos);
                    return;
                }

                debug_assert!({
                    let them = self.indices[probe].unwrap();
                    let their_distance = probe_distance(self.mask, them.hash, probe);
                    let our_distance = probe_distance(self.mask, pos.hash, probe);

                    their_distance >= our_distance
                });
            });
        }
    }

    #[cfg(not(test))]
    fn assert_valid_state(&self, _: &'static str) -> bool {
        true
    }

    #[cfg(test)]
    fn assert_valid_state(&self, _msg: &'static str) -> bool {
        /*
        // Checks that the internal map structure is valid
        //
        // Ensure all hash codes in indices match the associated slot
        for pos in &self.indices {
            if let Some(pos) = *pos {
                let real_idx = pos.index.wrapping_add(self.inserted);

                if real_idx.wrapping_add(1) != 0 {
                    assert!(real_idx < self.slots.len(),
                            "out of index; real={}; len={}, msg={}",
                            real_idx, self.slots.len(), msg);

                    assert_eq!(pos.hash, self.slots[real_idx].hash,
                               "index hash does not match slot; msg={}", msg);
                }
            }
        }

        // Every index is only available once
        for i in 0..self.indices.len() {
            if self.indices[i].is_none() {
                continue;
            }

            for j in i+1..self.indices.len() {
                assert_ne!(self.indices[i], self.indices[j],
                           "duplicate indices; msg={}", msg);
            }
        }

        for (index, slot) in self.slots.iter().enumerate() {
            let mut indexed = None;

            // First, see if the slot is indexed
            for (i, pos) in self.indices.iter().enumerate() {
                if let Some(pos) = *pos {
                    let real_idx = pos.index.wrapping_add(self.inserted);
                    if real_idx == index {
                        indexed = Some(i);
                        // Already know that there is no dup, so break
                        break;
                    }
                }
            }

            if let Some(actual) = indexed {
                // Ensure that it is accessible..
                let desired = desired_pos(self.mask, slot.hash);
                let mut probe = desired;
                let mut dist = 0;

                probe_loop!(probe < self.indices.len(), {
                    assert!(self.indices[probe].is_some(),
                            "unexpected empty slot; probe={}; hash={:?}; msg={}",
                            probe, slot.hash, msg);

                    let pos = self.indices[probe].unwrap();

                    let their_dist = probe_distance(self.mask, pos.hash, probe);
                    let real_idx = pos.index.wrapping_add(self.inserted);

                    if real_idx == index {
                        break;
                    }

                    assert!(dist <= their_dist,
                            "could not find entry; actual={}; desired={}; \
                             probe={}, dist={}; their_dist={}; index={}; msg={}",
                            actual, desired, probe, dist, their_dist,
                            index.wrapping_sub(self.inserted), msg);

                    dist += 1;
                });
            } else {
                // There is exactly one next link
                let cnt = self.slots.iter().map(|s| s.next)
                    .filter(|n| *n == Some(index.wrapping_sub(self.inserted)))
                    .count();

                assert_eq!(1, cnt, "more than one node pointing here; msg={}", msg);
            }
        }
        */

        // TODO: Ensure linked lists are correct: no cycles, etc...

        true
    }
}

#[cfg(test)]
impl Table {
    /// Returns the number of headers in the table
    pub fn len(&self) -> usize {
        self.slots.len()
    }

    /// Returns the table size
    pub fn size(&self) -> usize {
        self.size
    }
}

impl Index {
    fn new(v: Option<(usize, bool)>, e: Header) -> Index {
        match v {
            None => Index::NotIndexed(e),
            Some((n, true)) => Index::Indexed(n, e),
            Some((n, false)) => Index::Name(n, e),
        }
    }
}

#[inline]
fn usable_capacity(cap: usize) -> usize {
    cap - cap / 4
}

#[inline]
fn to_raw_capacity(n: usize) -> usize {
    n + n / 3
}
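
// These two helpers keep the hash map at a ~75% maximum load factor:
// `usable_capacity` reserves a quarter of the raw buckets as headroom, and
// `to_raw_capacity` is its approximate inverse. For example:
//
//     to_raw_capacity(6) == 8  // 6 + 6/3
//     usable_capacity(8) == 6  // 8 - 8/4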

#[inline]
fn desired_pos(mask: usize, hash: HashValue) -> usize {
    hash.0 & mask
}

#[inline]
fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize {
    current.wrapping_sub(desired_pos(mask, hash)) & mask
}
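
// The masked subtraction makes the distance wrap correctly around the end of
// the table. For example, with 8 buckets (mask = 7), an entry whose desired
// position is 6 but which sits in bucket 1 has distance `(1 - 6) & 7 == 3`,
// matching the three probes 6 -> 7 -> 0 -> 1.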

fn hash_header(header: &Header) -> HashValue {
    const MASK: u64 = (MAX_SIZE as u64) - 1;

    // Only the header *name* contributes to the hash; headers that share a
    // name but differ in value are chained through `Slot::next`.
    let mut h: FnvHasher = FnvHasher::default();
    header.name().hash(&mut h);
    HashValue((h.finish() & MASK) as usize)
}

/// Checks the static table for the header. If found, returns the index and a
/// boolean representing if the value matched as well.
fn index_static(header: &Header) -> Option<(usize, bool)> {
    match *header {
        Header::Field {
            ref name,
            ref value,
        } => match *name {
            header::ACCEPT_CHARSET => Some((15, false)),
            header::ACCEPT_ENCODING => {
                if value == "gzip, deflate" {
                    Some((16, true))
                } else {
                    Some((16, false))
                }
            }
            header::ACCEPT_LANGUAGE => Some((17, false)),
            header::ACCEPT_RANGES => Some((18, false)),
            header::ACCEPT => Some((19, false)),
            header::ACCESS_CONTROL_ALLOW_ORIGIN => Some((20, false)),
            header::AGE => Some((21, false)),
            header::ALLOW => Some((22, false)),
            header::AUTHORIZATION => Some((23, false)),
            header::CACHE_CONTROL => Some((24, false)),
            header::CONTENT_DISPOSITION => Some((25, false)),
            header::CONTENT_ENCODING => Some((26, false)),
            header::CONTENT_LANGUAGE => Some((27, false)),
            header::CONTENT_LENGTH => Some((28, false)),
            header::CONTENT_LOCATION => Some((29, false)),
            header::CONTENT_RANGE => Some((30, false)),
            header::CONTENT_TYPE => Some((31, false)),
            header::COOKIE => Some((32, false)),
            header::DATE => Some((33, false)),
            header::ETAG => Some((34, false)),
            header::EXPECT => Some((35, false)),
            header::EXPIRES => Some((36, false)),
            header::FROM => Some((37, false)),
            header::HOST => Some((38, false)),
            header::IF_MATCH => Some((39, false)),
            header::IF_MODIFIED_SINCE => Some((40, false)),
            header::IF_NONE_MATCH => Some((41, false)),
            header::IF_RANGE => Some((42, false)),
            header::IF_UNMODIFIED_SINCE => Some((43, false)),
            header::LAST_MODIFIED => Some((44, false)),
            header::LINK => Some((45, false)),
            header::LOCATION => Some((46, false)),
            header::MAX_FORWARDS => Some((47, false)),
            header::PROXY_AUTHENTICATE => Some((48, false)),
            header::PROXY_AUTHORIZATION => Some((49, false)),
            header::RANGE => Some((50, false)),
            header::REFERER => Some((51, false)),
            header::REFRESH => Some((52, false)),
            header::RETRY_AFTER => Some((53, false)),
            header::SERVER => Some((54, false)),
            header::SET_COOKIE => Some((55, false)),
            header::STRICT_TRANSPORT_SECURITY => Some((56, false)),
            header::TRANSFER_ENCODING => Some((57, false)),
            header::USER_AGENT => Some((58, false)),
            header::VARY => Some((59, false)),
            header::VIA => Some((60, false)),
            header::WWW_AUTHENTICATE => Some((61, false)),
            _ => None,
        },
        Header::Authority(_) => Some((1, false)),
        Header::Method(ref v) => match *v {
            Method::GET => Some((2, true)),
            Method::POST => Some((3, true)),
            _ => Some((2, false)),
        },
        Header::Scheme(ref v) => match &**v {
            "http" => Some((6, true)),
            "https" => Some((7, true)),
            _ => Some((6, false)),
        },
        Header::Path(ref v) => match &**v {
            "/" => Some((4, true)),
            "/index.html" => Some((5, true)),
            _ => Some((4, false)),
        },
        Header::Protocol(..) => None,
        Header::Status(ref v) => match u16::from(*v) {
            200 => Some((8, true)),
            204 => Some((9, true)),
            206 => Some((10, true)),
            304 => Some((11, true)),
            400 => Some((12, true)),
            404 => Some((13, true)),
            500 => Some((14, true)),
            _ => Some((8, false)),
        },
    }
}
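
// Illustrative tests for the private helpers above. A minimal sketch: these
// exercise only the pure functions defined in this module, not full table
// behavior.
#[cfg(test)]
mod helper_tests {
    use super::*;

    #[test]
    fn probe_distance_wraps_around_the_table() {
        // 8 buckets, desired position 6, stored in bucket 1:
        // the distance is 3 (6 -> 7 -> 0 -> 1).
        let mask = 7;
        let hash = HashValue(6);
        assert_eq!(desired_pos(mask, hash), 6);
        assert_eq!(probe_distance(mask, hash, 1), 3);
        // An ideally placed entry has distance 0.
        assert_eq!(probe_distance(mask, hash, 6), 0);
    }

    #[test]
    fn capacity_helpers_round_trip() {
        // `to_raw_capacity` is an approximate inverse of `usable_capacity`.
        assert_eq!(to_raw_capacity(6), 8);
        assert_eq!(usable_capacity(8), 6);
    }

    #[test]
    fn wrapping_slot_positions_stay_valid() {
        // Simulate three insertions: each slot records the negated insertion
        // counter, and adding the current counter back recovers its position
        // in the deque (front == most recent).
        let mut inserted: usize = 0;
        let mut stored = Vec::new();

        for _ in 0..3 {
            inserted = inserted.wrapping_add(1);
            stored.push(0usize.wrapping_sub(inserted));
        }

        assert_eq!(stored[0].wrapping_add(inserted), 2); // oldest of the three
        assert_eq!(stored[2].wrapping_add(inserted), 0); // most recent, at front
    }
}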