Rename benchmarks, improve encoding performance

Michael Pfaff 2022-11-15 00:03:07 -05:00
parent da343e43f3
commit 5e491e52f9
Signed by: michael
GPG Key ID: CF402C4A012AA9D4
4 changed files with 124 additions and 81 deletions

View File

@@ -89,14 +89,14 @@ fn benchmark_sized<const N: usize, const HEAP_ONLY: bool>(
     [(); N * 2]:,
 {
     if !HEAP_ONLY {
-        c.bench_function(name!(name, "sized"), |b| {
+        c.bench_function(name!(name, "dec/sized"), |b| {
             b.iter(|| hex_bytes_sized::<N>(black_box(bytes)))
         });
-        c.bench_function(name!(name, "sized const"), |b| {
+        c.bench_function(name!(name, "dec/sized-const"), |b| {
             b.iter(|| hex_bytes_sized_const::<N>(black_box(bytes)))
         });
     }
-    c.bench_function(name!(name, "sized heap"), |b| {
+    c.bench_function(name!(name, "dec/sized-heap"), |b| {
         b.iter(|| hex_bytes_sized_heap::<N>(black_box(bytes)))
     });
     benchmark(name, bytes, c);
@@ -108,18 +108,18 @@ const BENCH_NON_NICHED: bool = true;
 fn benchmark(name: &str, bytes: &[u8], c: &mut Criterion) {
     if BENCH_UNSAFE {
-        c.bench_function(name!(name, "dyn unsafe"), |b| {
+        c.bench_function(name!(name, "dec/dyn-unsafe"), |b| {
             b.iter(|| hex_bytes_dyn_unsafe(black_box(bytes)))
         });
     }
     //c.bench_function(format!("{name} - dyn unsafe for"), |b| b.iter(|| hex_bytes_dyn_unsafe_for(black_box(bytes))));
     if BENCH_UNSAFE_ITER {
-        c.bench_function(name!(name, "dyn unsafe iter"), |b| {
+        c.bench_function(name!(name, "dec/dyn-unsafe-iter"), |b| {
             b.iter(|| hex_bytes_dyn_unsafe_iter(black_box(bytes)))
         });
     }
     if BENCH_NON_NICHED {
-        c.bench_function(name!(name, "dyn non-niched"), |b| {
+        c.bench_function(name!(name, "dec/dyn-non-niched"), |b| {
             b.iter(|| hex_bytes_dyn(black_box(bytes)))
         });
     }
@@ -289,7 +289,7 @@ pub fn bench_micro_hex_digit(c: &mut Criterion) {
     let hex_digits = Simd::from_array(black_box(HEX_DIGITS_VALID));
-    c.bench_function(name!("micro", "hex_digit"), |b| {
+    c.bench_function(name!("micro", "dec/hex_digit"), |b| {
         b.iter(|| {
             for b in black_box(HEX_DIGITS_VALID) {
                 black_box(hex_digit(b));
@@ -297,7 +297,7 @@ pub fn bench_micro_hex_digit(c: &mut Criterion) {
         })
     });
-    c.bench_function(name!("micro", "hex_digit_simd"), |b| {
+    c.bench_function(name!("micro", "dec/hex_digit_simd"), |b| {
         b.iter(|| hex_digit_simd::<DIGIT_BATCH_SIZE>(hex_digits))
     });
 }
@@ -315,7 +315,7 @@ pub fn bench_micro_hex_byte(c: &mut Criterion) {
     fn bench_decoder<T: HexByteDecoder + HexByteSimdDecoder>(c: &mut Criterion, name: &str) {
         let hex_bytes = conv::u8x2_to_u8(HEX_BYTES_VALID);
-        c.bench_function(name!("micro", format!("{name}::decode_packed")), |b| {
+        c.bench_function(name!("micro", format!("dec/{name}/packed")), |b| {
             b.iter(|| {
                 for b in black_box(HEX_BYTES_VALID) {
                     black_box(T::decode_packed(&b));
@@ -323,7 +323,7 @@ pub fn bench_micro_hex_byte(c: &mut Criterion) {
             })
         });
-        c.bench_function(name!("micro", format!("{name}::decode_unpacked")), |b| {
+        c.bench_function(name!("micro", format!("dec/{name}/unpacked")), |b| {
             b.iter(|| {
                 for [hi, lo] in black_box(HEX_BYTES_VALID) {
                     black_box(T::decode_unpacked(hi, lo));
@@ -331,12 +331,12 @@ pub fn bench_micro_hex_byte(c: &mut Criterion) {
             })
         });
-        c.bench_function(name!("micro", format!("{name}::decode_simd")), |b| {
+        c.bench_function(name!("micro", format!("dec/{name}/simd")), |b| {
             b.iter(|| T::decode_simd(black_box(hex_bytes)))
         });
     }
-    c.bench_function(name!("micro", "hex_byte"), |b| {
+    c.bench_function(name!("micro", "dec/hex_byte"), |b| {
         b.iter(|| {
             for b in black_box(HEX_BYTES_VALID) {
                 black_box(hex_byte(b[0], b[1]));
@@ -349,9 +349,9 @@ pub fn bench_micro_hex_byte(c: &mut Criterion) {
 pub fn bench_nano_hex_digit(c: &mut Criterion) {
     let digit = black_box('5' as u8);
-    c.bench_function(name!("nano", "hex_digit"), |b| b.iter(|| hex_digit(digit)));
+    c.bench_function(name!("nano", "dec/hex_digit"), |b| b.iter(|| hex_digit(digit)));
-    c.bench_function(name!("nano", "hex_digit +bb"), |b| {
+    c.bench_function(name!("nano", "dec/hex_digit+bb"), |b| {
         b.iter(|| hex_digit(black_box(digit)))
     });
 }
@@ -360,20 +360,20 @@ pub fn bench_nano_hex_byte(c: &mut Criterion) {
     const DIGITS: [u8; 2] = ['5' as u8, 'b' as u8];
     let digit = black_box(DIGITS);
-    c.bench_function(name!("nano", "hex_byte"), |b| {
+    c.bench_function(name!("nano", "dec/hex_byte"), |b| {
         b.iter(|| hex_byte(digit[0], digit[1]))
     });
     fn bench_decoder<T: HexByteDecoder + HexByteSimdDecoder>(c: &mut Criterion, name: &str) {
         let digit = black_box(DIGITS);
-        c.bench_function(name!("nano", format!("{name}::decode_packed")), |b| {
+        c.bench_function(name!("nano", format!("dec/{name}/packed")), |b| {
             b.iter(|| {
                 black_box(T::decode_packed(&digit));
             })
         });
-        c.bench_function(name!("nano", format!("{name}::decode_unpacked")), |b| {
+        c.bench_function(name!("nano", format!("dec/{name}/unpacked")), |b| {
             b.iter(|| {
                 black_box(T::decode_unpacked(
                     black_box(DIGITS[0]),

View File

@@ -8,14 +8,15 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion};
 use fast_hex::enc::{Encode as _, Encoder};
 use fast_hex::test::name;
-type Enc = Encoder<true>;
 const ASCII_BYTES: &[u8; 16] = b"Donald J. Trump!";
 const HEX_BYTES: &[u8; ASCII_BYTES.len() * 2] = b"446F6E616C64204A2E205472756D7021";
 const ASCII_BYTES_LONG: &[u8; 256] = b"Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!Donald J. Trump!";
 const HEX_BYTES_LONG: &[u8; ASCII_BYTES_LONG.len() * 2] = b"446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021446F6E616C64204A2E205472756D7021";
+type EncUpper = Encoder::<true>;
+type EncLower = Encoder::<false>;
 fn benchmark_sized<const N: usize, const HEAP_ONLY: bool>(
     name: &str,
     bytes: &[u8; N],
@@ -24,22 +25,34 @@ fn benchmark_sized<const N: usize, const HEAP_ONLY: bool>(
     [(); N * 2]:,
 {
     if !HEAP_ONLY {
-        c.bench_function(name!(name, "enc const"), |b| {
-            b.iter(|| Enc::enc_const(black_box(bytes)))
+        c.bench_function(name!(name, "enc/upper/const"), |b| {
+            b.iter(|| EncUpper::enc_const(black_box(bytes)))
         });
-        c.bench_function(name!(name, "enc sized"), |b| {
-            b.iter(|| Enc::enc_sized(black_box(bytes)))
+        c.bench_function(name!(name, "enc/lower/const"), |b| {
+            b.iter(|| EncLower::enc_const(black_box(bytes)))
         });
+        c.bench_function(name!(name, "enc/upper/sized"), |b| {
+            b.iter(|| EncUpper::enc_sized(black_box(bytes)))
+        });
+        c.bench_function(name!(name, "enc/lower/sized"), |b| {
+            b.iter(|| EncLower::enc_sized(black_box(bytes)))
+        });
     }
-    c.bench_function(name!(name, "enc sized heap"), |b| {
-        b.iter(|| Enc::enc_sized_heap(black_box(bytes)))
+    c.bench_function(name!(name, "enc/upper/sized-heap"), |b| {
+        b.iter(|| EncUpper::enc_sized_heap(black_box(bytes)))
     });
+    c.bench_function(name!(name, "enc/lower/sized-heap"), |b| {
+        b.iter(|| EncLower::enc_sized_heap(black_box(bytes)))
+    });
     benchmark(name, bytes, c);
 }
 fn benchmark(name: &str, bytes: &[u8], c: &mut Criterion) {
-    c.bench_function(name!(name, "enc slice"), |b| {
-        b.iter(|| Enc::enc_slice(black_box(bytes)))
+    c.bench_function(name!(name, "enc/upper/slice"), |b| {
+        b.iter(|| EncUpper::enc_slice(black_box(bytes)))
     });
+    c.bench_function(name!(name, "enc/lower/slice"), |b| {
+        b.iter(|| EncLower::enc_slice(black_box(bytes)))
+    });
 }
@@ -57,5 +70,5 @@ fn verification() {
     }
 }
-criterion_group!(encode_benches, bench_16, bench_256,);
+criterion_group!(encode_benches, bench_16, bench_256);
 criterion_main!(verification, encode_benches);
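Note on the encoder split: the removed Enc alias only exercised Encoder::<true>, so the lowercase path was never measured. The sketch below is illustrative only, not fast_hex's actual implementation; it shows why benchmarking both instantiations is worthwhile: each value of the const UPPER flag is a separate monomorphization with its own baked-in digit table.

    // Illustrative const-generic encoder: the case flag is resolved at compile
    // time, so Encoder::<true> and Encoder::<false> compile to distinct code.
    struct Encoder<const UPPER: bool>;

    impl<const UPPER: bool> Encoder<UPPER> {
        fn enc_slice(src: &[u8]) -> String {
            const UPPER_TABLE: &[u8; 16] = b"0123456789ABCDEF";
            const LOWER_TABLE: &[u8; 16] = b"0123456789abcdef";
            let table = if UPPER { UPPER_TABLE } else { LOWER_TABLE };
            let mut out = String::with_capacity(src.len() * 2);
            for &b in src {
                out.push(table[(b >> 4) as usize] as char); // high nibble first
                out.push(table[(b & 0xf) as usize] as char);
            }
            out
        }
    }

    fn main() {
        assert_eq!(Encoder::<true>::enc_slice(b"\x5b"), "5B");
        assert_eq!(Encoder::<false>::enc_slice(b"\x5b"), "5b");
    }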

View File

@@ -41,24 +41,35 @@ macro_rules! select {
 }
 #[inline(always)]
-fn nbl_to_ascii<const UPPER: bool>(nbl: u8) -> u8 {
+const fn nbl_to_ascii<const UPPER: bool>(nbl: u8) -> u8 {
     // fourth bit set if true
     let at_least_10 = {
-        let b1 = nbl & 0b1010;
-        let b2 = nbl & 0b1100;
-        ((nbl >> 1) | (b2 & (b2 << 1)) | (b1 & (b1 << 2))) & 0b1000
+        // 10: 1010
+        // 11: 1011
+        // 12: 1100
+        // 13: 1101
+        // 14: 1110
+        // 15: 1111
+        //let b1 = nbl & 0b1010;
+        //let b2 = nbl & 0b1100;
+        //((nbl >> 1) | (b2 & (b2 << 1)) | (b1 & (b1 << 2))) & 0b1000
+        //((b2 & (b2 << 1)) | (b1 & (b1 << 2))) & 0b1000
+        if nbl >= 10 { 0b1 } else { 0b0 }
     };
     // 6th bit is always 1 with a-z and 0-9
-    let b6_val = if UPPER { (at_least_10 ^ 0b1000) << 2 } else { 0b100000 };
+    let b6_val = if UPPER { (at_least_10 ^ 0b1) << 5 } else { 0b100000 };
     // 5th bit is always 1 with 0-9
-    let b5_val = (at_least_10 ^ 0b1000) << 1;
+    let b5_val = (at_least_10 ^ 0b1) << 4;
     // 7th bit is always 1 with a-z and A-Z
-    let b7_val = at_least_10 << 3;
+    let b7_val = at_least_10 << 6;
     // fill all bits with the value of the 4th bit
-    let is_at_least_10_all_mask = (((at_least_10 << 4) as i8) >> 7) as u8;
+    let is_at_least_10_all_mask = (((at_least_10 << 7) as i8) >> 7) as u8;
     // sub 9 if we're >=10
+    // 9: 1001
+    // a-z and A-Z start at ..0001 rather than ..0000 like 0-9, so we sub 9, not 10
     let sub = 9 & is_at_least_10_all_mask;
+    // interestingly, this is much slower than the nasty alt we're using above
+    //let sub = at_least_10 | (at_least_10 >> 3);
+    // apply the sub, then OR in the constants
     (nbl - sub) | b6_val | b5_val | b7_val
 }
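The rewritten at_least_10 flag plus the shifted constants are easier to check outside the macro-heavy file. Below is a self-contained restatement of the new function with an exhaustive test over all sixteen nibbles; the comments are mine, the logic matches the added lines above.

    // ASCII: '0'..='9' = 0b011_0000..=0b011_1001, 'A'..='F' = 0b100_0001..=0b100_0110,
    // 'a'..='f' = 0b110_0001..=0b110_0110.
    const fn nbl_to_ascii<const UPPER: bool>(nbl: u8) -> u8 {
        // 1 if the nibble is 10..=15, else 0
        let at_least_10 = if nbl >= 10 { 0b1 } else { 0b0 };
        // 0b10_0000: set for digits when UPPER, always set when lowercase
        let b6_val = if UPPER { (at_least_10 ^ 0b1) << 5 } else { 0b100000 };
        // 0b1_0000: set only for digits
        let b5_val = (at_least_10 ^ 0b1) << 4;
        // 0b100_0000: set only for letters
        let b7_val = at_least_10 << 6;
        // 0x00 or 0xff depending on at_least_10
        let is_at_least_10_all_mask = (((at_least_10 << 7) as i8) >> 7) as u8;
        // letters start at ..0001 while digits start at ..0000, hence sub 9, not 10
        let sub = 9 & is_at_least_10_all_mask;
        (nbl - sub) | b6_val | b5_val | b7_val
    }

    fn main() {
        for n in 0..16u8 {
            assert_eq!(nbl_to_ascii::<true>(n), b"0123456789ABCDEF"[n as usize]);
            assert_eq!(nbl_to_ascii::<false>(n), b"0123456789abcdef"[n as usize]);
        }
    }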
@@ -69,7 +80,8 @@ fn nbl_wide_to_ascii<const UPPER: bool>(nbl: u16) -> u16 {
     let at_least_10 = {
         let b1 = nbl & 0b1010;
         let b2 = nbl & 0b1100;
-        ((nbl >> 1) | (b2 & (b2 << 1)) | (b1 & (b1 << 2))) & 0b1000
+        //((nbl >> 1) | (b2 & (b2 << 1)) | (b1 & (b1 << 2))) & 0b1000
+        ((b2 & (b2 << 1)) | (b1 & (b1 << 2))) & 0b1000
     };
     // mask used on the 6th bit.
     let b6_val = if UPPER { (at_least_10 ^ 0b1000) << 2 } else { 0b100000 };
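nbl_wide_to_ascii keeps the bit-trick form of the comparison, minus the (nbl >> 1) term. The reason the shorter form still works: bit 3 survives in (b2 & (b2 << 1)) only when bits 3 and 2 are both set (values 12..=15), and in (b1 & (b1 << 2)) only when bits 3 and 1 are both set (10, 11, 14, 15); the union is exactly 10..=15. The dropped (nbl >> 1) term can only set bit 3 when bit 4 of the input is set, which never happens for a nibble value. A quick exhaustive check (my annotation, not part of the commit):

    fn main() {
        for nbl in 0u16..16 {
            let b1 = nbl & 0b1010;
            let b2 = nbl & 0b1100;
            // bit 3 of the result is set iff nbl >= 10
            let at_least_10 = ((b2 & (b2 << 1)) | (b1 & (b1 << 2))) & 0b1000;
            assert_eq!(at_least_10 == 0b1000, nbl >= 10);
        }
    }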
@@ -141,67 +153,75 @@ macro_rules! const_impl {
         const UNROLL: usize = 8;
         let ub = $src.len();
         let aub = util::align_down_to::<{ UNROLL }>(ub);
-        let mut src = $src.as_ptr() as *const u64;
         //let mut dst = $dst.as_mut_ptr() as *mut u16;
+        let mut src = $src.as_ptr() as *const u8;
+        //let mut dst = $dst.as_mut_ptr() as *mut u64;
+        let mut dst = $dst.as_mut_ptr() as *mut u16;
+        // 2-8% slower on 256-byte input
+        const USE_LOOKUP_TABLE: bool = false;
         while i < aub {
             unsafe {
-                let [b1, b2, b3, b4, b5, b6, b7, b8] = (src.read_unaligned()).to_ne_bytes();
-                src = src.add(1);
-                //let [b1, b2, b3, b4, b5, b6, b7, b8] = [(); UNROLL];
-                //unroll!(let [b1, b2, b3, b4, b5, b6, b7, b8] => |_| {
-                //    let b = *src;
-                //    src = src.add(1);
-                //    b
-                //});
-                //let mut buf1: u64 = 0;
-                //let mut buf2: u64 = 0;
-                unroll!(let [b1, b2, b3, b4, b5, b6, b7, b8] => |b| {
-                    *select!($UPPER ? HEX_BYTES_UPPER : HEX_BYTES_LOWER).get_unchecked(b as usize)
+                // benchmarks show this to be 40% faster than a u64 unaligned_read().to_ne_bytes()
+                let [b1, b2, b3, b4, b5, b6, b7, b8] = [(); UNROLL];
+                unroll!(let [b1, b2, b3, b4, b5, b6, b7, b8] => |_| {
+                    let b = *src;
+                    src = src.add(1);
+                    b
                 });
-                //unroll!(let [b1: (0, b1), b2: (1, b2), b3: (2, b3), b4: (3, b4), b5: (4, b5), b6: (5, b6), b7: (6, b7), b8: (7, b8)] => |i, v| {
-                //    if i < 4 {
-                //        (v as u64) << (i * 16)
-                //    } else {
-                //        (v as u64) << ((i - 4) * 16)
-                //    }
-                //});
+                unroll!(let [b1, b2, b3, b4, b5, b6, b7, b8] => |b| {
+                    if USE_LOOKUP_TABLE {
+                        *select!($UPPER ? HEX_BYTES_UPPER : HEX_BYTES_LOWER).get_unchecked(b as usize)
+                    } else {
+                        byte_to_ascii::<$UPPER>(b)
+                    }
+                });
+                /*unroll!(let [b1: (0, b1), b2: (1, b2), b3: (2, b3), b4: (3, b4), b5: (4, b5), b6: (5, b6), b7: (6, b7), b8: (7, b8)] => |j, v| {
+                    if j < 4 {
+                        (v as u64) << (j * 16)
+                    } else {
+                        (v as u64) << ((j - 4) * 16)
+                    }
+                });*/
+                // TODO: would using vector store actually be faster here (particularly for the
+                // heap variant)
                 unroll!([(0, b1), (1, b2), (2, b3), (3, b4), (4, b5), (5, b6), (6, b7), (7, b8)] => |_, v| {
                     //*dst = *select!($UPPER ? HEX_BYTES_UPPER : HEX_BYTES_LOWER).get_unchecked(b as usize);
                     *dst = v;
-                    //if i < 4 {
-                    //    //println!("[{i}] {v:064b}");
-                    //    buf1 |= v;
-                    //} else {
-                    //    //println!("[{i}] {v:064b}");
-                    //    buf2 |= v;
-                    //}
                     dst = dst.add(1);
                 });
+                /*let mut buf1: u64 = 0;
+                let mut buf2: u64 = 0;
+                unroll!([(0, b1), (1, b2), (2, b3), (3, b4), (4, b5), (5, b6), (6, b7), (7, b8)] => |j, v| {
+                    if j < 4 {
+                        //println!("[{j}] {v:064b}");
+                        buf1 |= v;
+                    } else {
+                        //println!("[{j}] {v:064b}");
+                        buf2 |= v;
+                    }
                     // if i < 4 {
                     //     buf1[i] = MaybeUninit::new(v);
                     // } else {
                     //     buf2[i - 4] = MaybeUninit::new(v);
                     // }
                     //*dst = byte_to_ascii::<$UPPER>(b);
+                    dst = dst.add(1);
+                });
-                // TODO: would using vector store actually be faster here (particularly for the
-                // heap variant)
-                //assert!(dst < ($dst.as_mut_ptr() as *mut u64).add($dst.len()));
-                //*dst = buf1;
-                //dst = dst.add(1);
+                *dst = buf1;
+                dst = dst.add(1);
-                //assert!(dst < ($dst.as_mut_ptr() as *mut u64).add($dst.len()));
-                //*dst = buf2;
-                //dst = dst.add(1);
+                *dst = buf2;
+                dst = dst.add(1);*/
                 i += UNROLL;
             }
         }
-        let mut src = src as *const u8;
-        let mut dst = dst as *mut u16;
        while i < ub {
            unsafe {
                let b = *src;
-                *dst = *select!($UPPER ? HEX_BYTES_UPPER : HEX_BYTES_LOWER).get_unchecked(b as usize);
-                //*dst = byte_to_ascii::<$UPPER>(b);
+                *dst = if USE_LOOKUP_TABLE {
+                    *select!($UPPER ? HEX_BYTES_UPPER : HEX_BYTES_LOWER).get_unchecked(b as usize)
+                } else {
+                    byte_to_ascii::<$UPPER>(b)
+                };
                dst = dst.add(1);
                src = src.add(1);
                i += 1;
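The new non-lookup path calls byte_to_ascii::<$UPPER>, whose definition is outside this diff. A hypothetical sketch of its likely shape, built on the nbl_to_ascii sketch earlier: pack the two digits of one input byte into a u16 so the loop can issue a single 16-bit store per byte. The body below is my guess, only the call site is from the commit, and little-endian byte order is assumed.

    // Hypothetical sketch only: the real byte_to_ascii is defined elsewhere in
    // the crate. Reuses the nbl_to_ascii sketch from above.
    const fn byte_to_ascii<const UPPER: bool>(b: u8) -> u16 {
        let hi = nbl_to_ascii::<UPPER>(b >> 4) as u16; // digit for the high nibble
        let lo = nbl_to_ascii::<UPPER>(b & 0xf) as u16; // digit for the low nibble
        // on little-endian, the low u16 byte is stored first, so it holds the
        // high nibble's digit
        hi | (lo << 8)
    }

    fn main() {
        assert_eq!(byte_to_ascii::<true>(0x5b).to_le_bytes(), *b"5B");
        assert_eq!(byte_to_ascii::<false>(0x5b).to_le_bytes(), *b"5b");
    }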
@@ -231,7 +251,8 @@ macro_rules! common_impl {
                 std::arch::asm!("vmovdqu {dst}, [{src}]", src = in(reg) src, dst = lateout(xmm_reg) chunk);
                 let hi = chunk.and(0xf0u8.splat().into());
-                let hi: simd::arch::__m128i = simd::shr_64!(4, (xmm_reg) hi);
+                // 64 vs 16 seems to make no difference
+                let hi: simd::arch::__m128i = simd::shr!(64, 4, (xmm_reg) hi);
                 let lo = chunk.and(0x0fu8.splat().into());
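The "64 vs 16" comment follows from the mask on the previous line: chunk.and(0xf0...) clears the low nibble of every byte, so shifting right by 4 can never move a set bit across a byte boundary, and 16-, 32-, and 64-bit lane shifts (vpsrlw, vpsrld, vpsrlq) all give identical results here. A scalar model of the same fact:

    fn main() {
        let chunk: u64 = 0x1234_5678_9abc_def0;
        // zero the low nibble of every byte, as the vector code does
        let hi = chunk & 0xf0f0_f0f0_f0f0_f0f0;
        // one wide 64-bit shift...
        let wide = hi >> 4;
        // ...equals eight independent per-byte shifts
        let mut bytewise = [0u8; 8];
        for (i, &b) in hi.to_le_bytes().iter().enumerate() {
            bytewise[i] = b >> 4;
        }
        assert_eq!(wide.to_le_bytes(), bytewise);
    }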

View File

@@ -610,15 +610,24 @@ macro_rules! widen_256_impl {
 #[doc(hidden)]
 #[macro_export]
-macro_rules! __simd__shr_64 {
-    ($n:literal, ($in_reg:ident) $in:expr) => {{
+macro_rules! __simd__shr {
+    ($inst:ident, $n:literal, ($in_reg:ident) $in:expr) => {{
         let out: _;
-        std::arch::asm!(concat!("vpsrlq {dst}, {src}, ", $n), src = in($in_reg) $in, dst = lateout($in_reg) out);
+        std::arch::asm!(concat!(stringify!($inst), " {dst}, {src}, ", $n), src = in($in_reg) $in, dst = lateout($in_reg) out);
         out
     }};
+    (16, $n:literal, ($in_reg:ident) $in:expr) => {
+        $crate::simd::shr!(vpsrlw, $n, ($in_reg) $in)
+    };
+    (32, $n:literal, ($in_reg:ident) $in:expr) => {
+        $crate::simd::shr!(vpsrld, $n, ($in_reg) $in)
+    };
+    (64, $n:literal, ($in_reg:ident) $in:expr) => {
+        $crate::simd::shr!(vpsrlq, $n, ($in_reg) $in)
+    };
 }
-pub use __simd__shr_64 as shr_64;
+pub use __simd__shr as shr;
/*impl SimdWiden for arch::__m128i {
#[inline(always)]
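How the generalized macro dispatches: arms are tried in order, and a width literal such as 64 is not an ident, so it falls past the $inst arm to its width-specific arm, which re-expands with the concrete mnemonic. A runnable stand-in that builds the instruction string instead of emitting asm (the demo names are mine):

    // Same dispatch trick as __simd__shr, minus the asm! so it runs anywhere.
    macro_rules! shr_demo {
        ($inst:ident, $n:literal) => {
            // ident arm: splice the mnemonic into the instruction template
            concat!(stringify!($inst), " {dst}, {src}, ", $n)
        };
        (16, $n:literal) => { shr_demo!(vpsrlw, $n) };
        (32, $n:literal) => { shr_demo!(vpsrld, $n) };
        (64, $n:literal) => { shr_demo!(vpsrlq, $n) };
    }

    fn main() {
        // 64 fails the $inst:ident arm, matches the (64, ...) arm, re-expands
        assert_eq!(shr_demo!(64, 4), "vpsrlq {dst}, {src}, 4");
        assert_eq!(shr_demo!(16, 4), "vpsrlw {dst}, {src}, 4");
    }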