Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions sha1/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ block-buffer = "0.9"
opaque-debug = "0.2"
sha1-asm = { version = "0.4", optional = true }
libc = { version = "0.2.68", optional = true }
cfg-if = "0.1"

[dev-dependencies]
digest = { version = "0.9", features = ["dev"] }
Expand Down
8 changes: 0 additions & 8 deletions sha1/src/aarch64.rs

This file was deleted.

32 changes: 32 additions & 0 deletions sha1/src/compress.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
use digest::consts::U64;
use digest::generic_array::GenericArray;

// Backend modules. `aarch64` and `x86` are self-gated via inner
// `#![cfg(...)]` attributes; `soft` is also needed as the runtime
// fallback when `asm-aarch64` is enabled.
mod aarch64;
#[cfg(any(not(feature = "asm"), feature = "asm-aarch64"))]
mod soft;
mod x86;

// Select the `compress_inner` backend at compile time:
// runtime-detected aarch64 asm, generic asm, x86 SHA-NI/soft hybrid,
// or the portable software implementation.
cfg_if::cfg_if! {
    if #[cfg(feature = "asm-aarch64")] {
        use aarch64::compress as compress_inner;
    } else if #[cfg(feature = "asm")] {
        // TODO: replace after sha1-asm rework
        // BUGFIX: the parameter must be a slice of 64-byte blocks
        // (`&[[u8; 64]]`, matching the call in `compress` below and the
        // other backends), not a single block `&[u8; 64]`; the loop
        // iterates over whole blocks, one `sha1_asm::compress` call each.
        fn compress_inner(state: &mut [u32; 5], blocks: &[[u8; 64]]) {
            for block in blocks {
                sha1_asm::compress(state, block);
            }
        }
    } else if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
        use x86::compress as compress_inner;
    } else {
        use soft::compress as compress_inner;
    }
}

/// Public block-level compression entry point.
///
/// Reinterprets the slice of `GenericArray<u8, U64>` blocks as plain
/// `[u8; 64]` arrays and hands it to the selected backend.
pub fn compress(state: &mut [u32; 5], blocks: &[GenericArray<u8, U64>]) {
    let ptr = blocks.as_ptr() as *const [u8; 64];
    // SAFETY: GenericArray<u8, U64> and [u8; 64] have
    // exactly the same memory layout, so a slice of one may be viewed
    // as a slice of the other with the same length.
    #[allow(unsafe_code)]
    let blocks: &[[u8; 64]] = unsafe { core::slice::from_raw_parts(ptr, blocks.len()) };
    compress_inner(state, blocks);
}
21 changes: 21 additions & 0 deletions sha1/src/compress/aarch64.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#![cfg(feature = "asm-aarch64")]
use libc::{getauxval, AT_HWCAP, HWCAP_SHA1};

/// Queries the kernel-provided hardware-capability bitmask (auxv
/// `AT_HWCAP`) and reports whether the SHA1 CPU extension is present.
fn sha1_supported() -> bool {
    // SAFETY note: getauxval is a plain libc call with no pointer
    // arguments; it is always safe to invoke.
    #[allow(unsafe_code)]
    let caps: u64 = unsafe { getauxval(AT_HWCAP) };
    caps & HWCAP_SHA1 != 0
}

/// aarch64 compression entry point with runtime feature detection.
///
/// BUGFIX: the parameter must be a slice of 64-byte blocks
/// (`&[[u8; 64]]`) rather than a single `&[u8; 64]` — the body iterates
/// over whole blocks and forwards the slice to `soft::compress`, which
/// also takes `&[[u8; 64]]`, and the caller in `compress.rs` passes a
/// block slice.
pub fn compress(state: &mut [u32; 5], blocks: &[[u8; 64]]) {
    // TODO: Replace this platform-specific call with is_aarch64_feature_detected!("sha1") once
    // that macro is stabilised and https://github.com/rust-lang/rfcs/pull/2725 is implemented
    // to let us use it on no_std.
    if sha1_supported() {
        // Hardware path: one asm call per 64-byte block.
        for block in blocks {
            sha1_asm::compress(state, block);
        }
    } else {
        // CPU lacks the SHA1 extension at runtime: use the portable
        // software implementation instead.
        super::soft::compress(state, blocks);
    }
}
95 changes: 13 additions & 82 deletions sha1/src/utils.rs → sha1/src/compress/soft.rs
Original file line number Diff line number Diff line change
@@ -1,10 +1,6 @@
#![allow(clippy::many_single_char_names)]
use crate::consts::{BLOCK_LEN, K0, K1, K2, K3};
use core::convert::TryInto;
use digest::generic_array::typenum::U64;
use digest::generic_array::GenericArray;

type Block = GenericArray<u8, U64>;

#[inline(always)]
fn add(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
Expand All @@ -21,27 +17,18 @@ fn xor(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
[a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2], a[3] ^ b[3]]
}

/// Not an intrinsic, but gets the first element of a vector.
#[inline]
pub fn sha1_first(w0: [u32; 4]) -> u32 {
    // Destructure instead of indexing; only the leading word matters.
    let [first, ..] = w0;
    first
}

/// Not an intrinsic, but adds a word to the first element of a vector.
#[inline]
pub fn sha1_first_add(e: u32, w0: [u32; 4]) -> [u32; 4] {
    // Copy the vector and bump only lane 0, wrapping on overflow.
    let mut result = w0;
    result[0] = result[0].wrapping_add(e);
    result
}

/// Emulates `llvm.x86.sha1msg1` intrinsic.
///
/// XORs each word of `a` with the word two positions ahead, taking the
/// two positions that run past the end of `a` from the start of `b`.
fn sha1msg1(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
    let extended = [a[0], a[1], a[2], a[3], b[0], b[1]];
    let mut out = [0u32; 4];
    for (i, slot) in out.iter_mut().enumerate() {
        *slot = extended[i] ^ extended[i + 2];
    }
    out
}

/// Emulates `llvm.x86.sha1msg2` intrinsic.
fn sha1msg2(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
let [x0, x1, x2, x3] = a;
let [_, w13, w14, w15] = b;
Expand All @@ -54,21 +41,11 @@ fn sha1msg2(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
[w16, w17, w18, w19]
}

/// Performs 4 rounds of the message schedule update.
/*
pub fn sha1_schedule_x4(v0: [u32; 4], v1: [u32; 4], v2: [u32; 4], v3: [u32; 4]) -> [u32; 4] {
sha1msg2(sha1msg1(v0, v1) ^ v2, v3)
}
*/

/// Emulates `llvm.x86.sha1nexte` intrinsic.
#[inline]
fn sha1_first_half(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
sha1_first_add(sha1_first(abcd).rotate_left(30), msg)
sha1_first_add(abcd[0].rotate_left(30), msg)
}

/// Emulates `llvm.x86.sha1rnds4` intrinsic.
/// Performs 4 rounds of the message block digest.
fn sha1_digest_round_x4(abcd: [u32; 4], work: [u32; 4], i: i8) -> [u32; 4] {
const K0V: [u32; 4] = [K0, K0, K0, K0];
const K1V: [u32; 4] = [K1, K1, K1, K1];
Expand All @@ -84,7 +61,6 @@ fn sha1_digest_round_x4(abcd: [u32; 4], work: [u32; 4], i: i8) -> [u32; 4] {
}
}

/// Not an intrinsic, but helps emulate `llvm.x86.sha1rnds4` intrinsic.
fn sha1rnds4c(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
let [mut a, mut b, mut c, mut d] = abcd;
let [t, u, v, w] = msg;
Expand Down Expand Up @@ -123,7 +99,6 @@ fn sha1rnds4c(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
[b, c, d, e]
}

/// Not an intrinsic, but helps emulate `llvm.x86.sha1rnds4` intrinsic.
fn sha1rnds4p(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
let [mut a, mut b, mut c, mut d] = abcd;
let [t, u, v, w] = msg;
Expand Down Expand Up @@ -162,7 +137,6 @@ fn sha1rnds4p(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
[b, c, d, e]
}

/// Not an intrinsic, but helps emulate `llvm.x86.sha1rnds4` intrinsic.
fn sha1rnds4m(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
let [mut a, mut b, mut c, mut d] = abcd;
let [t, u, v, w] = msg;
Expand Down Expand Up @@ -201,7 +175,7 @@ fn sha1rnds4m(abcd: [u32; 4], msg: [u32; 4]) -> [u32; 4] {
[b, c, d, e]
}

/// Process a block with the SHA-1 algorithm.
#[inline(always)]
fn sha1_digest_block_u32(state: &mut [u32; 5], block: &[u32; 16]) {
macro_rules! schedule {
($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {
Expand All @@ -216,7 +190,6 @@ fn sha1_digest_block_u32(state: &mut [u32; 5], block: &[u32; 16]) {
}

// Rounds 0..20
// TODO: replace with `[u32; 4]::load`
let mut h0 = [state[0], state[1], state[2], state[3]];
let mut w0 = [block[0], block[1], block[2], block[3]];
let mut h1 = sha1_digest_round_x4(h0, sha1_first_add(state[4], w0), 0);
Expand Down Expand Up @@ -265,7 +238,7 @@ fn sha1_digest_block_u32(state: &mut [u32; 5], block: &[u32; 16]) {
w4 = schedule!(w0, w1, w2, w3);
h0 = rounds4!(h1, h0, w4, 3);

let e = sha1_first(h1).rotate_left(30);
let e = h1[0].rotate_left(30);
let [a, b, c, d] = h0;

state[0] = state[0].wrapping_add(a);
Expand All @@ -275,58 +248,16 @@ fn sha1_digest_block_u32(state: &mut [u32; 5], block: &[u32; 16]) {
state[4] = state[4].wrapping_add(e);
}

/// Process a block with the SHA-1 algorithm. (See more...)
///
/// SHA-1 is a cryptographic hash function, and as such, it operates
/// on an arbitrary number of bytes. This function operates on a fixed
/// number of bytes. If you call this function with anything other than
/// 64 bytes, then it will panic! This function takes two arguments:
///
/// * `state` is reference to an **array** of 5 words.
/// * `block` is reference to a **slice** of 64 bytes.
///
/// If you want the function that performs a message digest on an arbitrary
/// number of bytes, then see also the `Sha1` struct above.
///
/// # Implementation
///
/// First, some background. Both ARM and Intel are releasing documentation
/// that they plan to include instruction set extensions for SHA1 and SHA256
/// sometime in the near future. Second, LLVM won't lower these intrinsics yet,
/// so these functions were written emulate these instructions. Finally,
/// the block function implemented with these emulated intrinsics turned out
/// to be quite fast! What follows is a discussion of this CPU-level view
/// of the SHA-1 algorithm and how it relates to the mathematical definition.
///
/// The SHA instruction set extensions can be divided up into two categories:
///
/// * message work schedule update calculation ("schedule" v., "work" n.)
/// * message block 80-round digest calculation ("digest" v., "block" n.)
///
/// The schedule-related functions can be used to easily perform 4 rounds
/// of the message work schedule update calculation, as shown below:
///
/// ```ignore
/// macro_rules! schedule_x4 {
/// ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => (
/// sha1msg2(sha1msg1($v0, $v1) ^ $v2, $v3)
/// )
/// }
///
/// macro_rules! round_x4 {
/// ($h0:ident, $h1:ident, $wk:expr, $i:expr) => (
/// sha1rnds4($h0, sha1_first_half($h1, $wk), $i)
/// )
/// }
/// ```
///
/// and also shown above is how the digest-related functions can be used to
/// perform 4 rounds of the message block digest calculation.
///
pub fn compress(state: &mut [u32; 5], block: &Block) {
pub fn compress(state: &mut [u32; 5], blocks: &[[u8; 64]]) {
let mut block_u32 = [0u32; BLOCK_LEN];
for (o, chunk) in block_u32.iter_mut().zip(block.chunks_exact(4)) {
*o = u32::from_be_bytes(chunk.try_into().unwrap());
// since LLVM can't properly use aliasing yet it will make
// unnecessary state stores without this copy
let mut state_cpy = *state;
for block in blocks.iter() {
for (o, chunk) in block_u32.iter_mut().zip(block.chunks_exact(4)) {
*o = u32::from_be_bytes(chunk.try_into().unwrap());
}
sha1_digest_block_u32(&mut state_cpy, &block_u32);
}
sha1_digest_block_u32(state, &block_u32);
*state = state_cpy;
}
Loading