mirror of
https://github.com/signalapp/libsignal.git
synced 2024-09-19 19:42:19 +02:00
keytrans: Add libsignal-keytrans crate
Co-authored-by: Brendan McMillion <brendanmcmillion@gmail.com>
This commit is contained in:
parent
a8bc95bc7e
commit
33836ff5c2
87
Cargo.lock
generated
87
Cargo.lock
generated
@ -718,6 +718,12 @@ dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "const-oid"
|
||||
version = "0.9.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
|
||||
|
||||
[[package]]
|
||||
name = "const-str"
|
||||
version = "0.5.7"
|
||||
@ -928,6 +934,16 @@ dependencies = [
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "der"
|
||||
version = "0.7.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0"
|
||||
dependencies = [
|
||||
"const-oid",
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deranged"
|
||||
version = "0.3.11"
|
||||
@ -1101,6 +1117,30 @@ version = "1.0.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
|
||||
|
||||
[[package]]
|
||||
name = "ed25519"
|
||||
version = "2.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53"
|
||||
dependencies = [
|
||||
"pkcs8",
|
||||
"signature",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ed25519-dalek"
|
||||
version = "2.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871"
|
||||
dependencies = [
|
||||
"curve25519-dalek",
|
||||
"ed25519",
|
||||
"serde",
|
||||
"sha2",
|
||||
"subtle",
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "either"
|
||||
version = "1.13.0"
|
||||
@ -2090,6 +2130,24 @@ dependencies = [
|
||||
"log-panics",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libsignal-keytrans"
|
||||
version = "0.0.1"
|
||||
dependencies = [
|
||||
"assert_matches",
|
||||
"async-trait",
|
||||
"curve25519-dalek",
|
||||
"displaydoc",
|
||||
"ed25519-dalek",
|
||||
"hex-literal",
|
||||
"hmac",
|
||||
"proptest",
|
||||
"prost",
|
||||
"prost-build",
|
||||
"sha2",
|
||||
"test-case",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libsignal-message-backup"
|
||||
version = "0.1.0"
|
||||
@ -2937,6 +2995,16 @@ version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pkcs8"
|
||||
version = "0.10.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
|
||||
dependencies = [
|
||||
"der",
|
||||
"spki",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "plotters"
|
||||
version = "0.3.6"
|
||||
@ -3832,6 +3900,15 @@ dependencies = [
|
||||
"thiserror",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "signature"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de"
|
||||
dependencies = [
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "siphasher"
|
||||
version = "0.3.11"
|
||||
@ -3896,6 +3973,16 @@ version = "0.9.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
|
||||
|
||||
[[package]]
|
||||
name = "spki"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
|
||||
dependencies = [
|
||||
"base64ct",
|
||||
"der",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "static_assertions"
|
||||
version = "1.1.0"
|
||||
|
@ -4,6 +4,7 @@ members = [
|
||||
"rust/attest",
|
||||
"rust/crypto",
|
||||
"rust/device-transfer",
|
||||
"rust/keytrans",
|
||||
"rust/media",
|
||||
"rust/message-backup",
|
||||
"rust/net",
|
||||
|
@ -46,10 +46,10 @@
|
||||
|
||||
<h2>Overview of licenses:</h2>
|
||||
<ul class="licenses-overview">
|
||||
<li><a href="#MIT">MIT License</a> (307)</li>
|
||||
<li><a href="#AGPL-3.0">GNU Affero General Public License v3.0</a> (27)</li>
|
||||
<li><a href="#MIT">MIT License</a> (309)</li>
|
||||
<li><a href="#AGPL-3.0">GNU Affero General Public License v3.0</a> (28)</li>
|
||||
<li><a href="#Apache-2.0">Apache License 2.0</a> (12)</li>
|
||||
<li><a href="#BSD-3-Clause">BSD 3-Clause "New" or "Revised" License</a> (8)</li>
|
||||
<li><a href="#BSD-3-Clause">BSD 3-Clause "New" or "Revised" License</a> (9)</li>
|
||||
<li><a href="#ISC">ISC License</a> (6)</li>
|
||||
<li><a href="#OpenSSL">OpenSSL License</a> (2)</li>
|
||||
<li><a href="#BSD-2-Clause">BSD 2-Clause "Simplified" License</a> (1)</li>
|
||||
@ -747,6 +747,7 @@ For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<li><a href="https://crates.io/crates/libsignal-core">libsignal-core 0.1.0</a></li>
|
||||
<li><a href="https://crates.io/crates/signal-crypto">signal-crypto 0.1.0</a></li>
|
||||
<li><a href="https://crates.io/crates/device-transfer">device-transfer 0.1.0</a></li>
|
||||
<li><a href="https://crates.io/crates/libsignal-keytrans">libsignal-keytrans 0.0.1</a></li>
|
||||
<li><a href="https://crates.io/crates/signal-media">signal-media 0.1.0</a></li>
|
||||
<li><a href="https://crates.io/crates/libsignal-message-backup">libsignal-message-backup 0.1.0</a></li>
|
||||
<li><a href="https://crates.io/crates/libsignal-message-backup-macros">libsignal-message-backup-macros 0.1.0</a></li>
|
||||
@ -2225,6 +2226,42 @@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.</pre>
|
||||
</li>
|
||||
<li class="license">
|
||||
<h3 id="BSD-3-Clause">BSD 3-Clause "New" or "Revised" License</h3>
|
||||
<h4>Used by:</h4>
|
||||
<ul class="license-used-by">
|
||||
<li><a href="https://github.com/dalek-cryptography/curve25519-dalek/tree/main/ed25519-dalek">ed25519-dalek 2.1.1</a></li>
|
||||
</ul>
|
||||
<pre class="license-text">Copyright (c) 2017-2019 isis agora lovecruft. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
</pre>
|
||||
</li>
|
||||
<li class="license">
|
||||
<h3 id="BSD-3-Clause">BSD 3-Clause "New" or "Revised" License</h3>
|
||||
@ -4832,6 +4869,40 @@ The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
</pre>
|
||||
</li>
|
||||
<li class="license">
|
||||
<h3 id="MIT">MIT License</h3>
|
||||
<h4>Used by:</h4>
|
||||
<ul class="license-used-by">
|
||||
<li><a href="https://github.com/RustCrypto/signatures/tree/master/ed25519">ed25519 2.2.3</a></li>
|
||||
<li><a href="https://github.com/RustCrypto/traits/tree/master/signature">signature 2.2.0</a></li>
|
||||
</ul>
|
||||
<pre class="license-text">Copyright (c) 2018-2023 RustCrypto Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
|
@ -669,7 +669,7 @@ For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
|
||||
```
|
||||
|
||||
## attest 0.1.0, libsignal-ffi 0.55.0, libsignal-jni 0.55.0, libsignal-jni-testing 0.55.0, libsignal-node 0.55.0, signal-neon-futures 0.1.0, signal-neon-futures-tests 0.1.0, libsignal-bridge 0.1.0, libsignal-bridge-macros 0.1.0, libsignal-bridge-testing 0.1.0, libsignal-bridge-types 0.1.0, libsignal-core 0.1.0, signal-crypto 0.1.0, device-transfer 0.1.0, signal-media 0.1.0, libsignal-message-backup 0.1.0, libsignal-message-backup-macros 0.1.0, libsignal-net 0.1.0, signal-pin 0.1.0, poksho 0.7.0, libsignal-protocol 0.1.0, libsignal-svr3 0.1.0, usernames 0.1.0, zkcredential 0.1.0, zkgroup 0.9.0
|
||||
## attest 0.1.0, libsignal-ffi 0.55.0, libsignal-jni 0.55.0, libsignal-jni-testing 0.55.0, libsignal-node 0.55.0, signal-neon-futures 0.1.0, signal-neon-futures-tests 0.1.0, libsignal-bridge 0.1.0, libsignal-bridge-macros 0.1.0, libsignal-bridge-testing 0.1.0, libsignal-bridge-types 0.1.0, libsignal-core 0.1.0, signal-crypto 0.1.0, device-transfer 0.1.0, libsignal-keytrans 0.0.1, signal-media 0.1.0, libsignal-message-backup 0.1.0, libsignal-message-backup-macros 0.1.0, libsignal-net 0.1.0, signal-pin 0.1.0, poksho 0.7.0, libsignal-protocol 0.1.0, libsignal-svr3 0.1.0, usernames 0.1.0, zkcredential 0.1.0, zkgroup 0.9.0
|
||||
|
||||
```
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
@ -2112,6 +2112,40 @@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
```
|
||||
|
||||
## ed25519-dalek 2.1.1
|
||||
|
||||
```
|
||||
Copyright (c) 2017-2019 isis agora lovecruft. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
```
|
||||
|
||||
## x25519-dalek 2.0.1
|
||||
|
||||
```
|
||||
@ -4510,6 +4544,37 @@ DEALINGS IN THE SOFTWARE.
|
||||
|
||||
```
|
||||
|
||||
## ed25519 2.2.3, signature 2.2.0
|
||||
|
||||
```
|
||||
Copyright (c) 2018-2023 RustCrypto Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
|
||||
```
|
||||
|
||||
## try-lock 0.2.5
|
||||
|
||||
```
|
||||
|
@ -924,7 +924,7 @@ You should also get your employer (if you work as a programmer) or school, if an
|
||||
<key>License</key>
|
||||
<string>GNU Affero General Public License v3.0</string>
|
||||
<key>Title</key>
|
||||
<string>attest 0.1.0, libsignal-ffi 0.55.0, libsignal-jni 0.55.0, libsignal-jni-testing 0.55.0, libsignal-node 0.55.0, signal-neon-futures 0.1.0, signal-neon-futures-tests 0.1.0, libsignal-bridge 0.1.0, libsignal-bridge-macros 0.1.0, libsignal-bridge-testing 0.1.0, libsignal-bridge-types 0.1.0, libsignal-core 0.1.0, signal-crypto 0.1.0, device-transfer 0.1.0, signal-media 0.1.0, libsignal-message-backup 0.1.0, libsignal-message-backup-macros 0.1.0, libsignal-net 0.1.0, signal-pin 0.1.0, poksho 0.7.0, libsignal-protocol 0.1.0, libsignal-svr3 0.1.0, usernames 0.1.0, zkcredential 0.1.0, zkgroup 0.9.0</string>
|
||||
<string>attest 0.1.0, libsignal-ffi 0.55.0, libsignal-jni 0.55.0, libsignal-jni-testing 0.55.0, libsignal-node 0.55.0, signal-neon-futures 0.1.0, signal-neon-futures-tests 0.1.0, libsignal-bridge 0.1.0, libsignal-bridge-macros 0.1.0, libsignal-bridge-testing 0.1.0, libsignal-bridge-types 0.1.0, libsignal-core 0.1.0, signal-crypto 0.1.0, device-transfer 0.1.0, libsignal-keytrans 0.0.1, signal-media 0.1.0, libsignal-message-backup 0.1.0, libsignal-message-backup-macros 0.1.0, libsignal-net 0.1.0, signal-pin 0.1.0, poksho 0.7.0, libsignal-protocol 0.1.0, libsignal-svr3 0.1.0, usernames 0.1.0, zkcredential 0.1.0, zkgroup 0.9.0</string>
|
||||
<key>Type</key>
|
||||
<string>PSGroupSpecifier</string>
|
||||
</dict>
|
||||
@ -2174,6 +2174,44 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.</string>
|
||||
<key>Type</key>
|
||||
<string>PSGroupSpecifier</string>
|
||||
</dict>
|
||||
<dict>
|
||||
<key>FooterText</key>
|
||||
<string>Copyright (c) 2017-2019 isis agora lovecruft. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
</string>
|
||||
<key>License</key>
|
||||
<string>BSD 3-Clause "New" or "Revised" License</string>
|
||||
<key>Title</key>
|
||||
<string>ed25519-dalek 2.1.1</string>
|
||||
<key>Type</key>
|
||||
<string>PSGroupSpecifier</string>
|
||||
</dict>
|
||||
<dict>
|
||||
<key>FooterText</key>
|
||||
<string>Copyright (c) 2017-2021 isis agora lovecruft. All rights reserved.
|
||||
@ -4892,6 +4930,41 @@ DEALINGS IN THE SOFTWARE.
|
||||
<key>Type</key>
|
||||
<string>PSGroupSpecifier</string>
|
||||
</dict>
|
||||
<dict>
|
||||
<key>FooterText</key>
|
||||
<string>Copyright (c) 2018-2023 RustCrypto Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
</string>
|
||||
<key>License</key>
|
||||
<string>MIT License</string>
|
||||
<key>Title</key>
|
||||
<string>ed25519 2.2.3, signature 2.2.0</string>
|
||||
<key>Type</key>
|
||||
<string>PSGroupSpecifier</string>
|
||||
</dict>
|
||||
<dict>
|
||||
<key>FooterText</key>
|
||||
<string>Copyright (c) 2018-2023 Sean McArthur
|
||||
|
29
rust/keytrans/Cargo.toml
Normal file
29
rust/keytrans/Cargo.toml
Normal file
@ -0,0 +1,29 @@
|
||||
#
|
||||
# Copyright (C) 2024 Signal Messenger, LLC.
|
||||
# SPDX-License-Identifier: AGPL-3.0-only
|
||||
#
|
||||
|
||||
[package]
|
||||
name = "libsignal-keytrans"
|
||||
version = "0.0.1"
|
||||
authors = ["Signal Messenger LLC"]
|
||||
edition = "2021"
|
||||
license = "AGPL-3.0-only"
|
||||
|
||||
[dependencies]
|
||||
async-trait = "0.1.41"
|
||||
curve25519-dalek = { version = "4.1.3" }
|
||||
displaydoc = "0.2"
|
||||
ed25519-dalek = "2.1.0"
|
||||
hmac = "0.12.1"
|
||||
prost = "0.13"
|
||||
sha2 = "0.10"
|
||||
|
||||
[build-dependencies]
|
||||
prost-build = "0.13"
|
||||
|
||||
[dev-dependencies]
|
||||
assert_matches = "1.5.0"
|
||||
hex-literal = "0.4.1"
|
||||
proptest = "1.5.0"
|
||||
test-case = "3.3.1"
|
11
rust/keytrans/build.rs
Normal file
11
rust/keytrans/build.rs
Normal file
@ -0,0 +1,11 @@
|
||||
fn main() {
|
||||
let protos = ["src/wire.proto"];
|
||||
let mut prost_build = prost_build::Config::new();
|
||||
prost_build.protoc_arg("--experimental_allow_proto3_optional");
|
||||
prost_build
|
||||
.compile_protos(&protos, &["src"])
|
||||
.expect("Protobufs in src are valid");
|
||||
for proto in &protos {
|
||||
println!("cargo:rerun-if-changed={}", proto);
|
||||
}
|
||||
}
|
51
rust/keytrans/src/commitments.rs
Normal file
51
rust/keytrans/src/commitments.rs
Normal file
@ -0,0 +1,51 @@
|
||||
//
|
||||
// Copyright 2024 Signal Messenger, LLC.
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
//
|
||||
use hmac::{Hmac, Mac as _};
|
||||
use sha2::Sha256;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
const FIXED_KEY: &[u8] = &[
|
||||
0xd8, 0x21, 0xf8, 0x79, 0xd, 0x97, 0x70, 0x97, 0x96, 0xb4, 0xd7, 0x90, 0x33, 0x57, 0xc3, 0xf5,
|
||||
];
|
||||
|
||||
pub fn commit(search_key: &[u8], data: &[u8], nonce: &[u8; 16]) -> Vec<u8> {
|
||||
// The expected search_key inputs to this function are: an ACI, an E164,
|
||||
// or a username. None should reach 2^16 bound.
|
||||
let key_len: u16 = search_key.len().try_into().expect("search key too large");
|
||||
// The expected data inputs to this function are: an ACI, or
|
||||
// a serialized public key. Neither should reach 2^32 bound.
|
||||
let data_len: u32 = data.len().try_into().expect("data too large");
|
||||
|
||||
let mut mac = HmacSha256::new_from_slice(FIXED_KEY).unwrap();
|
||||
mac.update(nonce);
|
||||
mac.update(&key_len.to_be_bytes());
|
||||
mac.update(search_key);
|
||||
mac.update(&data_len.to_be_bytes());
|
||||
mac.update(data);
|
||||
|
||||
mac.finalize().into_bytes().to_vec()
|
||||
}
|
||||
|
||||
pub fn verify(search_key: &[u8], commitment: &[u8], data: &[u8], nonce: &[u8; 16]) -> bool {
|
||||
// No concern about timing attacks here, as commitments are public.
|
||||
commit(search_key, data, nonce) == commitment
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use hex_literal::hex;
|
||||
use test_case::test_case;
|
||||
|
||||
#[test_case(&[], &[], &hex!("edc3f59798cd87f2f48ec8836e2b6ef425cde9ab121ffdefc93d769db7cebabf") ; "empty")]
|
||||
#[test_case(b"foo", b"bar", &hex!("25df431e884358826fe66f96d65702580104240abd63fa741d9ea3f32914bbf5") ; "case_1")]
|
||||
#[test_case(b"foo1", b"bar", &hex!("6c31a163a7660d1467fc1c997bd78b0a70b8921ca76b7eb0c6ca077f1e5e121e") ; "case_2")]
|
||||
#[test_case(b"foo", b"bar1", &hex!("5de6c6c9ed4bf48122f6c851c80e6eacbf885947f02f974cdc794b14c8e975f1") ; "case_3")]
|
||||
fn test_commit(key: &[u8], data: &[u8], expected: &[u8]) {
|
||||
let got = commit(key, data, &[0u8; 16]);
|
||||
assert_eq!(got, expected);
|
||||
}
|
||||
}
|
234
rust/keytrans/src/guide.rs
Normal file
234
rust/keytrans/src/guide.rs
Normal file
@ -0,0 +1,234 @@
|
||||
//
|
||||
// Copyright 2024 Signal Messenger, LLC.
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
//
|
||||
use std::result::Result;
|
||||
|
||||
use crate::implicit;
|
||||
|
||||
/// Set of counters is not monotonic
|
||||
#[derive(Debug, displaydoc::Display)]
|
||||
pub struct InvalidState;
|
||||
|
||||
pub struct VersionedId {
|
||||
id: u64,
|
||||
version: u32,
|
||||
}
|
||||
|
||||
impl VersionedId {
|
||||
pub fn new(id: u64, version: u32) -> Self {
|
||||
Self { id, version }
|
||||
}
|
||||
}
|
||||
|
||||
/// ProofGuide is used for determining which nodes should be accessed when
|
||||
/// conducting searches in the Implicit Binary Search Tree.
|
||||
pub struct ProofGuide {
|
||||
/// Position of the key's first occurrence in the log.
|
||||
pos: u64,
|
||||
/// Number of leaf entries in the log.
|
||||
n: u64,
|
||||
/// The version of the key being searched for.
|
||||
version: u32,
|
||||
/// List of ids to fetch as part of search.
|
||||
ids: Vec<u64>,
|
||||
/// List of ids that were fetched, and the counter found.
|
||||
sorted: Vec<VersionedId>,
|
||||
// Whether `ids` represent the frontier.
|
||||
is_frontier: bool,
|
||||
}
|
||||
|
||||
impl ProofGuide {
|
||||
pub fn new(version: Option<u32>, pos: u64, n: u64) -> Self {
|
||||
match version {
|
||||
None => Self::for_most_recent(pos, n),
|
||||
Some(version) => Self::for_version(version, pos, n),
|
||||
}
|
||||
}
|
||||
|
||||
fn for_version(version: u32, pos: u64, n: u64) -> Self {
|
||||
Self {
|
||||
pos,
|
||||
n,
|
||||
version,
|
||||
ids: vec![implicit::root(pos, n)],
|
||||
sorted: vec![],
|
||||
|
||||
is_frontier: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn for_most_recent(pos: u64, n: u64) -> Self {
|
||||
Self {
|
||||
pos,
|
||||
n,
|
||||
version: 0,
|
||||
ids: implicit::frontier(pos, n),
|
||||
sorted: vec![],
|
||||
|
||||
is_frontier: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if the search proof is finished.
|
||||
fn poll(&mut self) -> Result<bool, InvalidState> {
|
||||
if self.ids.len() > self.sorted.len() {
|
||||
return Ok(false);
|
||||
}
|
||||
self.sorted.sort_by_key(|versioned_id| versioned_id.id);
|
||||
|
||||
// Check that the list of counters is monotonic.
|
||||
let sorted = self.sorted.windows(2).all(|w| w[0].version <= w[1].version);
|
||||
if !sorted {
|
||||
return Err(InvalidState);
|
||||
}
|
||||
|
||||
// Determine the "last" id looked up. Generally this is actually just
|
||||
// the last id that was looked up, but if we just fetched the frontier
|
||||
// then we start searching at the first element of the frontier with the
|
||||
// greatest version.
|
||||
let last = if self.is_frontier {
|
||||
self.version = self.sorted[self.sorted.len() - 1].version;
|
||||
self.is_frontier = false;
|
||||
|
||||
self.sorted
|
||||
.iter()
|
||||
.find(|versioned_id| versioned_id.version == self.version)
|
||||
.expect("last element of array must match, if no earlier one does")
|
||||
.id
|
||||
} else {
|
||||
self.ids[self.ids.len() - 1]
|
||||
};
|
||||
if implicit::is_leaf(last) {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// Find the counter associated with the last id looked up.
|
||||
let ctr = self
|
||||
.sorted
|
||||
.iter()
|
||||
.find(|versioned_id| versioned_id.id == last)
|
||||
.expect("last id looked up must have corresponding entry in sorted")
|
||||
.version;
|
||||
|
||||
// Find the next id to lookup by moving left or right depending on ctr.
|
||||
let next_id = if ctr < self.version {
|
||||
if last == self.n - 1 {
|
||||
return Ok(true);
|
||||
}
|
||||
implicit::right(last, self.pos, self.n)
|
||||
} else {
|
||||
if last == self.pos {
|
||||
return Ok(true);
|
||||
}
|
||||
implicit::left(last, self.pos, self.n)
|
||||
};
|
||||
self.ids.push(next_id);
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
/// Returns the next id to fetch from the database.
|
||||
fn next_id(&self) -> u64 {
|
||||
self.ids[self.sorted.len()]
|
||||
}
|
||||
|
||||
/// Adds an id-counter pair to the guide.
|
||||
pub fn insert(&mut self, id: u64, ctr: u32) {
|
||||
self.sorted.push(VersionedId::new(id, ctr));
|
||||
}
|
||||
|
||||
// Returns the index that represents the final search result.
|
||||
fn result(self) -> Option<(usize, u64)> {
|
||||
// Must only be called after poll returned true.
|
||||
assert!(!self.is_frontier, "result() called unexpectedly");
|
||||
|
||||
let VersionedId {
|
||||
id: smallest_id,
|
||||
version: _,
|
||||
} = self
|
||||
.sorted
|
||||
.iter()
|
||||
// Just using find (== version) would iterate over all the items (> version) unnecessarily
|
||||
.find(|versioned_id| versioned_id.version >= self.version)
|
||||
.filter(|versioned_id| versioned_id.version == self.version)
|
||||
.or(None)?;
|
||||
|
||||
// Return the index of the search that contains the result we want.
|
||||
self.ids
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.find(|(_, id)| id == smallest_id)
|
||||
}
|
||||
|
||||
/// Iterates over the ProofGuide by continuously calling `poll` until it returns true.
|
||||
/// Invokes `step` on each iteration passing it the mutable reference to the guide
|
||||
/// as well as the current id for this step.
|
||||
pub fn consume<StepF, E>(mut self, mut step: StepF) -> Result<Option<(usize, u64)>, E>
|
||||
where
|
||||
StepF: FnMut(&mut Self, u64) -> Result<(), E>,
|
||||
E: From<InvalidState>,
|
||||
{
|
||||
loop {
|
||||
if self.poll()? {
|
||||
return Ok(self.result());
|
||||
}
|
||||
let next_id = self.next_id();
|
||||
step(&mut self, next_id)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use test_case::test_case;

    use super::*;

    /// Runs `guide` to completion, answering every requested id with version 0
    /// below `target` and version 1 at or above it, and asserting that every
    /// requested id stays inside `[start, end)`.
    ///
    /// Returns the guide's final result together with the requested ids, in
    /// request order.
    fn execute_guide(
        guide: ProofGuide,
        start: u64,
        end: u64,
        target: u64,
    ) -> (Option<(usize, u64)>, Vec<u64>) {
        let mut ids = vec![];

        let result = guide.consume(|guide, id| {
            assert!(
                (start..end).contains(&id),
                "Requested id is outside the expected range [start, end)"
            );

            ids.push(id);
            if id < target {
                guide.insert(id, 0);
            } else {
                guide.insert(id, 1);
            }
            Ok::<(), InvalidState>(())
        });

        (result.unwrap(), ids)
    }

    #[test_case(0, 100; "version 0")]
    #[test_case(1, 399; "version 1")]
    fn test_version_proof_guide(version: u32, expected_id: u64) {
        let guide = ProofGuide::for_version(version, 100, 700);
        let (result, ids) = execute_guide(guide, 100, 700, 399);
        let (result_i, result_id) = result.unwrap();
        // The returned index must point at the id the guide reported.
        assert_eq!(ids[result_i], expected_id);
        assert_eq!(result_id, expected_id);
    }

    #[test_case(700, 701, 100; "target 701")]
    #[test_case(700, 399, 399; "target 399")]
    #[test_case(700, 699, 699; "target 699")]
    #[test_case(701, 700, 700; "target 700")]
    fn test_most_recent_proof_guide(end: u64, target: u64, expected_id: u64) {
        let guide = ProofGuide::for_most_recent(100, end);
        let (result, ids) = execute_guide(guide, 100, end, target);
        let (result_i, result_id) = result.unwrap();
        assert_eq!(ids[result_i], expected_id);
        assert_eq!(result_id, expected_id);
    }
}
|
169
rust/keytrans/src/implicit.rs
Normal file
169
rust/keytrans/src/implicit.rs
Normal file
@ -0,0 +1,169 @@
|
||||
//
|
||||
// Copyright 2024 Signal Messenger, LLC.
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
//
|
||||
//! Implements the Implicit Binary Search Tree.
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
pub use crate::left_balanced::is_leaf;
|
||||
use crate::left_balanced::{left_step, log2, parent_step, right_step};
|
||||
|
||||
fn move_within(mut x: u64, start: u64, n: u64) -> u64 {
|
||||
while !(start..n).contains(&x) {
|
||||
if x < start {
|
||||
x = right_step(x)
|
||||
} else {
|
||||
x = left_step(x)
|
||||
}
|
||||
}
|
||||
x
|
||||
}
|
||||
|
||||
/// Returns the position of the root node of a search.
///
/// Starts from the full tree's root (the largest `2^k - 1` not exceeding the
/// node count) and moves it into the `[start, n)` window.
pub fn root(start: u64, n: u64) -> u64 {
    move_within((1 << log2(n)) - 1, start, n)
}
|
||||
|
||||
/// Returns the left child of an intermediate node.
///
/// The raw left child may fall outside `[start, n)`; it is moved back inside
/// the window if so.
pub fn left(x: u64, start: u64, n: u64) -> u64 {
    move_within(left_step(x), start, n)
}
|
||||
|
||||
/// Returns the right child of an intermediate node.
///
/// The raw right child may fall outside `[start, n)`; it is moved back inside
/// the window if so.
pub fn right(x: u64, start: u64, n: u64) -> u64 {
    move_within(right_step(x), start, n)
}
|
||||
|
||||
fn parent(x: u64, n: u64) -> u64 {
|
||||
let mut p = parent_step(x);
|
||||
while p >= n {
|
||||
p = parent_step(p);
|
||||
}
|
||||
p
|
||||
}
|
||||
|
||||
/// Returns the chain of ancestors of `x` — from its immediate parent up to and
/// including the root of the search window — as a lazy iterator. Yields
/// nothing when `x` is already the root.
fn direct_path(mut x: u64, start: u64, n: u64) -> impl Iterator<Item = u64> {
    let r = root(start, n);
    std::iter::from_fn(move || {
        if x == r {
            None
        } else {
            x = parent(x, n);
            Some(x)
        }
    })
}
|
||||
|
||||
/// Returns the sequence of parent nodes to be checked as part of monitoring a
/// single version of a key.
///
/// This is `x`'s direct path restricted to ancestors that lie to the right of
/// `x` (i.e. have a larger position).
pub fn monitoring_path(x: u64, start: u64, n: u64) -> impl Iterator<Item = u64> {
    direct_path(x, start, n).filter(move |parent| *parent > x)
}
|
||||
|
||||
/// Returns the frontier of the log.
|
||||
pub fn frontier(start: u64, n: u64) -> Vec<u64> {
|
||||
let mut last = root(start, n);
|
||||
let mut frontier = vec![last];
|
||||
while last != n - 1 {
|
||||
last = right(last, start, n);
|
||||
frontier.push(last);
|
||||
}
|
||||
frontier
|
||||
}
|
||||
|
||||
/// Returns the suffix of `frontier` that still needs monitoring: every
/// frontier node strictly to the right of the right-most frontier node already
/// present in `entries`.
///
/// Panics if no frontier node is in `entries`; callers guarantee that
/// monitoring paths always terminate at some frontier node.
fn monitoring_frontier(frontier: &[u64], entries: HashSet<u64>) -> Vec<u64> {
    // Scan from the right to find the right-most covered frontier node.
    let (index, _value) = frontier
        .iter()
        .enumerate()
        .rev()
        .find(|(_index, value)| entries.contains(value))
        .expect("monitoring paths must always terminate at some frontier node");
    frontier[index + 1..].to_vec()
}
|
||||
|
||||
/// Returns the full set of entries that should be checked as part of monitoring
/// all versions of a key.
///
/// The output is the union, in first-seen order and without duplicates, of the
/// monitoring paths of all `entries`, followed by the portion of the frontier
/// not already covered by those paths or the entries themselves.
pub fn full_monitoring_path(entries: &[u64], start: u64, n: u64) -> Vec<u64> {
    let mut path = vec![];
    let mut dedup = HashSet::new();

    for entry in entries {
        for x in monitoring_path(*entry, start, n) {
            if dedup.insert(x) {
                path.push(x);
            }
        }
        // The entry itself counts as covered but is not part of the output.
        dedup.insert(*entry);
    }
    path.extend(monitoring_frontier(&frontier(start, n), dedup));
    path
}
|
||||
|
||||
/// Find the first parent node which is to the right of the entry.
|
||||
fn first_parent_to_the_right(entry: &u64) -> u64 {
|
||||
let mut out = *entry;
|
||||
while out <= *entry {
|
||||
out = parent_step(out);
|
||||
}
|
||||
out + 1
|
||||
}
|
||||
|
||||
pub fn next_monitor(entries: &[u64]) -> u64 {
|
||||
entries
|
||||
.iter()
|
||||
.map(first_parent_to_the_right)
|
||||
.min()
|
||||
.expect("entries array should not be empty")
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    use proptest::prelude::*;

    // A randomly generated search window with start < n.
    #[derive(Clone, Debug)]
    struct StartAndN {
        start: u64,
        n: u64,
    }

    prop_compose! {
        fn start_and_n()(n in 0..=u64::MAX)(start in 0..n, n in Just(n)) -> StartAndN {
            StartAndN { start, n }
        }
    }

    /// Reference implementation of `direct_path` that materializes the whole
    /// path eagerly; used as a model to check the lazy iterator against.
    fn direct_path_eager(mut x: u64, start: u64, n: u64) -> Vec<u64> {
        let r = root(start, n);
        if x == r {
            return vec![];
        }

        let mut d = vec![];
        while x != r {
            x = parent(x, n);
            d.push(x);
        }
        d
    }

    #[test]
    fn direct_path_model() {
        proptest!(|(config in start_and_n())| {
            let StartAndN { start, n } = config;
            let eager = direct_path_eager(start, start, n);
            let lazy: Vec<_> = direct_path(start, start, n).collect();
            assert_eq!(eager, lazy);
        });
    }

    #[test]
    fn root_prop() {
        // Smoke test: root() must not panic or loop forever for any window.
        proptest!(|(config in start_and_n())|{
            let StartAndN { start, n } = config;
            let _ = root(start, n);
        });
    }
}
|
66
rust/keytrans/src/left_balanced.rs
Normal file
66
rust/keytrans/src/left_balanced.rs
Normal file
@ -0,0 +1,66 @@
|
||||
//
|
||||
// Copyright 2024 Signal Messenger, LLC.
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
//
|
||||
|
||||
//! Contains functions that are shared between the two left-balanced search tree
|
||||
//! implementations (the Log Tree and the Implicit Binary Search Tree).
|
||||
//!
|
||||
//! This module works on a flat array representation, where the nodes of the
|
||||
//! tree are numbered from left to right. Leaf nodes are stored in even-numbered
|
||||
//! indices, while intermediate nodes are stored in odd-numbered indices:
|
||||
//!
|
||||
//! ```text
|
||||
//! X
|
||||
//! |
|
||||
//! .---------+---------.
|
||||
//! / \
|
||||
//! X X
|
||||
//! | |
|
||||
//! .---+---. .---+---.
|
||||
//! / \ / \
|
||||
//! X X X X
|
||||
//! / \ / \ / \ /
|
||||
//! / \ / \ / \ /
|
||||
//! X X X X X X X
|
||||
//!
|
||||
//! Index: 0 1 2 3 4 5 6 7 8 9 10 11 12 13
|
||||
//! ```
|
||||
//!
|
||||
//! The bit twiddling functions in this file are all taken from RFC 9420,
|
||||
//! although you will not find more insight on how/why they work there.
|
||||
|
||||
/// Returns floor(log2(n)), with log2(0) defined as 0.
pub fn log2(n: u64) -> u32 {
    if n == 0 {
        0
    } else {
        // Position of the highest set bit.
        63 - n.leading_zeros()
    }
}
|
||||
|
||||
/// Returns true if x is the position of a leaf node.
///
/// Leaves occupy the even indices of the flat array representation.
pub fn is_leaf(x: u64) -> bool {
    x % 2 == 0
}
|
||||
|
||||
/// Returns the level of a node in the tree. Leaves are level 0, their parents
/// are level 1, and so on.
pub fn level(x: u64) -> usize {
    // The level equals the length of the run of 1-bits at the bottom of x,
    // i.e. the number of trailing zeros of its complement.
    (!x).trailing_zeros() as usize
}
|
||||
|
||||
/// Returns the position of the left child of intermediate node `x`.
///
/// Panics if `x` is a leaf.
pub fn left_step(x: u64) -> u64 {
    // A node's level is the run of 1-bits at the bottom of its index.
    let k = x.trailing_ones();
    if k == 0 {
        panic!("leaf node has no children");
    }
    x ^ (1 << (k - 1))
}
|
||||
|
||||
/// Returns the position of the right child of intermediate node `x`.
///
/// Panics if `x` is a leaf.
pub fn right_step(x: u64) -> u64 {
    // A node's level is the run of 1-bits at the bottom of its index.
    let k = x.trailing_ones();
    if k == 0 {
        panic!("leaf node has no children");
    }
    x ^ (3 << (k - 1))
}
|
||||
|
||||
/// Returns the position of the parent of `x` in a tree large enough to
/// contain one.
pub fn parent_step(x: u64) -> u64 {
    // A node's level is the run of 1-bits at the bottom of its index.
    let k = x.trailing_ones();
    // The bit just above the level marker tells us whether x is a left or a
    // right child of its parent.
    let b = (x >> (k + 1)) & 1;
    (x | (1 << k)) ^ (b << (k + 1))
}
|
205
rust/keytrans/src/lib.rs
Normal file
205
rust/keytrans/src/lib.rs
Normal file
@ -0,0 +1,205 @@
|
||||
//
|
||||
// Copyright 2024 Signal Messenger, LLC.
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
//
|
||||
mod commitments;
|
||||
mod guide;
|
||||
mod implicit;
|
||||
mod left_balanced;
|
||||
mod log;
|
||||
mod prefix;
|
||||
mod verify;
|
||||
mod vrf;
|
||||
mod wire;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use ed25519_dalek::VerifyingKey as SigPublicKey;
|
||||
use prost::{DecodeError, Message};
|
||||
|
||||
pub use verify::{
|
||||
truncate_search_response, verify_distinguished, verify_monitor, verify_search, verify_update,
|
||||
};
|
||||
use vrf::PublicKey as VrfPublicKey;
|
||||
pub use wire::{
|
||||
Consistency, FullTreeHead, MonitorKey, MonitorRequest, MonitorResponse, SearchRequest,
|
||||
SearchResponse, TreeHead, UpdateRequest, UpdateResponse, UpdateValue,
|
||||
};
|
||||
|
||||
/// DeploymentMode specifies the way that a transparency log is deployed.
#[derive(PartialEq, Clone, Copy)]
pub enum DeploymentMode {
    /// Deployment with no third party; carries no associated key.
    ContactMonitoring,
    /// Deployment where a third party manages the log; carries that party's
    /// public signature key.
    ThirdPartyManagement(SigPublicKey),
    /// Deployment where a third party audits the log; carries that party's
    /// public signature key.
    ThirdPartyAuditing(SigPublicKey),
}
|
||||
|
||||
impl DeploymentMode {
|
||||
fn byte(&self) -> u8 {
|
||||
match self {
|
||||
DeploymentMode::ContactMonitoring => 1,
|
||||
DeploymentMode::ThirdPartyManagement(_) => 2,
|
||||
DeploymentMode::ThirdPartyAuditing(_) => 3,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_associated_key(&self) -> Option<&SigPublicKey> {
|
||||
match self {
|
||||
DeploymentMode::ContactMonitoring => None,
|
||||
DeploymentMode::ThirdPartyManagement(key) => Some(key),
|
||||
DeploymentMode::ThirdPartyAuditing(key) => Some(key),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// PublicConfig wraps the cryptographic keys needed to interact with a
/// transparency tree.
#[derive(Clone)]
pub struct PublicConfig {
    /// How the log is deployed, including any third-party key.
    pub mode: DeploymentMode,
    /// Ed25519 key used to verify the log's signatures.
    pub signature_key: SigPublicKey,
    /// Public key used to verify the log's VRF outputs.
    pub vrf_key: VrfPublicKey,
}
|
||||
|
||||
/// MonitoringData is the structure retained for each key in the KT server being
/// monitored.
///
/// Persisted as `wire::StoredMonitoringData` by the `LogStore` blanket impl.
pub struct MonitoringData {
    /// The VRF output on the search key.
    pub index: [u8; 32],
    /// The initial position of the key in the log.
    pub pos: u64,
    /// Map from position in log to observed version.
    pub ptrs: HashMap<u64, u32>,
    /// Whether this client owns the key.
    pub owned: bool,
}
|
||||
|
||||
impl MonitoringData {
|
||||
/// The smallest tree size where monitoring would be valuable.
|
||||
pub fn next_monitor(&self) -> u64 {
|
||||
implicit::next_monitor(&self.entries())
|
||||
}
|
||||
|
||||
/// The entries field of a MonitorKey structure.
|
||||
pub fn entries(&self) -> Vec<u64> {
|
||||
let mut out: Vec<u64> = self.ptrs.keys().copied().collect();
|
||||
out.sort();
|
||||
out
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: the doc comment below doubles as the displaydoc::Display format
// string and is therefore part of runtime behavior.
/// Log store operation failed: {0}
#[derive(Debug, displaydoc::Display)]
pub struct LogStoreError(String);
|
||||
|
||||
// Lets protobuf decoding failures surface as storage errors via `?`.
impl From<DecodeError> for LogStoreError {
    fn from(err: DecodeError) -> Self {
        Self(err.to_string())
    }
}
|
||||
|
||||
/// LogStore is the trait implemented by clients for storing local monitoring
/// data specific to a single log.
#[async_trait(?Send)]
pub trait LogStore {
    /// Returns the keys and deployment mode used to verify this log's data.
    async fn public_config(&self) -> Result<PublicConfig, LogStoreError>;

    /// Returns the most recently stored tree head and its root hash, if any.
    async fn get_last_tree_head(&self) -> Result<Option<(TreeHead, [u8; 32])>, LogStoreError>;
    /// Persists a tree head together with its root hash.
    async fn set_last_tree_head(
        &mut self,
        head: TreeHead,
        root: [u8; 32],
    ) -> Result<(), LogStoreError>;

    /// Returns the monitoring data stored for `key`, if any.
    async fn get_data(&self, key: &str) -> Result<Option<MonitoringData>, LogStoreError>;
    /// Persists the monitoring data for `key`.
    async fn set_data(&mut self, key: &str, data: MonitoringData) -> Result<(), LogStoreError>;
}
|
||||
|
||||
/// SimplifiedLogStore is a simpler version of the LogStore trait that clients
/// can implement to avoid needing to deal with serialization themselves.
#[async_trait(?Send)]
pub trait SimplifiedLogStore {
    /// Returns the keys and deployment mode used to verify this log's data.
    async fn public_config(&self) -> Result<PublicConfig, LogStoreError>;

    /// Returns the serialized last tree head, if one was stored.
    async fn get_raw_tree_head(&self) -> Result<Option<Vec<u8>>, LogStoreError>;
    /// Stores the serialized last tree head.
    async fn set_raw_tree_head(&mut self, data: &[u8]) -> Result<(), LogStoreError>;

    /// Returns the serialized monitoring data for `key`, if any.
    async fn get_raw_data(&self, key: &str) -> Result<Option<Vec<u8>>, LogStoreError>;
    /// Stores the serialized monitoring data for `key`. `next_monitor` is the
    /// smallest tree size where monitoring the key becomes valuable —
    /// presumably passed separately so implementations can index it without
    /// deserializing `data` (TODO confirm).
    async fn set_raw_data(
        &mut self,
        key: &str,
        data: &[u8],
        next_monitor: u64,
    ) -> Result<(), LogStoreError>;
    /// Returns the list of stored keys.
    async fn list_keys(&self) -> Result<Vec<String>, LogStoreError>;

    fn as_log_store(&mut self) -> &mut dyn LogStore;
}
|
||||
|
||||
// Blanket implementation: every SimplifiedLogStore is a LogStore. This adapter
// performs the protobuf (de)serialization of tree heads and monitoring data.
#[async_trait(?Send)]
impl<T: SimplifiedLogStore + ?Sized> LogStore for T {
    async fn public_config(&self) -> Result<PublicConfig, LogStoreError> {
        self.public_config().await
    }

    async fn get_last_tree_head(&self) -> Result<Option<(TreeHead, [u8; 32])>, LogStoreError> {
        self.get_raw_tree_head()
            .await?
            .map(|data| {
                let stored = wire::StoredTreeHead::decode(data.as_slice())?;
                let tree_head = stored
                    .tree_head
                    .ok_or_else(|| LogStoreError("malformed tree head found".to_string()))?;
                let root = stored
                    .root
                    .try_into()
                    .map_err(|_| LogStoreError("malformed root found".to_string()))?;
                Ok((tree_head, root))
            })
            // Option<Result<_>> -> Result<Option<_>>
            .transpose()
    }

    async fn set_last_tree_head(
        &mut self,
        head: TreeHead,
        root: [u8; 32],
    ) -> Result<(), LogStoreError> {
        let raw = wire::StoredTreeHead {
            tree_head: Some(head),
            root: root.to_vec(),
        }
        .encode_to_vec();
        self.set_raw_tree_head(&raw).await
    }

    async fn get_data(&self, key: &str) -> Result<Option<MonitoringData>, LogStoreError> {
        self.get_raw_data(key)
            .await?
            .map(|data| {
                let stored = wire::StoredMonitoringData::decode(data.as_slice())?;
                Ok(MonitoringData {
                    // The stored index must be exactly 32 bytes.
                    index: stored
                        .index
                        .try_into()
                        .map_err(|_| LogStoreError("malformed index found".to_string()))?,
                    pos: stored.pos,
                    ptrs: stored.ptrs,
                    owned: stored.owned,
                })
            })
            .transpose()
    }

    async fn set_data(&mut self, key: &str, data: MonitoringData) -> Result<(), LogStoreError> {
        // Compute next_monitor before `data` is moved field-by-field below.
        let next_monitor = data.next_monitor();
        let raw = wire::StoredMonitoringData {
            index: data.index.to_vec(),
            pos: data.pos,
            ptrs: data.ptrs,
            owned: data.owned,
        }
        .encode_to_vec();
        self.set_raw_data(key, &raw, next_monitor).await
    }
}
|
499
rust/keytrans/src/log.rs
Normal file
499
rust/keytrans/src/log.rs
Normal file
@ -0,0 +1,499 @@
|
||||
//
|
||||
// Copyright 2024 Signal Messenger, LLC.
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
//
|
||||
//! Implements the Log Tree.
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
type Hash = [u8; 32];
|
||||
|
||||
mod math {
|
||||
// This module implements functions for navigating a Log Tree. Note that the
|
||||
// Log Tree structure is different from the Implicit Binary Search Tree's
|
||||
// structure, in that intermediate nodes always have two children.
|
||||
|
||||
pub use crate::left_balanced::level;
|
||||
use crate::left_balanced::{left_step, log2, parent_step, right_step};
|
||||
|
||||
// Returns the number of nodes needed to store a tree with n leaves.
fn node_width(n: u64) -> u64 {
    if n == 0 {
        0
    } else {
        // n leaves plus n - 1 intermediate nodes.
        2 * n - 1
    }
}
|
||||
|
||||
// Returns the id of the root node of a tree with n leaves.
//
// The root is the highest node of the form 2^k - 1 within the array.
pub fn root(n: u64) -> u64 {
    (1 << log2(node_width(n))) - 1
}
|
||||
|
||||
// Returns the left child of an intermediate node.
//
// Unlike `right`, no adjustment for the tree size is needed.
fn left(x: u64) -> u64 {
    left_step(x)
}
|
||||
|
||||
// Returns the right child of an intermediate node.
//
// The raw right child may not exist in a tree with n leaves; descend to the
// left until a node inside the tree is found.
fn right(x: u64, n: u64) -> u64 {
    let mut r = right_step(x);
    let w = node_width(n);
    while r >= w {
        r = left(r)
    }
    r
}
|
||||
|
||||
// Returns the id of the parent node of x in a tree with n leaves.
//
// Panics if x is the root, which has no parent.
fn parent(x: u64, n: u64) -> u64 {
    if x == root(n) {
        panic!("root node has no parent");
    }

    // Step upward, skipping ancestor positions outside the array.
    let width = node_width(n);
    let mut p = parent_step(x);
    while p >= width {
        p = parent_step(p);
    }
    p
}
|
||||
|
||||
// Returns the other child of the node's parent.
fn sibling(x: u64, n: u64) -> u64 {
    let p = parent(x, n);
    // x is the left child iff it precedes its parent in the array.
    if x < p {
        right(p, n)
    } else {
        left(p)
    }
}
|
||||
|
||||
// Returns true if node x represents a full subtree.
fn is_full_subtree(x: u64, n: u64) -> bool {
    // Id of the right-most node actually present in the tree.
    let rightmost = 2 * (n - 1);
    // Id of x's right-most descendant if its subtree were full.
    let expected = x + (1 << level(x)) - 1;

    expected <= rightmost
}
|
||||
|
||||
// Returns the list of full subtrees that x consists of.
//
// Walks down the right edge of x's subtree, peeling off the left child at
// each step, until the remainder is itself a full subtree.
pub fn full_subtrees(mut x: u64, n: u64) -> Vec<u64> {
    let mut out = vec![];

    while !is_full_subtree(x, n) {
        out.push(left(x));
        x = right(x, n);
    }
    out.push(x);

    out
}
|
||||
|
||||
// Returns the list of node ids to return for a consistency proof between m
// and n, based on the algorithm from RFC 6962.
//
// The `true` flag records that the first m leaves are the complete old tree.
pub fn consistency_proof(m: u64, n: u64) -> Vec<u64> {
    sub_proof(m, n, true)
}
|
||||
|
||||
// Recursive helper for consistency_proof, mirroring SUBPROOF from RFC 6962:
// `b` indicates whether the m leaves are known to be the complete old tree.
fn sub_proof(m: u64, n: u64, b: bool) -> Vec<u64> {
    if m == n {
        return match b {
            true => vec![],
            false => vec![root(m)],
        };
    }
    // k is the largest power of two strictly less than n.
    let mut k = 1u64 << log2(n);
    if k == n {
        k /= 2;
    }
    if m <= k {
        // The first m leaves lie entirely in the left subtree.
        let mut proof = sub_proof(m, k, b);
        proof.push(right(root(n), n));
        proof
    } else {
        // The first m leaves span both subtrees: recurse into the right
        // subtree and shift its local node ids into place.
        let mut proof: Vec<u64> = sub_proof(m - k, n - k, false)
            .iter()
            .map(|x| x + 2 * k)
            .collect();
        proof.insert(0, left(root(n)));
        proof
    }
}
|
||||
|
||||
// Returns the copath nodes of a batch of leaves.
pub fn batch_copath(leaves: &[u64], n: u64) -> Vec<u64> {
    // Convert the leaf indices to node indices.
    let mut nodes: Vec<u64> = leaves.iter().map(|x| 2 * x).collect();
    nodes.sort();

    // Iteratively combine nodes until there's only one entry in the list
    // (being the root), keeping track of the extra nodes we needed to get
    // there.
    let mut out = vec![];
    let root = root(n);
    while !(nodes.len() == 1 && nodes[0] == root) {
        let mut next_level = vec![];

        while nodes.len() > 1 {
            let p = parent(nodes[0], n);
            if right(p, n) == nodes[1] {
                // Sibling is already here.
                nodes.drain(..2);
            } else {
                // Need to fetch sibling.
                out.push(sibling(nodes[0], n));
                nodes.drain(..1);
            }
            next_level.push(p);
        }
        if nodes.len() == 1 {
            // One node left at this level: either it joins a higher level
            // untouched, or its sibling is needed to lift it one level up.
            if !next_level.is_empty() && level(parent(nodes[0], n)) > level(next_level[0]) {
                next_level.push(nodes[0]);
            } else {
                out.push(sibling(nodes[0], n));
                next_level.push(parent(nodes[0], n));
            }
        }

        nodes = next_level;
    }
    out.sort();

    out
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    // Spot checks for the tree-math helpers against hand-computed values.
    #[test]
    fn test_math() {
        assert_eq!(log2(0), 0);
        assert_eq!(log2(8), 3);
        assert_eq!(log2(10000), 13);

        assert_eq!(level(1), 1);
        assert_eq!(level(2), 0);
        assert_eq!(level(3), 2);

        assert_eq!(root(5), 7);
        assert_eq!(left(7), 3);
        assert_eq!(right(7, 8), 11);
        assert_eq!(parent(1, 4), 3);
        assert_eq!(parent(5, 4), 3);
        assert_eq!(sibling(13, 8), 9);
        assert_eq!(sibling(9, 8), 13);

        assert_eq!(full_subtrees(7, 6), vec![3, 9]);

        assert_eq!(batch_copath(&[0, 2, 3, 4], 8), vec![2, 10, 13]);
        assert_eq!(batch_copath(&[0, 2, 3], 8), vec![2, 11]);
    }
}
|
||||
}
|
||||
|
||||
// Error type for log-tree operations. The doc comments double as
// displaydoc::Display format strings, so they are part of runtime behavior.
#[derive(Debug, displaydoc::Display)]
pub enum Error {
    /// Empty chain
    EmptyChain,
    /// Malformed chain
    MalformedChain,
    /// Invalid input: {0}
    InvalidInput(&'static str),
    /// Malformed proof
    MalformedProof,
    /// Proof mismatch: {0}
    ProofMismatch(&'static str),
    /// Unexpected error: {0}
    Unexpected(&'static str),
}
|
||||
|
||||
type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
// The primary wrapper struct for representing a single node in the tree.
#[derive(Clone)]
struct NodeData {
    // Whether this node is a leaf (as opposed to an intermediate node).
    leaf: bool,
    // The node's 32-byte hash.
    value: Hash,
}
|
||||
|
||||
impl NodeData {
|
||||
fn marshal(&self) -> [u8; 33] {
|
||||
let mut out = [0u8; 33];
|
||||
if !self.leaf {
|
||||
out[0] = 1;
|
||||
}
|
||||
out[1..33].copy_from_slice(&self.value);
|
||||
|
||||
out
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the intermediate hash of left and right.
//
// SHA-256 over the two marshaled (tagged) child nodes.
fn tree_hash(left: &NodeData, right: &NodeData) -> NodeData {
    let mut hasher = Sha256::new();
    hasher.update(left.marshal());
    hasher.update(right.marshal());

    NodeData {
        leaf: false,
        value: hasher.finalize().into(),
    }
}
|
||||
|
||||
// Incrementally computes a log-tree root hash from nodes inserted one at a
// time.
struct SimpleRootCalculator {
    // chain[i] holds the pending hash of a full subtree at level i, if any —
    // analogous to the carry bits of a binary counter.
    chain: Vec<Option<NodeData>>,
}
|
||||
|
||||
impl SimpleRootCalculator {
    fn new() -> Self {
        Self { chain: vec![] }
    }

    /// Adds a (sub)tree hash at the given level, merging it with pending
    /// hashes like carry propagation in a binary counter.
    fn insert(&mut self, level: usize, value: Hash) {
        while self.chain.len() < level + 1 {
            self.chain.push(None);
        }

        let mut acc = NodeData {
            leaf: level == 0,
            value,
        };
        let mut i = level;
        while i < self.chain.len() {
            match self.chain[i].as_ref() {
                Some(nd) => {
                    // A subtree of this size is already pending: merge and
                    // carry the combined hash up one level.
                    acc = tree_hash(nd, &acc);
                    self.chain[i] = None;
                    i += 1;
                }
                None => break,
            }
        }
        if i == self.chain.len() {
            self.chain.push(Some(acc));
        } else {
            self.chain[i] = Some(acc);
        }
    }

    /// Folds all pending subtree hashes into the root hash of everything
    /// inserted so far. Fails if nothing was inserted.
    fn root(&self) -> Result<Hash> {
        if self.chain.is_empty() {
            return Err(Error::EmptyChain);
        }

        // Find first non-null element of chain.
        let res = self.chain.iter().enumerate().find(|(_, nd)| nd.is_some());
        let (root_pos, root) = match res {
            Some((i, Some(nd))) => (i, (*nd).clone()),
            _ => return Err(Error::MalformedChain),
        };

        // Fold the hashes above what we just found into one.
        Ok(self.chain[root_pos + 1..]
            .iter()
            .fold(root, |acc, nd| match nd {
                Some(nd) => tree_hash(nd, &acc),
                None => acc,
            })
            .value)
    }
}
|
||||
|
||||
// Returns the root that would result in the given proof being valid for the
// given values.
//
// `x` holds the strictly-sorted leaf indices, `values` the corresponding leaf
// hashes, `n` the tree size in leaves, and `proof` the copath hashes paired
// with `math::batch_copath(x, n)`.
pub fn evaluate_batch_proof(x: &[u64], n: u64, values: &[Hash], proof: &[Hash]) -> Result<Hash> {
    if x.len() != values.len() {
        return Err(Error::InvalidInput(
            "expected same number of indices and values",
        ));
    }
    let sorted = x.windows(2).all(|w| w[0] < w[1]);
    if !sorted {
        return Err(Error::InvalidInput("input entries must be in sorted order"));
    }
    if x.is_empty() {
        return Err(Error::InvalidInput(
            "can not evaluate empty batch inclusion proof",
        ));
    }
    if x[x.len() - 1] >= n {
        return Err(Error::InvalidInput(
            "leaf ids can not be larger than tree size",
        ));
    }

    let copath = math::batch_copath(x, n);
    if proof.len() != copath.len() {
        return Err(Error::MalformedProof);
    }

    // Merge the leaf values and copath hashes into the calculator in
    // ascending node-id order (leaf i lives at node id 2 * x[i]).
    let mut calc = SimpleRootCalculator::new();
    let (mut i, mut j) = (0, 0);
    while i < x.len() && j < copath.len() {
        if 2 * x[i] < copath[j] {
            calc.insert(0, values[i]);
            i += 1;
        } else {
            calc.insert(math::level(copath[j]), proof[j]);
            j += 1;
        }
    }
    // Flush whichever list still has entries.
    while i < x.len() {
        calc.insert(0, values[i]);
        i += 1;
    }
    while j < copath.len() {
        calc.insert(math::level(copath[j]), proof[j]);
        j += 1;
    }

    calc.root()
}
|
||||
|
||||
// Returns the root of the tree immediately after the leaf `x[early_stop]` has
|
||||
// been sequenced.
|
||||
pub fn truncate_batch_proof(
|
||||
early_stop: usize,
|
||||
x: &[u64],
|
||||
values: &[Hash],
|
||||
proof: &[Hash],
|
||||
) -> Result<Hash> {
|
||||
if early_stop >= x.len() {
|
||||
return Err(Error::InvalidInput("early_stop is out of bounds"));
|
||||
}
|
||||
let x = &x[..early_stop + 1];
|
||||
let stop_id = x[early_stop];
|
||||
let copath = math::batch_copath(x, stop_id + 1);
|
||||
|
||||
evaluate_batch_proof(
|
||||
x,
|
||||
stop_id + 1,
|
||||
&values[..early_stop + 1],
|
||||
&proof[..copath.len()],
|
||||
)
|
||||
}
|
||||
|
||||
// Checks that `proof` is a valid consistency proof between `m_root` and
// `n_root` where `m` < `n`.
pub fn verify_consistency_proof(
    m: u64,
    n: u64,
    proof: &[Hash],
    m_root: Hash,
    n_root: Hash,
) -> Result<()> {
    if m == 0 || m >= n {
        return Err(Error::InvalidInput("m must be within [0, n)"));
    }
    let ids = math::consistency_proof(m, n);
    if proof.len() != ids.len() {
        return Err(Error::MalformedProof);
    }

    // Step 1: Verify that the consistency proof aligns with m_root.
    let mut calc = SimpleRootCalculator::new();
    let path = math::full_subtrees(math::root(m), m);
    if path.len() == 1 {
        // m is a power of two so we don't need to verify anything.
        calc.insert(math::level(math::root(m)), m_root);
    } else {
        // The leading proof entries must be exactly the full subtrees that
        // make up the old tree; recombined, they must hash to m_root.
        for (i, &elem) in path.iter().enumerate() {
            if ids[i] != elem {
                // TODO: PathMismatch maybe?
                return Err(Error::Unexpected("id does not match path"));
            }
            calc.insert(math::level(elem), proof[i]);
        }
        match calc.root() {
            Ok(root) => {
                if m_root != root {
                    return Err(Error::ProofMismatch("first root does not match proof"));
                }
            }
            Err(err) => return Err(err),
        }
    }

    // Step 2: Verify that the consistency proof aligns with n_root.
    // Skip the proof entries already consumed in step 1, if any.
    let i = match path.len() {
        1 => 0,
        i => i,
    };
    for j in i..ids.len() {
        calc.insert(math::level(ids[j]), proof[j]);
    }
    match calc.root() {
        Ok(root) if n_root == root => Ok(()),
        Ok(_root) => Err(Error::ProofMismatch("second root does not match proof")),
        Err(err) => Err(err),
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use hex_literal::hex;

    // Known-answer tests for the log-tree proof routines.
    #[test]
    fn test_evaluate_batch_proof() {
        // Distinct placeholder hashes: values are 0..5, proof entries 6..12.
        let mut values = [[0u8; 32]; 6];
        let mut proof = [[0u8; 32]; 7];
        for (i, value) in values.iter_mut().enumerate() {
            value[0] = i as u8;
        }
        for (i, elem) in proof.iter_mut().enumerate() {
            elem[0] = (6 + i) as u8;
        }

        let got = evaluate_batch_proof(&[0, 1, 2, 4, 8, 16], 18, &values, &proof).unwrap();
        let want = hex!("435b929d1b8da2cb7f35119903c1f72d3f048e30b0dd0081a97b41f8da37f58f");
        assert!(got == want);
    }

    #[test]
    fn test_truncate_batch_proof() {
        let values = &[
            hex!("92c3f73e218d073192c84247c56c12cadd8adc70624c5e879ef213afee0a927a"),
            hex!("42b59b311613ff156ce56686f690ea17794bbe155947e1893263957639e776b7"),
            hex!("c9fdbec01c9fe76f9b97c7afcc9b93829cb62b4f0fd5018c687ff6e537198d31"),
        ];
        let proof = &[
            hex!("a0b219fe94b49121df5b8210ff4f14b5bbddaf49f689be971cbcfe82d47cc590"),
            hex!("5bebed9662a891f5ad369fad2a58efdedc37eef70a1979244cb3b3dd2c13782e"),
            hex!("f8d36bfd0ce37743de10910a32f1eaa1cf3d7b037342b9834b4c9e847b416618"),
            hex!("190fefcbd2f2617305b74097c449d131fe8c0b62365a1de6d0708ddb6bbf0f7d"),
            hex!("b941e7a040c42477e2f547003760821428195876b9185a95f70484868e92b900"),
            hex!("ecf8b73011345554f10c6aea96ea07c685ae2fb37e337075c30a74949ee28e14"),
            hex!("d306f87c5a08d671d1d27a0050aeb50c34bbd09bee1e04e8843143205de96bb1"),
            hex!("88d133186fc10d8bf2d11aef0fad6a984c348af392729218916e91366749c1ff"),
        ];

        let got = truncate_batch_proof(1, &[5, 10, 15], values, proof).unwrap();
        let want = hex!("1eb26fa1fac53af285479ba4536ef762648fb4c740429f2810065130b92fb00f");
        assert!(got == want);
    }

    #[test]
    fn test_verify_consistency_proof() {
        let m_root = hex!("47cffc2f3d88213d58d25ec12a2284cc94dd7736a5a2f99b5e49543f6d324409");
        let n_root = hex!("7b830576af52cb15e47f51bf0859c7918858881a2ae1945889e15e89f0b6b654");

        let proof = &[
            hex!("817b7723f0c429cc053f1690cdd9ef6357cf544c90b2b898f2b17647379a55f0"),
            hex!("a3d4fac233766d3f546ce7d21683bcfd442db3da1fd8f672b04223cd7e26e1d4"),
            hex!("02632c875f214195b9c116a13a105b7a5a891d3bf19d77e1c807380d918623d5"),
            hex!("cb0c07deb12feceeca301453fdc65fb15a1bada91dc69f5b045a3fb647a216ba"),
            hex!("f23e0ed32b4481c6619e6175105f6a555f55a7b6d98d4f297f4a292bfeedebb1"),
            hex!("7343774893f3b7b4dac9d1a5cb4e88d5c57b71dba95aa377f88da043af030df2"),
            hex!("bc593d72ffdfda9b8cbbc758e10a8bd07e8aed332f8c9168cbf834e8d1d80012"),
            hex!("32ef158c2def8c641f5c5392b6d248508b7d0fc1ea5ccda1deaf866d38e93ca4"),
            hex!("74e16d5b930d68f3228396f35717df5a2f6b58382c8d82a14d1c1f5190900f14"),
            hex!("db84e6de2d857cf6753b7321f5c3e6c7e66aaf8ec2c7b94b7959c462a4fc8162"),
            hex!("6263b4af228c862edcc8b63ca33d4e67d1d278000e1f7c3eb8cd56039b9613b3"),
        ];

        assert!(verify_consistency_proof(1078, 2000, proof, m_root, n_root).is_ok());
        assert!(verify_consistency_proof(1078, 2000, proof, m_root, m_root).is_err());
        assert!(verify_consistency_proof(1078, 2000, proof, n_root, n_root).is_err());
    }
}
|
66
rust/keytrans/src/prefix.rs
Normal file
66
rust/keytrans/src/prefix.rs
Normal file
@ -0,0 +1,66 @@
|
||||
//
|
||||
// Copyright 2024 Signal Messenger, LLC.
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
//
|
||||
//! Implements the Prefix Tree.
|
||||
use std::result::Result;
|
||||
|
||||
use crate::wire::PrefixProof as SearchResult;
|
||||
use sha2::{Digest as _, Sha256};
|
||||
|
||||
const KEY_LENGTH: usize = 32;
|
||||
|
||||
// NOTE: the doc comment below doubles as the displaydoc::Display format
// string and is therefore part of runtime behavior.
/// Malformed proof
#[derive(Debug, displaydoc::Display)]
pub struct MalformedProof;
|
||||
|
||||
fn leaf_hash(key: &[u8; 32], ctr: u32, pos: u64) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update([0x00]);
|
||||
hasher.update(key);
|
||||
hasher.update(ctr.to_be_bytes());
|
||||
hasher.update(pos.to_be_bytes());
|
||||
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
fn parent_hash(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update([0x01]);
|
||||
hasher.update(left);
|
||||
hasher.update(right);
|
||||
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
fn evaluate_proof(
|
||||
key: &[u8; 32],
|
||||
value: &[u8; 32],
|
||||
proof: &[Vec<u8>],
|
||||
) -> Result<[u8; 32], MalformedProof> {
|
||||
if proof.len() != 8 * KEY_LENGTH {
|
||||
return Err(MalformedProof);
|
||||
}
|
||||
|
||||
let mut value = *value;
|
||||
for i in 0..proof.len() {
|
||||
let sibling: &[u8; 32] = proof[i].as_slice().try_into().map_err(|_| MalformedProof)?;
|
||||
|
||||
let n = proof.len() - i - 1;
|
||||
let b = key[n / 8] >> (7 - (n % 8)) & 1; // Read n^th bit of key
|
||||
|
||||
value = if b == 0 {
|
||||
parent_hash(&value, sibling)
|
||||
} else {
|
||||
parent_hash(sibling, &value)
|
||||
}
|
||||
}
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
// Takes a search result `res` as input, which was returned by searching for
// `key`, and returns the root that would make the proof valid. `pos` is the
// position of the first instance of `key` in the log.
//
// The leaf commits to the key, its version counter, and its first position.
pub fn evaluate(key: &[u8; 32], pos: u64, res: &SearchResult) -> Result<[u8; 32], MalformedProof> {
    evaluate_proof(key, &leaf_hash(key, res.counter, pos), &res.proof)
}
|
943
rust/keytrans/src/verify.rs
Normal file
943
rust/keytrans/src/verify.rs
Normal file
@ -0,0 +1,943 @@
|
||||
//
|
||||
// Copyright 2024 Signal Messenger, LLC.
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
//
|
||||
use std::collections::HashMap;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use ed25519_dalek::{Signature, Verifier, VerifyingKey};
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::commitments::verify as verify_commitment;
|
||||
use crate::guide::{InvalidState, ProofGuide};
|
||||
use crate::implicit::{full_monitoring_path, monitoring_path};
|
||||
use crate::log::{evaluate_batch_proof, truncate_batch_proof, verify_consistency_proof};
|
||||
use crate::prefix::{evaluate as evaluate_prefix, MalformedProof};
|
||||
use crate::wire::*;
|
||||
use crate::{
|
||||
guide, log, vrf, DeploymentMode, LogStore, LogStoreError, MonitoringData, PublicConfig,
|
||||
};
|
||||
|
||||
/// The range of allowed timestamp values relative to "now".
/// The timestamps will have to be in [now - max_behind .. now + max_ahead]
const ALLOWED_TIMESTAMP_RANGE: &TimestampRange = &TimestampRange {
    max_behind: Duration::from_secs(24 * 60 * 60), // one day
    max_ahead: Duration::from_secs(10),
};

/// The range of allowed timestamp values relative to "now" used for auditor.
/// Auditor heads are refreshed less often, so a full week of lag is tolerated.
const ALLOWED_AUDITOR_TIMESTAMP_RANGE: &TimestampRange = &TimestampRange {
    max_behind: Duration::from_secs(7 * 24 * 60 * 60), // one week
    max_ahead: Duration::from_secs(10),
};

/// Maximum number of log entries the auditor tree head may lag behind the
/// service operator's tree head before verification fails.
const ENTRIES_MAX_BEHIND: u64 = 10_000_000;
|
||||
|
||||
/// Errors that can arise while verifying a key-transparency response.
#[derive(Debug, displaydoc::Display)]
pub enum Error {
    /// Required field not found
    RequiredFieldMissing,
    /// Proof element is wrong size
    InvalidProofElement,
    /// Value is too long to be encoded
    ValueTooLong,
    /// Verification failed: {0}
    VerificationFailed(String),
    /// Storage operation failed: {0}
    StorageFailure(String),
}

// Lower-level proof/VRF/guide errors are all surfaced as VerificationFailed;
// only LogStore errors become StorageFailure.

impl From<log::Error> for Error {
    fn from(err: log::Error) -> Self {
        Self::VerificationFailed(err.to_string())
    }
}

impl From<vrf::Error> for Error {
    fn from(err: vrf::Error) -> Self {
        Self::VerificationFailed(err.to_string())
    }
}

impl From<guide::InvalidState> for Error {
    fn from(err: InvalidState) -> Self {
        Self::VerificationFailed(err.to_string())
    }
}

impl From<MalformedProof> for Error {
    fn from(err: MalformedProof) -> Self {
        Self::VerificationFailed(err.to_string())
    }
}

impl From<LogStoreError> for Error {
    fn from(err: LogStoreError) -> Self {
        Self::StorageFailure(err.to_string())
    }
}

/// Module-local result type; all fallible verification helpers return this.
type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
fn get_proto_field<T>(field: &Option<T>) -> Result<&T> {
|
||||
field.as_ref().ok_or(Error::RequiredFieldMissing)
|
||||
}
|
||||
|
||||
fn get_hash_proof(proof: &[Vec<u8>]) -> Result<Vec<[u8; 32]>> {
|
||||
proof
|
||||
.iter()
|
||||
.map(|elem| <&[u8] as TryInto<[u8; 32]>>::try_into(elem))
|
||||
.collect::<std::result::Result<_, _>>()
|
||||
.map_err(|_| Error::InvalidProofElement)
|
||||
}
|
||||
|
||||
/// Appends a length-prefixed key to `buffer`: a big-endian u16 byte count
/// followed by the key material itself.
///
/// Panics if the key exceeds u16::MAX bytes; key lengths come from trusted
/// deployment configuration, so this is treated as a programming error.
fn serialize_key(buffer: &mut Vec<u8>, key_material: &[u8], key_kind: &str) {
    let key_len: u16 = key_material
        .len()
        .try_into()
        .unwrap_or_else(|_| panic!("{} {}", key_kind, "key is too long to be encoded"));
    buffer.extend_from_slice(&key_len.to_be_bytes());
    buffer.extend_from_slice(key_material);
}
|
||||
|
||||
/// Serializes the "to-be-signed" representation of a tree head.
///
/// The layout is: ciphersuite (2 bytes), deployment mode (1 byte), the
/// length-prefixed signature key, VRF key, and (if the mode has one) the
/// third-party key, then the big-endian tree size, timestamp, and root hash.
/// All multi-byte integers are big-endian.
fn marshal_tree_head_tbs(
    tree_size: u64,
    timestamp: i64,
    root: &[u8; 32],
    config: &PublicConfig,
) -> Result<Vec<u8>> {
    let mut buf = vec![];

    buf.extend_from_slice(&[0, 0]); // Ciphersuite
    buf.push(config.mode.byte()); // Deployment mode

    serialize_key(&mut buf, config.signature_key.as_bytes(), "signature");
    serialize_key(&mut buf, config.vrf_key.as_bytes(), "VRF");

    // Only third-party modes carry an associated key.
    if let Some(key) = config.mode.get_associated_key() {
        serialize_key(&mut buf, key.as_bytes(), "third party signature")
    }

    buf.extend_from_slice(&tree_size.to_be_bytes()); // Tree size
    buf.extend_from_slice(&timestamp.to_be_bytes()); // Timestamp
    buf.extend_from_slice(root); // Root hash

    Ok(buf)
}
|
||||
|
||||
fn marshal_update_value(value: &[u8]) -> Result<Vec<u8>> {
|
||||
let mut buf = vec![];
|
||||
|
||||
let length = u32::try_from(value.len()).map_err(|_| Error::ValueTooLong)?;
|
||||
buf.extend_from_slice(&length.to_be_bytes());
|
||||
buf.extend_from_slice(value);
|
||||
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
/// Returns the hash of the leaf of the transparency tree.
|
||||
fn leaf_hash(prefix_root: &[u8; 32], commitment: &[u8; 32]) -> [u8; 32] {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(prefix_root);
|
||||
hasher.update(commitment);
|
||||
|
||||
hasher.finalize().into()
|
||||
}
|
||||
|
||||
/// Checks the signature on the provided transparency tree head using the given key
|
||||
fn verify_tree_head_signature(
|
||||
config: &PublicConfig,
|
||||
head: &TreeHead,
|
||||
root: &[u8; 32],
|
||||
verifying_key: &VerifyingKey,
|
||||
) -> Result<()> {
|
||||
let raw = marshal_tree_head_tbs(head.tree_size, head.timestamp, root, config)?;
|
||||
let sig = Signature::from_slice(&head.signature).map_err(|_| {
|
||||
Error::VerificationFailed("failed to verify tree head signature".to_string())
|
||||
})?;
|
||||
verifying_key
|
||||
.verify(&raw, &sig)
|
||||
.map_err(|_| Error::VerificationFailed("failed to verify tree head signature".to_string()))
|
||||
}
|
||||
|
||||
/// Checks that a FullTreeHead structure is valid. It stores the tree head for
/// later requests if it succeeds.
///
/// Verification steps:
/// 1. Check the consistency proof against the last stored tree head (and that
///    tree size / timestamp never go backwards).
/// 2. Check the service operator's signature over the tree head.
/// 3. Check that the tree head's timestamp is sufficiently recent.
/// 4. In third-party-auditing deployments, verify the auditor's tree head.
async fn verify_full_tree_head(
    storage: &mut dyn LogStore,
    fth: &FullTreeHead,
    root: [u8; 32],
) -> Result<()> {
    let tree_head = get_proto_field(&fth.tree_head)?.clone();

    // 1. Verify the proof in FullTreeHead.consistency, if one is expected,
    //    and verify that the timestamp and tree_size fields of the TreeHead
    //    are greater than or equal to what they were before.
    match storage.get_last_tree_head().await? {
        // First contact: there is nothing to be consistent with, so a proof
        // must not be present.
        None => {
            if !fth.consistency.is_empty() {
                return Err(Error::VerificationFailed(
                    "consistency proof provided when not expected".to_string(),
                ));
            }
        }
        // Same tree size as before: the root must be unchanged and no
        // consistency proof is expected.
        Some((last, last_root)) if last.tree_size == tree_head.tree_size => {
            if root != last_root {
                return Err(Error::VerificationFailed(
                    "root is different but tree size is same".to_string(),
                ));
            }
            if tree_head.timestamp < last.timestamp {
                return Err(Error::VerificationFailed(
                    "current timestamp is less than previous timestamp".to_string(),
                ));
            }
            if !fth.consistency.is_empty() {
                return Err(Error::VerificationFailed(
                    "consistency proof provided when not expected".to_string(),
                ));
            }
        }
        // Tree has grown: require a valid consistency proof from the old
        // (size, root) to the new one.
        Some((last, last_root)) => {
            if tree_head.tree_size < last.tree_size {
                return Err(Error::VerificationFailed(
                    "current tree size is less than previous tree size".to_string(),
                ));
            }
            if tree_head.timestamp < last.timestamp {
                return Err(Error::VerificationFailed(
                    "current timestamp is less than previous timestamp".to_string(),
                ));
            }
            let proof = get_hash_proof(&fth.consistency)?;
            verify_consistency_proof(last.tree_size, tree_head.tree_size, &proof, last_root, root)?
        }
    };

    // 2. Verify the signature in TreeHead.signature.
    let public_config = storage.public_config().await?;
    verify_tree_head_signature(
        &public_config,
        &tree_head,
        &root,
        &public_config.signature_key,
    )?;

    // 3. Verify that the timestamp in TreeHead is sufficiently recent.
    verify_timestamp(tree_head.timestamp, ALLOWED_TIMESTAMP_RANGE, None)?;

    // 4. If third-party auditing is used, verify auditor_tree_head with the
    //    steps described in Section 11.2.
    if let DeploymentMode::ThirdPartyAuditing(auditor_key) = public_config.mode {
        let auditor_tree_head = get_proto_field(&fth.auditor_tree_head)?;
        let auditor_head = get_proto_field(&auditor_tree_head.tree_head)?;

        // 4a. Verify that the auditor TreeHead.timestamp is sufficiently
        //     recent (a more permissive window than the service head).
        verify_timestamp(
            auditor_head.timestamp,
            ALLOWED_AUDITOR_TIMESTAMP_RANGE,
            Some("auditor"),
        )?;

        // 4b. Verify that TreeHead.tree_size is sufficiently close to the
        //     most recent tree head from the service operator.
        if auditor_head.tree_size > tree_head.tree_size {
            return Err(Error::VerificationFailed(
                "auditor tree head may not be further along than service tree head".to_string(),
            ));
        }
        if tree_head.tree_size - auditor_head.tree_size > ENTRIES_MAX_BEHIND {
            return Err(Error::VerificationFailed(
                "auditor tree head is too far behind service tree head".to_string(),
            ));
        }
        // 4c. Verify the consistency proof between the auditor tree head and
        //     the service tree head, then verify the auditor's signature over
        //     its own (smaller) tree head.
        if tree_head.tree_size > auditor_head.tree_size {
            let auditor_root: &[u8; 32] = get_proto_field(&auditor_tree_head.root_value)?
                .as_slice()
                .try_into()
                .map_err(|_| {
                    Error::VerificationFailed("auditor tree head is malformed".to_string())
                })?;
            let proof = get_hash_proof(&auditor_tree_head.consistency)?;
            verify_consistency_proof(
                auditor_head.tree_size,
                tree_head.tree_size,
                &proof,
                *auditor_root,
                root,
            )?;
            verify_tree_head_signature(&public_config, auditor_head, auditor_root, &auditor_key)?;
        } else {
            // Auditor is exactly caught up: it must not supply a proof or an
            // explicit root, and its signature covers the service root.
            if !auditor_tree_head.consistency.is_empty() {
                return Err(Error::VerificationFailed(
                    "consistency proof provided when not expected".to_string(),
                ));
            }
            if auditor_tree_head.root_value.is_some() {
                return Err(Error::VerificationFailed(
                    "explicit root value provided when not expected".to_string(),
                ));
            }
            verify_tree_head_signature(&public_config, auditor_head, &root, &auditor_key)?;
        }
    }

    // Persist the now-verified head so future responses are checked against it.
    Ok(storage.set_last_tree_head(tree_head, root).await?)
}
|
||||
|
||||
/// The range of allowed timestamp values relative to "now".
/// The timestamps will have to be in [now - max_behind .. now + max_ahead]
struct TimestampRange {
    // Maximum tolerated age of a timestamp.
    max_behind: Duration,
    // Maximum tolerated clock skew into the future.
    max_ahead: Duration,
}
|
||||
|
||||
fn verify_timestamp(
|
||||
timestamp: i64,
|
||||
allowed_range: &TimestampRange,
|
||||
description: Option<&str>,
|
||||
) -> Result<()> {
|
||||
let TimestampRange {
|
||||
max_behind,
|
||||
max_ahead,
|
||||
} = allowed_range;
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as i128;
|
||||
let delta = now - timestamp as i128;
|
||||
let format_message = |s: &str| match description {
|
||||
None => s.to_string(),
|
||||
Some(desc) => format!("{} {}", desc, s),
|
||||
};
|
||||
if delta > max_behind.as_millis() as i128 {
|
||||
let message = format_message("timestamp is too far behind current time");
|
||||
return Err(Error::VerificationFailed(message));
|
||||
}
|
||||
if (-delta) > max_ahead.as_millis() as i128 {
|
||||
let message = format_message("timestamp is too far ahead of current time");
|
||||
return Err(Error::VerificationFailed(message));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Checks that the provided FullTreeHead has a valid consistency proof relative
/// to the provided distinguished head.
///
/// The tree head in `fth` must already have been verified and stored (its
/// root is read back from storage by tree size); only the consistency with
/// the distinguished (size, root) pair is checked here.
pub async fn verify_distinguished(
    storage: &mut dyn LogStore,
    fth: &FullTreeHead,
    distinguished_size: u64,
    distinguished_root: [u8; 32],
) -> Result<()> {
    let tree_size = get_proto_field(&fth.tree_head)?.tree_size;

    // Look up the root we previously verified for this exact tree size.
    let root = match storage.get_last_tree_head().await? {
        Some((tree_head, root)) if tree_head.tree_size == tree_size => root,
        _ => {
            return Err(Error::VerificationFailed(
                "expected tree head not found in storage".to_string(),
            ))
        }
    };

    // Handle special case when tree_size == distinguished_size.
    // No proof is possible between a tree and itself; the roots must match.
    if tree_size == distinguished_size {
        let result = if root == distinguished_root {
            Ok(())
        } else {
            Err(Error::VerificationFailed(
                "root hash does not match expected value".to_string(),
            ))
        };
        return result;
    }

    Ok(verify_consistency_proof(
        distinguished_size,
        tree_size,
        &get_hash_proof(&fth.distinguished)?,
        distinguished_root,
        root,
    )?)
}
|
||||
|
||||
/// Verifies the VRF proof over `search_key` and returns the resulting VRF
/// output hash (the index of the key in the prefix tree).
fn evaluate_vrf_proof(proof: &[u8], vrf_key: vrf::PublicKey, search_key: &str) -> Result<[u8; 32]> {
    // A proof of the wrong length is reported the same way as a malformed
    // prefix proof.
    let proof = proof.try_into().map_err(|_| MalformedProof)?;
    Ok(vrf_key.proof_to_hash(search_key.as_bytes(), proof)?)
}
|
||||
|
||||
/// The shared implementation of verify_search and verify_update.
///
/// Verifies the VRF proof, walks the search proof under the proof guide,
/// checks the commitment opening for the result entry, recomputes the root
/// via the batch inclusion proof, verifies the full tree head against that
/// root, and finally updates the stored monitoring data for the search key.
/// `monitor` forces the key to be added to the monitored set.
async fn verify_search_internal(
    storage: &mut dyn LogStore,
    req: &SearchRequest,
    res: &SearchResponse,
    monitor: bool,
) -> Result<()> {
    // NOTE: Update this function in tandem with truncate_search_response.

    let public_config = storage.public_config().await?;

    // The VRF output is the search key's index in the prefix tree.
    let index = evaluate_vrf_proof(&res.vrf_proof, public_config.vrf_key, &req.search_key)?;

    // Evaluate the search proof.
    let full_tree_head = get_proto_field(&res.tree_head)?;
    let tree_size = {
        let tree_head = get_proto_field(&full_tree_head.tree_head)?;
        tree_head.tree_size
    };
    let search_proof = get_proto_field(&res.search)?;

    let guide = ProofGuide::new(req.version, search_proof.pos, tree_size);

    // The guide drives a binary-search-like walk; each step it requests must
    // be matched, in order, by one entry of search_proof.steps.
    let mut i = 0;
    let mut leaves = HashMap::new();
    let mut steps = HashMap::new();
    let result = guide.consume(|guide, next_id| {
        if i >= search_proof.steps.len() {
            return Err(Error::VerificationFailed(
                "unexpected number of steps in search proof".to_string(),
            ));
        }
        let step = &search_proof.steps[i];
        let prefix_proof = get_proto_field(&step.prefix)?;
        guide.insert(next_id, prefix_proof.counter);

        // Evaluate the prefix proof and combine it with the commitment to get
        // the value stored in the log.
        let prefix_root = evaluate_prefix(&index, search_proof.pos, prefix_proof)?;
        let commitment = step
            .commitment
            .as_slice()
            .try_into()
            .map_err(|_| MalformedProof)?;
        leaves.insert(next_id, leaf_hash(&prefix_root, commitment));
        steps.insert(next_id, step.clone());

        i += 1;
        Ok::<(), Error>(())
    })?;

    // The proof must contain exactly as many steps as the guide consumed —
    // no trailing unverified steps allowed.
    if i != search_proof.steps.len() {
        return Err(Error::VerificationFailed(
            "unexpected number of steps in search proof".to_string(),
        ));
    }

    // Verify commitment opening.
    let (result_i, result_id) = result.ok_or_else(|| {
        Error::VerificationFailed("failed to find expected version of key".to_string())
    })?;
    let result_step = &search_proof.steps[result_i];

    let value = marshal_update_value(&get_proto_field(&res.value)?.value)?;
    let opening = res
        .opening
        .as_slice()
        .try_into()
        .map_err(|_| Error::VerificationFailed("malformed opening".to_string()))?;

    if !verify_commitment(
        req.search_key.as_bytes(),
        &result_step.commitment,
        &value,
        opening,
    ) {
        return Err(Error::VerificationFailed(
            "failed to verify commitment opening".to_string(),
        ));
    }

    // Evaluate the inclusion proof to get a candidate root value.
    let mut ids: Vec<u64> = leaves.keys().cloned().collect();
    ids.sort();

    let values: Vec<[u8; 32]> = ids.iter().map(|id| *leaves.get(id).unwrap()).collect();

    let inclusion_proof = get_hash_proof(&search_proof.inclusion)?;
    let root = evaluate_batch_proof(&ids, tree_size, &values, &inclusion_proof)?;

    // Verify the tree head with the candidate root.
    verify_full_tree_head(storage, full_tree_head, root).await?;

    // Update stored monitoring data.
    let size = if req.search_key == "distinguished" {
        // Make sure we don't update monitoring data based on parts of the tree
        // that we don't intend to retain.
        result_id + 1
    } else {
        tree_size
    };
    let ver = get_proto_field(&result_step.prefix)?.counter;

    let mut mdw = MonitoringDataWrapper::load(storage, &req.search_key).await?;
    if monitor || public_config.mode == DeploymentMode::ContactMonitoring {
        mdw.start_monitoring(&index, search_proof.pos, result_id, ver, monitor);
    }
    mdw.check_search_consistency(size, &index, search_proof.pos, result_id, ver, monitor)?;
    mdw.update(size, &steps)?;
    mdw.save(storage, &req.search_key).await
}
|
||||
|
||||
/// Checks that the output of a Search operation is valid and updates the
/// client's stored data. `res.value.value` may only be consumed by the
/// application if this function returns successfully.
///
/// `force_monitor` adds the key to the monitored set even in deployments
/// that don't monitor contacts by default.
pub async fn verify_search(
    storage: &mut dyn LogStore,
    req: &SearchRequest,
    res: &SearchResponse,
    force_monitor: bool,
) -> Result<()> {
    verify_search_internal(storage, req, res, force_monitor).await
}
|
||||
|
||||
/// Checks that the output of an Update operation is valid and updates the
/// client's stored data.
///
/// An update response is verified as a search for the key's newest version
/// (version: None) whose value is the one we just submitted, with monitoring
/// forced on — we always monitor keys we own.
pub async fn verify_update(
    storage: &mut dyn LogStore,
    req: &UpdateRequest,
    res: &UpdateResponse,
) -> Result<()> {
    verify_search_internal(
        storage,
        &SearchRequest {
            search_key: req.search_key.clone(),
            version: None,
            consistency: req.consistency,
        },
        &SearchResponse {
            tree_head: res.tree_head.clone(),
            vrf_proof: res.vrf_proof.clone(),
            search: res.search.clone(),

            opening: res.opening.clone(),
            value: Some(UpdateValue {
                value: req.value.clone(),
            }),
        },
        true,
    )
    .await
}
|
||||
|
||||
/// Checks that the output of a Monitor operation is valid and updates the
/// client's stored data.
///
/// Each requested key (owned and contact) must have a matching MonitorProof;
/// all proofs are folded into one set of candidate leaves, the root is
/// recomputed from the batch inclusion proof, the tree head is verified
/// against it, and only then is the per-key monitoring data advanced.
pub async fn verify_monitor(
    storage: &mut dyn LogStore,
    req: &MonitorRequest,
    res: &MonitorResponse,
) -> Result<()> {
    // Verify proof responses are the expected lengths.
    if req.owned_keys.len() != res.owned_proofs.len() {
        return Err(Error::VerificationFailed(
            "monitoring response is malformed: wrong number of owned key proofs".to_string(),
        ));
    }
    if req.contact_keys.len() != res.contact_proofs.len() {
        return Err(Error::VerificationFailed(
            "monitoring response is malformed: wrong number of contact key proofs".to_string(),
        ));
    }

    let full_tree_head = get_proto_field(&res.tree_head)?;
    let tree_head = get_proto_field(&full_tree_head.tree_head)?;
    let tree_size = tree_head.tree_size;

    // Process all of the individual MonitorProof structures.
    let mut mpa = MonitorProofAcc::new(tree_size);
    // TODO: futures_util::future::join_all() maybe?
    for (key, proof) in req.owned_keys.iter().zip(res.owned_proofs.iter()) {
        mpa.process(storage, key, proof).await?;
    }
    for (key, proof) in req.contact_keys.iter().zip(res.contact_proofs.iter()) {
        mpa.process(storage, key, proof).await?;
    }

    // Evaluate the inclusion proof to get a candidate root value.
    let root = if mpa.leaves.is_empty() {
        // With nothing to prove inclusion for, the "proof" must be exactly
        // the bare root hash.
        match inclusion_proof[..] {
            [root] => root,
            _ => {
                return Err(Error::VerificationFailed(
                    "monitoring response is malformed: inclusion proof should be root".to_string(),
                ))
            }
        }
    } else {
        let mut ids: Vec<u64> = mpa.leaves.keys().cloned().collect();
        ids.sort();

        let values: Vec<[u8; 32]> = ids.iter().map(|id| *mpa.leaves.get(id).unwrap()).collect();

        evaluate_batch_proof(&ids, tree_size, &values, &inclusion_proof)?
    };

    // Verify the tree head with the candidate root.
    verify_full_tree_head(storage, full_tree_head, root).await?;

    // Update monitoring data.
    for (key, entry) in req
        .owned_keys
        .iter()
        .chain(req.contact_keys.iter())
        .zip(mpa.entries.iter())
    {
        let size = if key.search_key == "distinguished" {
            // Generally an effort has been made to avoid referencing the
            // "distinguished" key in the core keytrans library, but it
            // simplifies things here:
            //
            // When working with the "distinguished" key, the last observed tree
            // head is always trimmed back to create an anonymity set. As such,
            // when monitoring the "distinguished" key, we need to make sure we
            // don't update monitoring data based on parts of the tree that we
            // don't intend to retain.
            req.consistency
                .as_ref()
                .ok_or(Error::VerificationFailed("monitoring request malformed: consistency field expected when monitoring distinguished key".to_string()))?
                .last
        } else {
            tree_size
        };
        let mut mdw = MonitoringDataWrapper::load(storage, &key.search_key).await?;
        mdw.update(size, entry)?;
        mdw.save(storage, &key.search_key).await?;
    }

    Ok(())
}
|
||||
|
||||
/// Accumulator that folds the MonitorProof structures from a monitoring
/// response into a single set of candidate leaves, detecting contradictions
/// between proofs along the way.
struct MonitorProofAcc {
    // Size of the log the response's tree head claims.
    tree_size: u64,
    /// Map from position in the log to leaf hash.
    leaves: HashMap<u64, [u8; 32]>,
    /// For each MonitorProof struct processed, contains the map that needs to be
    /// passed to MonitoringDataWrapper::update to update monitoring data for the
    /// search key.
    entries: Vec<HashMap<u64, ProofStep>>,
}

impl MonitorProofAcc {
    /// Creates an empty accumulator for a log of the given size.
    fn new(tree_size: u64) -> Self {
        Self {
            tree_size,
            leaves: HashMap::new(),
            entries: vec![],
        }
    }

    /// Folds one key's MonitorProof into the accumulator.
    ///
    /// Fails if the key has no stored monitoring data, if the number of proof
    /// steps doesn't match the expected monitoring path, or if two proofs
    /// disagree about the leaf hash at the same log position.
    async fn process(
        &mut self,
        storage: &mut dyn LogStore,
        key: &MonitorKey,
        proof: &MonitorProof,
    ) -> Result<()> {
        // Get the existing monitoring data from storage and check that it
        // matches the request.
        let data = storage.get_data(&key.search_key).await?.ok_or_else(|| {
            Error::VerificationFailed(
                "unable to process monitoring response for unknown search key".to_string(),
            )
        })?;

        // Compute which entry in the log each proof is supposed to correspond to.
        let entries = full_monitoring_path(&key.entries, data.pos, self.tree_size);
        if entries.len() != proof.steps.len() {
            return Err(Error::VerificationFailed(
                "monitoring response is malformed: wrong number of proof steps".to_string(),
            ));
        }

        // Evaluate each proof step to get the candidate leaf values.
        let mut steps = HashMap::new();
        for (entry, step) in entries.iter().zip(proof.steps.iter()) {
            let prefix_proof = get_proto_field(&step.prefix)?;
            let prefix_root = evaluate_prefix(&data.index, data.pos, prefix_proof)?;
            let commitment = step
                .commitment
                .as_slice()
                .try_into()
                .map_err(|_| MalformedProof)?;
            let leaf = leaf_hash(&prefix_root, commitment);

            // Different proofs may cover the same log position; they must
            // agree on its leaf hash.
            if let Some(other) = self.leaves.get(entry) {
                if leaf != *other {
                    return Err(Error::VerificationFailed(
                        "monitoring response is malformed: multiple values for same leaf"
                            .to_string(),
                    ));
                }
            } else {
                self.leaves.insert(*entry, leaf);
            }

            steps.insert(*entry, step.clone());
        }
        self.entries.push(steps);

        Ok(())
    }
}
|
||||
|
||||
/// Convenience wrapper around a search key's stored MonitoringData that
/// tracks whether anything changed, so `save` only writes back when needed.
struct MonitoringDataWrapper {
    // None when the key is not (yet) monitored.
    inner: Option<MonitoringData>,
    // Set whenever `inner` is mutated; gates the write-back in `save`.
    changed: bool,
}

impl MonitoringDataWrapper {
    /// Loads the monitoring data for `search_key` from storage (if any).
    async fn load(storage: &mut dyn LogStore, search_key: &str) -> Result<Self> {
        Ok(Self {
            inner: storage.get_data(search_key).await?,
            changed: false,
        })
    }

    /// Adds a key to the database of keys to monitor, if it's not already
    /// present.
    fn start_monitoring(
        &mut self,
        index: &[u8; 32],
        zero_pos: u64,
        ver_pos: u64,
        version: u32,
        owned: bool,
    ) {
        if self.inner.is_none() {
            self.inner = Some(MonitoringData {
                index: *index,
                pos: zero_pos,
                ptrs: HashMap::from([(ver_pos, version)]),
                owned,
            });
            self.changed = true;
        }
    }

    /// Cross-checks a verified search result against the stored monitoring
    /// data: the VRF index and start position must match, and a version seen
    /// at a known position (or along its monitoring path) must not contradict
    /// what was recorded. Records new (position, version) pointers when the
    /// position was previously unknown.
    fn check_search_consistency(
        &mut self,
        tree_size: u64,
        index: &[u8; 32],
        zero_pos: u64,
        ver_pos: u64,
        version: u32,
        owned: bool,
    ) -> Result<()> {
        // Nothing to check for keys we don't monitor.
        let data = match self.inner.as_mut() {
            Some(data) => data,
            None => return Ok(()),
        };

        if *index != data.index {
            return Err(Error::VerificationFailed(
                "given search key index does not match database".to_string(),
            ));
        }
        if zero_pos != data.pos {
            return Err(Error::VerificationFailed(
                "given search start position does not match database".to_string(),
            ));
        }

        match data.ptrs.get(&ver_pos) {
            Some(ver) => {
                if *ver != version {
                    return Err(Error::VerificationFailed(
                        "different versions of key recorded at same position".to_string(),
                    ));
                }
            }
            None => {
                // Unknown position: look for a recorded pointer along its
                // monitoring path toward the frontier.
                match monitoring_path(ver_pos, zero_pos, tree_size).find_map(|x| data.ptrs.get(&x))
                {
                    Some(ver) => {
                        // A later entry on the path must never claim a lower
                        // version than an earlier one.
                        if *ver < version {
                            return Err(Error::VerificationFailed(
                                "prefix tree has unexpectedly low version counter".to_string(),
                            ));
                        }
                    }
                    None => {
                        data.ptrs.insert(ver_pos, version);
                        self.changed = true;
                    }
                };
            }
        }

        // Once we learn we own a key, it stays owned.
        if !data.owned && owned {
            data.owned = true;
            self.changed = true;
        }

        Ok(())
    }

    /// Updates the internal monitoring data for a key as much as possible given
    /// a set of ProofStep structures. It should only be called after
    /// verify_full_tree_head has succeeded to ensure that we don't store updated
    /// monitoring data tied to a tree head that isn't valid.
    fn update(&mut self, tree_size: u64, entries: &HashMap<u64, ProofStep>) -> Result<()> {
        let data = match self.inner.as_mut() {
            Some(data) => data,
            None => return Ok(()),
        };

        let mut changed = false;
        let mut ptrs = HashMap::new();

        // Advance each stored (position, version) pointer as far along its
        // monitoring path as the provided proof steps allow.
        for (entry, ver) in data.ptrs.iter() {
            let mut entry = *entry;
            let mut ver = *ver;

            for x in monitoring_path(entry, data.pos, tree_size) {
                match entries.get(&x) {
                    None => break,
                    Some(step) => {
                        let ctr = get_proto_field(&step.prefix)?.counter;
                        // The version counter may only grow along the path.
                        if ctr < ver {
                            return Err(Error::VerificationFailed(
                                "prefix tree has unexpectedly low version counter".to_string(),
                            ));
                        }
                        changed = true;
                        entry = x;
                        ver = ctr;
                    }
                }
            }

            // Two pointers that advanced to the same position must agree.
            match ptrs.get(&entry) {
                Some(other) => {
                    if ver != *other {
                        return Err(Error::VerificationFailed(
                            "inconsistent versions found".to_string(),
                        ));
                    }
                }
                None => {
                    ptrs.insert(entry, ver);
                }
            };
        }

        if changed {
            data.ptrs = ptrs;
            self.changed = true;
        }

        Ok(())
    }

    /// Writes the monitoring data back to storage if it changed.
    async fn save(self, storage: &mut dyn LogStore, search_key: &str) -> Result<()> {
        if self.changed {
            storage.set_data(search_key, self.inner.unwrap()).await?
        }
        Ok(())
    }
}
|
||||
|
||||
/// Returns the TreeHead that would've been issued immediately after the value
/// being searched for in `SearchResponse` was sequenced.
///
/// Most validation is skipped so the SearchResponse MUST already be verified.
pub async fn truncate_search_response(
    storage: &mut dyn LogStore,
    req: &SearchRequest,
    res: &SearchResponse,
) -> Result<(u64, [u8; 32])> {
    // NOTE: Update this function in tandem with verify_search_internal.

    let public_config = storage.public_config().await?;

    let index = evaluate_vrf_proof(&res.vrf_proof, public_config.vrf_key, &req.search_key)?;

    // Evaluate the SearchProof to find the terminal leaf.
    let full_tree_head = get_proto_field(&res.tree_head)?;
    let tree_size = {
        let tree_head = get_proto_field(&full_tree_head.tree_head)?;
        tree_head.tree_size
    };
    let search_proof = get_proto_field(&res.search)?;

    let guide = ProofGuide::new(req.version, search_proof.pos, tree_size);

    // Re-walk the (already verified) search proof to rebuild the leaves.
    // Bounds checks are omitted: verification guaranteed the step count.
    let mut i = 0;
    let mut leaves = HashMap::new();

    let result = guide.consume(|guide, next_id| {
        let step = &search_proof.steps[i];
        let prefix_proof = get_proto_field(&step.prefix)?;
        guide.insert(next_id, prefix_proof.counter);

        // Evaluate the prefix proof and combine it with the commitment to get
        // the value stored in the log.
        let prefix_root = evaluate_prefix(&index, search_proof.pos, prefix_proof)?;
        let commitment = step
            .commitment
            .as_slice()
            .try_into()
            .map_err(|_| MalformedProof)?;
        leaves.insert(next_id, leaf_hash(&prefix_root, commitment));

        i += 1;
        Ok::<(), Error>(())
    })?;

    let (_, result_id) =
        result.expect("truncate_search_response called with search response that is not verified");

    // Evaluate the inclusion proof to get root value.
    let mut ids: Vec<u64> = leaves.keys().cloned().collect();
    ids.sort();

    let values: Vec<[u8; 32]> = ids.iter().map(|id| *leaves.get(id).unwrap()).collect();

    let inclusion_proof = get_hash_proof(&search_proof.inclusion)?;

    // Truncate the batch proof at the result entry to recover the root of
    // the tree as it stood right after that entry was sequenced.
    let early_stop = ids
        .iter()
        .position(|&id| id == result_id)
        .expect("result_id is not an id that was inspected by proof guide");
    let root = truncate_batch_proof(early_stop, &ids, &values, &inclusion_proof)?;

    Ok((result_id + 1, root))
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use assert_matches::assert_matches;
    use test_case::test_case;

    use super::*;

    // A symmetric +/- 42 s window used by all timestamp tests below.
    const MAX_AHEAD: Duration = Duration::from_secs(42);
    const MAX_BEHIND: Duration = Duration::from_secs(42);
    const TIMESTAMP_RANGE: &TimestampRange = &TimestampRange {
        max_behind: MAX_BEHIND,
        max_ahead: MAX_AHEAD,
    };

    const ONE_SECOND: Duration = Duration::from_secs(1);

    // Converts a SystemTime into the epoch-milliseconds form verify_timestamp
    // expects.
    fn make_timestamp(time: SystemTime) -> i64 {
        let duration = time.duration_since(UNIX_EPOCH).unwrap();
        duration.as_millis().try_into().unwrap()
    }

    // Timestamps strictly outside the window must be rejected.
    #[test_case(SystemTime::now() + MAX_AHEAD + ONE_SECOND; "far ahead")]
    #[test_case(SystemTime::now() - MAX_BEHIND - ONE_SECOND; "far behind")]
    fn verify_timestamps_error(time: SystemTime) {
        let ts = make_timestamp(time);
        assert_matches!(
            verify_timestamp(ts, TIMESTAMP_RANGE, None),
            Err(Error::VerificationFailed(_))
        );
    }

    // Timestamps within the window (with 1 s of slack for test runtime)
    // must be accepted.
    #[test_case(SystemTime::now(); "now")]
    #[test_case(SystemTime::now() + MAX_AHEAD - ONE_SECOND; "just ahead enough")]
    #[test_case(SystemTime::now() - MAX_BEHIND + ONE_SECOND; "just behind enough")]
    fn verify_timestamps_success(time: SystemTime) {
        let ts = make_timestamp(time);
        assert_matches!(verify_timestamp(ts, TIMESTAMP_RANGE, None), Ok(()));
    }
}
|
211
rust/keytrans/src/vrf.rs
Normal file
211
rust/keytrans/src/vrf.rs
Normal file
@ -0,0 +1,211 @@
|
||||
//
|
||||
// Copyright 2024 Signal Messenger, LLC.
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
//
|
||||
|
||||
//! Implements ECVRF-EDWARDS25519-SHA512-TAI from RFC 9381.
|
||||
use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint};
|
||||
use curve25519_dalek::scalar::Scalar;
|
||||
use curve25519_dalek::traits::VartimeMultiscalarMul;
|
||||
use sha2::{Digest as _, Sha512};
|
||||
|
||||
// Domain-separation constants for the hash invocations below.
// SUITE_ID is the RFC 9381 ciphersuite identifier (0x03 for
// ECVRF-EDWARDS25519-SHA512-TAI, which this module implements).
const SUITE_ID: u8 = 0x03;
// Front byte for the encode-to-curve (hash-to-curve) hash.
const DOMAIN_SEPARATOR_ENCODE: u8 = 0x01;
// Front byte for the challenge-generation hash.
const DOMAIN_SEPARATOR_CHALLENGE: u8 = 0x02;
// Front byte for the proof_to_hash (output-derivation) hash.
const DOMAIN_SEPARATOR_PROOF: u8 = 0x03;
// Trailing zero byte appended to every hash input.
const DOMAIN_SEPARATOR_BACK: u8 = 0x00;
// Failure modes of VRF decoding/verification.
// NOTE: the `///` variant docs double as the human-readable `Display`
// strings via `displaydoc::Display` — editing them changes runtime output.
#[derive(Debug, displaydoc::Display)]
pub enum Error {
    /// Invalid point on curve
    InvalidCurvePoint,
    /// Invalid VRF proof
    InvalidProof,
}

// Module-local shorthand: every fallible operation here fails with `Error`.
type Result<T> = std::result::Result<T, Error>;
||||
|
||||
fn encode_to_curve_try_and_increment(salt: &[u8], data: &[u8]) -> EdwardsPoint {
|
||||
let mut i = 0;
|
||||
let mut hasher = Sha512::new();
|
||||
|
||||
loop {
|
||||
hasher.update([SUITE_ID, DOMAIN_SEPARATOR_ENCODE]);
|
||||
hasher.update(salt);
|
||||
hasher.update(data);
|
||||
hasher.update([i, DOMAIN_SEPARATOR_BACK]);
|
||||
|
||||
let r = hasher.finalize_reset();
|
||||
match CompressedEdwardsY(r[..32].try_into().unwrap()).decompress() {
|
||||
Some(pt) => return pt.mul_by_cofactor(),
|
||||
None => i += 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_challenge(pts: [&[u8; 32]; 5]) -> [u8; 16] {
|
||||
let mut hasher = Sha512::new();
|
||||
hasher.update([SUITE_ID, DOMAIN_SEPARATOR_CHALLENGE]);
|
||||
for pt in pts {
|
||||
hasher.update(pt);
|
||||
}
|
||||
hasher.update([DOMAIN_SEPARATOR_BACK]);
|
||||
let c = hasher.finalize();
|
||||
|
||||
c[..16].try_into().unwrap()
|
||||
}
|
||||
|
||||
fn proof_to_hash(gamma: &EdwardsPoint) -> [u8; 32] {
|
||||
let mut hasher = Sha512::new();
|
||||
hasher.update([SUITE_ID, DOMAIN_SEPARATOR_PROOF]);
|
||||
hasher.update(gamma.mul_by_cofactor().compress().0);
|
||||
hasher.update([DOMAIN_SEPARATOR_BACK]);
|
||||
let index = hasher.finalize();
|
||||
|
||||
index[..32].try_into().unwrap()
|
||||
}
|
||||
|
||||
/// PublicKey holds a VRF public key.
///
/// Both encodings of the key are kept: the compressed bytes are hashed into
/// the challenge and hash-to-curve inputs, while the decompressed point is
/// used directly in the verification equations.
#[derive(Clone)]
pub struct PublicKey {
    // 32-byte compressed Edwards-Y encoding of the key.
    compressed: [u8; 32],
    // The same key decoded as a ready-to-use curve point.
    decompressed: EdwardsPoint,
}
||||
|
||||
impl TryFrom<[u8; 32]> for PublicKey {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(public_key: [u8; 32]) -> Result<Self> {
|
||||
match CompressedEdwardsY(public_key).decompress() {
|
||||
Some(pt) => Ok(PublicKey {
|
||||
compressed: public_key,
|
||||
decompressed: pt,
|
||||
}),
|
||||
None => Err(Error::InvalidCurvePoint),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PublicKey {
    /// Checks that proof is the correct VRF proof for message m, and outputs
    /// the index if so.
    ///
    /// `proof` is the 80-byte ECVRF proof encoding gamma (32 bytes) ||
    /// c (16 bytes) || s (32 bytes). Returns `Error::InvalidProof` if any
    /// component fails to decode or the recomputed challenge does not match.
    pub fn proof_to_hash(&self, m: &[u8], proof: &[u8; 80]) -> Result<[u8; 32]> {
        // Decode proof into its component parts: gamma, c, and s.
        let gamma = match CompressedEdwardsY(proof[..32].try_into().unwrap()).decompress() {
            Some(pt) => pt,
            None => return Err(Error::InvalidProof),
        };

        // c is only 16 bytes on the wire; widen it to a 32-byte scalar
        // encoding with the upper half zeroed.
        let mut c_bytes = [0u8; 32];
        c_bytes[..16].copy_from_slice(&proof[32..48]);
        let c = Scalar::from_canonical_bytes(c_bytes);
        if c.is_none().into() {
            return Err(Error::InvalidProof);
        }
        // Negate c so the additions performed by the multiscalar helpers
        // below compute the *subtractions* in the verification equations
        // (per curve25519-dalek, vartime_double_scalar_mul_basepoint(a, A, b)
        // returns [a]A + [b]B).
        let c = -(c.unwrap());

        let s = Scalar::from_canonical_bytes(proof[48..80].try_into().unwrap());
        if s.is_none().into() {
            return Err(Error::InvalidProof);
        }
        let s = s.unwrap();

        // H = encode_to_curve_try_and_increment(pk, m)
        // U = [s]B - [c]Y
        // V = [s]H - [c]Gamma
        let h = encode_to_curve_try_and_increment(&self.compressed, m);

        let u = EdwardsPoint::vartime_double_scalar_mul_basepoint(&c, &self.decompressed, &s);
        let v = EdwardsPoint::vartime_multiscalar_mul(&[s, c], &[h, gamma]);

        // Check challenge: recompute c from (Y, H, Gamma, U, V) and compare
        // against the 16 challenge bytes carried in the proof.
        let c_prime = generate_challenge([
            &self.compressed,
            &h.compress().0,
            proof[..32].try_into().unwrap(),
            &u.compress().0,
            &v.compress().0,
        ]);
        if proof[32..48] != c_prime {
            return Err(Error::InvalidProof);
        }

        Ok(proof_to_hash(&gamma))
    }

    /// Returns the compressed 32-byte encoding of the public key.
    pub fn as_bytes(&self) -> &[u8] {
        &self.compressed
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use hex_literal::hex;

    use super::*;

    /// One ECVRF-EDWARDS25519-SHA512-TAI test vector (RFC 9381 appendix):
    /// public key, message, expected hash-to-curve point, proof, and output.
    struct TestVector {
        pk: [u8; 32],
        alpha: &'static [u8],
        h: [u8; 32],
        pi: [u8; 80],
        beta: [u8; 32],
    }

    const TEST_VECTORS: [TestVector; 3] = [
        TestVector {
            pk: hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a"),
            alpha: &hex!(""),
            h: hex!("91bbed02a99461df1ad4c6564a5f5d829d0b90cfc7903e7a5797bd658abf3318"),
            pi: hex!("8657106690b5526245a92b003bb079ccd1a92130477671f6fc01ad16f26f723f26f8a57ccaed74ee1b190bed1f479d9727d2d0f9b005a6e456a35d4fb0daab1268a1b0db10836d9826a528ca76567805"),
            beta: hex!("90cf1df3b703cce59e2a35b925d411164068269d7b2d29f3301c03dd757876ff"),
        },
        TestVector {
            pk: hex!("3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c"),
            alpha: &hex!("72"),
            h: hex!("5b659fc3d4e9263fd9a4ed1d022d75eaacc20df5e09f9ea937502396598dc551"),
            pi: hex!("f3141cd382dc42909d19ec5110469e4feae18300e94f304590abdced48aed5933bf0864a62558b3ed7f2fea45c92a465301b3bbf5e3e54ddf2d935be3b67926da3ef39226bbc355bdc9850112c8f4b02"),
            beta: hex!("eb4440665d3891d668e7e0fcaf587f1b4bd7fbfe99d0eb2211ccec90496310eb"),
        },
        TestVector {
            pk: hex!("fc51cd8e6218a1a38da47ed00230f0580816ed13ba3303ac5deb911548908025"),
            alpha: &hex!("af82"),
            h: hex!("bf4339376f5542811de615e3313d2b36f6f53c0acfebb482159711201192576a"),
            pi: hex!("9bc0f79119cc5604bf02d23b4caede71393cedfbb191434dd016d30177ccbf8096bb474e53895c362d8628ee9f9ea3c0e52c7a5c691b6c18c9979866568add7a2d41b00b05081ed0f58ee5e31b3a970e"),
            beta: hex!("645427e5d00c62a23fb703732fa5d892940935942101e456ecca7bb217c61c45"),
        },
    ];

    /// Hash-to-curve must reproduce the expected H point for every vector.
    #[test]
    fn test_encode_to_curve_try_and_increment() {
        for vector in &TEST_VECTORS {
            let point = encode_to_curve_try_and_increment(&vector.pk, vector.alpha);
            assert_eq!(point.compress().0, vector.h);
        }
    }

    /// Valid proofs must verify and yield the expected beta output.
    #[test]
    fn test_proof_to_hash() {
        for vector in &TEST_VECTORS {
            let public_key = PublicKey::try_from(vector.pk).unwrap();
            let index = public_key.proof_to_hash(vector.alpha, &vector.pi).unwrap();
            assert_eq!(index, vector.beta);
        }
    }

    /// Verification must fail for the wrong message and for any single-bit
    /// corruption of the proof.
    #[test]
    fn test_proof_to_hash_fails() {
        for vector in &TEST_VECTORS {
            let public_key = PublicKey::try_from(vector.pk).unwrap();

            assert!(public_key.proof_to_hash(b"a", &vector.pi).is_err());

            for i in 0..vector.pi.len() {
                let mut corrupted = vector.pi;
                corrupted[i] ^= 1;
                assert!(public_key.proof_to_hash(vector.alpha, &corrupted).is_err());
            }
        }
    }
}
|
133
rust/keytrans/src/wire.proto
Normal file
133
rust/keytrans/src/wire.proto
Normal file
@ -0,0 +1,133 @@
|
||||
syntax = "proto3";
|
||||
package signal.keytrans.wire;
|
||||
|
||||
// PrefixProof is the result of searching the prefix tree for a key.
message PrefixProof {
  // Sibling hashes needed to recompute the prefix-tree root from the leaf.
  repeated bytes proof = 1;
  // NOTE(review): presumably the number of updates recorded for the searched
  // key — confirm against the server implementation.
  uint32 counter = 2;
}
|
||||
|
||||
// TreeHead contains the operator's signature on the most recent version of the
// log.
message TreeHead {
  // Number of entries in the log tree.
  uint64 tree_size = 1;
  // NOTE(review): appears to be milliseconds since the Unix epoch (the Rust
  // client builds timestamps with Duration::as_millis) — confirm.
  int64 timestamp = 2;
  // Operator's signature over the tree head.
  bytes signature = 3;
}
|
||||
|
||||
// AuditorTreeHead is provided to end-users when third-party auditing is used,
// as evidence that the log is behaving honestly.
message AuditorTreeHead {
  // Tree head as observed/signed by the auditor.
  TreeHead tree_head = 1;
  // NOTE(review): presumably the root hash at the auditor's tree size when it
  // lags the operator's — confirm semantics.
  optional bytes root_value = 2;
  // Consistency-proof hashes relating the auditor's head to the operator's.
  repeated bytes consistency = 3;
}
|
||||
|
||||
// FullTreeHead wraps a basic TreeHead with additional information that may be
// needed for validation.
message FullTreeHead {
  // The signed tree head itself.
  TreeHead tree_head = 1;
  // NOTE(review): presumably a consistency proof against the client's
  // "distinguished" (long-retained) tree head — confirm.
  repeated bytes distinguished = 2;
  // Consistency-proof hashes against the client's last-seen tree head.
  repeated bytes consistency = 3;
  // Present only when third-party auditing is in use.
  optional AuditorTreeHead auditor_tree_head = 4;
}
|
||||
|
||||
// ProofStep is the output of one step of a binary search through the log.
message ProofStep {
  // Prefix-tree search result at this log position.
  PrefixProof prefix = 1;
  // Commitment that is combined with the prefix-tree root to form this
  // position's log-leaf hash.
  bytes commitment = 2;
}
|
||||
|
||||
// SearchProof contains the output of a binary search through the log.
message SearchProof {
  // Log position where the binary search starts/terminates.
  // NOTE(review): confirm exact meaning against the verifier.
  uint64 pos = 1;
  // One entry per log position inspected by the search, in inspection order.
  repeated ProofStep steps = 2;
  // Hashes proving inclusion of the inspected leaves in the log tree.
  repeated bytes inclusion = 3;
}
|
||||
|
||||
// UpdateValue wraps the new value for a key with an optional signature from the
// service provider.
message UpdateValue {
  // optional bytes signature = 1; TODO
  // NOTE(review): field 1 is intentionally held for the future signature —
  // do not reuse the number.
  bytes value = 2;
}
|
||||
|
||||
// Consistency specifies the parameters of the consistency proof(s) that should
// be returned.
message Consistency {
  // Tree size of the last tree head the client observed.
  uint64 last = 1;
  // Tree size of the client's "distinguished" tree head, if it has one.
  optional uint64 distinguished = 2;
}
|
||||
|
||||
// SearchRequest comes from a user that wishes to lookup a key.
message SearchRequest {
  // The key to search for in the log.
  string search_key = 1;
  // Specific version of the key to look up; NOTE(review): presumably the
  // latest version is returned when unset — confirm.
  optional uint32 version = 2;
  // Parameters for the consistency proof(s) to return.
  optional Consistency consistency = 3;
}
|
||||
|
||||
// SearchResponse is the output of executing a search on the tree.
message SearchResponse {
  // Current tree head plus validation material.
  FullTreeHead tree_head = 1;
  // VRF proof tying the search key to its private index.
  bytes vrf_proof = 2;
  // Binary-search proof through the log.
  SearchProof search = 3;

  // Opening of the commitment to the returned value.
  bytes opening = 4;
  // The value stored for the key.
  UpdateValue value = 5;
}
|
||||
|
||||
// UpdateRequest comes from a user that wishes to update a key.
message UpdateRequest {
  // The key to update.
  string search_key = 1;
  // The new value to store for the key.
  bytes value = 2;
  // Parameters for the consistency proof(s) to return.
  optional Consistency consistency = 3;
}
|
||||
|
||||
// UpdateResponse is the output of executing an update on the tree.
message UpdateResponse {
  // Tree head after the update, plus validation material.
  FullTreeHead tree_head = 1;
  // VRF proof tying the updated key to its private index.
  bytes vrf_proof = 2;
  // Binary-search proof through the log, reflecting the new entry.
  SearchProof search = 3;

  // Opening of the commitment to the stored value.
  bytes opening = 4;
  // optional bytes signature = 5; TODO
  // NOTE(review): field 5 is intentionally held for the future signature —
  // do not reuse the number.
}
|
||||
|
||||
// MonitorKey is a single key that the user would like to monitor.
message MonitorKey {
  // The key being monitored.
  string search_key = 1;
  // NOTE(review): presumably log positions of the key's entries that the
  // client is tracking — confirm.
  repeated uint64 entries = 2;
}
|
||||
|
||||
// MonitorRequest comes from a user that wishes to monitor a set of keys.
message MonitorRequest {
  // Keys owned by the requesting user.
  repeated MonitorKey owned_keys = 1;
  // Keys of the user's contacts (not owned by the user).
  repeated MonitorKey contact_keys = 2;
  // Parameters for the consistency proof(s) to return.
  optional Consistency consistency = 3;
}
|
||||
|
||||
// MonitorProof proves that a single key has been correctly managed in the log.
message MonitorProof {
  // One step per log position inspected while monitoring the key.
  repeated ProofStep steps = 1;
}
|
||||
|
||||
// MonitorResponse is the output of a monitoring operation.
message MonitorResponse {
  // Current tree head plus validation material.
  FullTreeHead tree_head = 1;
  // Proofs for the keys the user owns, parallel to the request's owned_keys.
  repeated MonitorProof owned_proofs = 2;
  // Proofs for contact keys, parallel to the request's contact_keys.
  repeated MonitorProof contact_proofs = 3;
  // Hashes proving inclusion of the inspected leaves in the log tree.
  repeated bytes inclusion = 4;
}
|
||||
|
||||
// StoredTreeHead is an encoded tree head stored on-disk.
message StoredTreeHead {
  // The signed tree head being persisted.
  TreeHead tree_head = 1;
  // Root hash of the log at that tree head.
  bytes root = 2;
}
|
||||
|
||||
// StoredMonitoringData is encoded monitoring data stored on-disk.
message StoredMonitoringData {
  // NOTE(review): presumably the VRF-derived index of the monitored key —
  // confirm.
  bytes index = 1;
  // Log position associated with the key.
  uint64 pos = 2;
  // NOTE(review): map from log position to version counter, used to resume
  // monitoring — confirm semantics.
  map<uint64, uint32> ptrs = 3;
  // Whether the monitored key is owned by this user (vs. a contact's key).
  bool owned = 4;
}
|
3
rust/keytrans/src/wire.rs
Normal file
3
rust/keytrans/src/wire.rs
Normal file
@ -0,0 +1,3 @@
|
||||
// Lint suppression for the generated code included below.
#![allow(clippy::derive_partial_eq_without_eq)]

// Pull in the Rust types generated at build time from wire.proto.
// NOTE(review): the generator (presumably prost via build.rs) writes
// signal.keytrans.wire.rs into OUT_DIR — confirm in the build script.
include!(concat!(env!("OUT_DIR"), "/signal.keytrans.wire.rs"));
|
Loading…
Reference in New Issue
Block a user